Column            Type           Length / values
query             stringlengths  9 - 9.05k
document          stringlengths  10 - 222k
metadata          dict
negatives         listlengths    30 - 30
negative_scores   listlengths    30 - 30
document_score    stringlengths  4 - 10
document_rank     stringclasses  2 values
Returns the quantization config for transformer-based models.
def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:
    return {
        "algorithm": "quantization",
        "preset": "mixed",
        "initializer": {
            "range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
            "batchnorm_adaptation": {"num_bn_adaptation_samples": 0},
        },
        "scope_overrides": {"activations": {"{re}.*matmul_0": {"mode": "symmetric"}}},
        "ignored_scopes": [
            "{re}.*Embeddings.*",
            "{re}.*__add___[0-1]",
            "{re}.*layer_norm_0",
            "{re}.*matmul_1",
            "{re}.*__truediv__*",
        ],
        "overflow_fix": "first_layer_only",
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:\n return {\n \"algorithm\": \"quantization\",\n \"preset\": preset.value,\n \"initializer\": {\n \"range\": {\"num_init_samples\": subset_size, \"type\": DEFAULT_RANGE_TYPE},\n \"batchnorm_adaptation\": {\"num_bn_adaptation_samples\": subset_size},\n },\n \"overflow_fix\": \"first_layer_only\",\n }", "def get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]", "def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}", "def param_quantizers(self):\n return self._param_quantizers", "def get_quantization_capability(self):\n return self.cur_config['capabilities']", "def input_quantizers(self):\n return self._input_quantizers", "def quantize(self, tune_cfg, model, data_loader, q_func=None):\n assert q_func is None, \"quantization aware training mode is not support on tensorflow\"\n logger.info('Start to run model quantization...')\n quantized_model = os.path.join(os.getcwd(), \"tf_quantized.pb\")\n self.tuning_cfg_to_fw(tune_cfg)\n logger.debug('Dump quantization configurations:')\n logger.debug(self.quantize_config)\n from .tf_utils.graph_converter import GraphConverter\n converter = GraphConverter(self.pre_optimized_graph if self.pre_optimized_graph else model,\n quantized_model,\n inputs=self.inputs,\n outputs=self.outputs,\n qt_config=self.quantize_config,\n fp32_ops=self.fp32_ops,\n bf16_ops=self.bf16_ops,\n data_loader=data_loader)\n return converter.convert()", "def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config", "def quantize(self, tune_cfg, model, data_loader, q_func=None):\n assert q_func is None, \"quantization aware training has not been supported on ONNXRUNTIME\"\n model = self.pre_optimized_model if self.pre_optimized_model else model\n ort_version = StrictVersion(ort.__version__)\n if ort_version < ONNXRT152_VERSION: # pragma: no cover\n logger.warning(\"Quantize input needs onnxruntime 1.5.2 or newer.\")\n return model\n if model.model.opset_import[0].version < 11: # pragma: no cover\n logger.warning(\"Quantize input needs model opset 11 or newer.\")\n from neural_compressor.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer\n from onnxruntime.quantization.quant_utils import QuantizationMode\n backend = QuantizationMode.QLinearOps if self.backend == \\\n \"qlinearops\" else QuantizationMode.IntegerOps\n\n self.quantizable_ops = self._query_quantizable_ops(model.model)\n tmp_model = copy.deepcopy(model)\n \n quantize_config = self._cfg_to_quantize_config(tune_cfg)\n iterations = tune_cfg.get('calib_iteration', 1)\n if self.static:\n quantize_params = self._get_quantize_params(tmp_model.model, data_loader, \\\n quantize_config, iterations)\n else:\n quantize_params = None\n self.quantize_params = quantize_params\n quantizer = ONNXQuantizer(tmp_model.model,\n quantize_config,\n backend,\n self.static,\n quantize_params,\n self.quantizable_op_types)\n quantizer.quantize_model()\n tmp_model.q_config = self._generate_qconfig(model.model, tune_cfg, quantize_params)\n tmp_model.model = quantizer.model.model\n self.quantize_config = quantize_config # update so other methods can know current configs\n \n self._dump_model_op_stastics(tmp_model)\n return tmp_model", "def use_config_qty_increments(self):\n return 
self._use_config_qty_increments", "def _quantize(self, model: nn.Module) -> nn.Module:\n if self.mask:\n model_utils.remove_pruning_reparameterization(self.params_all)\n\n # check the accuracy after each epoch\n quantized_model = torch.quantization.convert(model.eval(), inplace=False)\n quantized_model.eval()\n\n # set masks again\n if self.mask:\n self._load_masks()\n\n return quantized_model", "def get_config(self):\n config = {\n \"units\": self.units,\n \"activation\": activations.serialize(self.activation),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation\n ),\n \"attention_activation\": activations.serialize(\n self.attention_activation\n ),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n self.recurrent_initializer\n ),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"attention_initializer\": initializers.serialize(\n self.attention_initializer\n ),\n \"use_chrono_initialization\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer\n ),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(\n self.activity_regularizer\n ),\n \"attention_regularizer\": regularizers.serialize(\n self.attention_regularizer\n ),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint\n ),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n \"attention_constraint\": constraints.serialize(\n self.attention_constraint\n ),\n \"dropout\": self.dropout,\n \"recurrent_dropout\": self.recurrent_dropout,\n \"return_attention\": self.return_attention,\n }\n base_config = super().get_config()\n del base_config[\"cell\"]\n return dict(list(base_config.items()) + list(config.items()))", "def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}", "def use_config_min_qty(self):\n return self._use_config_min_qty", "def output_quantizers(self):\n return self._output_quantizers", "def get_config(self):\n config = {\n 'membership_transform': self.membership_transform,\n 'predictions_transform': self.predictions_transform,\n 'membership_kernel': self.membership_kernel,\n 'predictions_kernel': self.predictions_kernel,\n 'name': self.name,\n }\n config = {k: v for k, v in config.items() if v is not None}\n return self._serialize_config(config)", "def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(PowTransform, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def get_config(self) -> Dict[str, Any]:\n return {\n 'num_classes': self.num_classes,\n 'name': self.name,\n 'dtype': self.dtype,\n 'sparse_y_true': self.sparse_y_true,\n 'sparse_y_pred': self.sparse_y_pred,\n 'axis': self.axis,\n }", "def get_config(self):\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n if hasattr(self, 'dtype'):\n config['dtype'] = self.dtype\n return config", "def base_model_config():\n return {\n # TFRecord file pattern containing Example protos.\n \"input_file_pattern\": \"\",\n\n # Number of examples to keep in the input queue.\n \"input_queue_capacity\": 5 * 640000, # 5 shards of the 
BookCorpus.\n\n # Number of threads for prefetching TFRecord values.\n \"num_input_reader_threads\": 1,\n\n # Whether to shuffle the input data.\n \"shuffle_input_data\": True,\n\n # Scale of the random uniform initializer.\n \"uniform_init_scale\": 0.1,\n\n # Number of unique words in the vocab.\n \"vocab_size\": 20000,\n\n # Batch size (training and evaluation only).\n \"batch_size\": 128,\n\n # Word embedding dimension.\n \"word_embedding_dim\": 620,\n\n # Whether to use a bidirectional or unidirectional encoder RNN.\n \"bidirectional_encoder\": False,\n\n # Number of output dimensions of the sentence encoder.\n \"encoder_dim\": 2400,\n\n # Operation for combining the final states of the encoder GRU\n \"pooling_operation\": \"last\",\n }", "def config(self) -> dict:\n if self.__class__.__name__ == 'MyFunctionTransformer':\n pass\n else:\n check_is_fitted(self)\n\n _config = {}\n for attr in self.config_paras:\n _config[attr] = getattr(self, attr)\n\n return {\"params\": self.get_params(),\n \"config\": _config}", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def recover(self, model, q_config):\n self._pre_optimize(model)\n model = self.pre_optimized_model\n ort_version = StrictVersion(ort.__version__)\n if ort_version < ONNXRT152_VERSION: # pragma: no cover\n logger.warning(\"Quantize input needs onnxruntime 1.5.2 or newer.\")\n return model\n if model.model.opset_import[0].version < 11: # pragma: no cover\n logger.warning(\"Quantize input needs model opset 11 or newer.\")\n\n from neural_compressor.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer\n from onnxruntime.quantization.quant_utils import QuantizationMode\n backend = QuantizationMode.QLinearOps if self.backend == \\\n \"qlinearops\" else QuantizationMode.IntegerOps\n \n self.quantizable_ops = self._query_quantizable_ops(model.model)\n quantize_params, tune_cfg = self._parse_qconfig(q_config)\n quantize_config = self._cfg_to_quantize_config(tune_cfg)\n quantizer = ONNXQuantizer(model.model,\n quantize_config,\n backend,\n self.static,\n quantize_params,\n self.quantizable_op_types)\n\n quantizer.quantize_model()\n model.model = quantizer.model.model\n return model", "def override_config(args):\n args.transformer_enc_config = (\n \"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def override_config(args):\n args.transformer_enc_config = (\n \"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3\"\n )", "def get_config(self):\n config = self._kwargs.copy()\n config.update({\n 'metric': self.__class__.__name__,\n 'name': self.name,\n 'output_names': self.output_names,\n 'label_names': self.label_names})\n return config", "def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(LogTransform, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def use_config_enable_qty_inc(self):\n return self._use_config_enable_qty_inc", "def override_config(args):\n args.transformer_enc_config = (\n \"((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3\"\n )" ]
[ "0.66286147", "0.5942934", "0.5907931", "0.5872018", "0.5804783", "0.57093644", "0.56935316", "0.5644522", "0.56273764", "0.5596549", "0.5582015", "0.55781955", "0.5568165", "0.5547753", "0.5484123", "0.54839206", "0.54510504", "0.5421256", "0.54176253", "0.5367854", "0.5312555", "0.53048676", "0.53018904", "0.52835107", "0.5274694", "0.5274694", "0.52663773", "0.52303475", "0.52228343", "0.5207249" ]
0.780377
0
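For context, a minimal sketch of how the transformer config above is typically consumed: the returned dict is wrapped into an NNCFConfig, mirroring the `_create_nncf_config` row later in this section. The `DEFAULT_RANGE_TYPE` value, the import path, and the target-device string are assumptions for this sketch, not taken from the row itself, and the helper defined above is assumed to be in scope.

# Hedged sketch: wrap the transformer-style compression section into an NNCFConfig.
from nncf import NNCFConfig  # assumed import path

DEFAULT_RANGE_TYPE = "mean_min_max"  # placeholder for the module-level constant used above

# Build the transformer-friendly compression section with a 300-sample calibration
# subset, then wrap it the same way _create_nncf_config does further down.
compression_config = _get_transformer_quantization_config(subset_size=300)
nncf_config = NNCFConfig({"target_device": "ANY", "compression": compression_config})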
Returns the default quantization config.
def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:
    return {
        "algorithm": "quantization",
        "preset": preset.value,
        "initializer": {
            "range": {"num_init_samples": subset_size, "type": DEFAULT_RANGE_TYPE},
            "batchnorm_adaptation": {"num_bn_adaptation_samples": subset_size},
        },
        "overflow_fix": "first_layer_only",
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:\n return {\n \"algorithm\": \"quantization\",\n \"preset\": \"mixed\",\n \"initializer\": {\n \"range\": {\"num_init_samples\": subset_size, \"type\": DEFAULT_RANGE_TYPE},\n \"batchnorm_adaptation\": {\"num_bn_adaptation_samples\": 0},\n },\n \"scope_overrides\": {\"activations\": {\"{re}.*matmul_0\": {\"mode\": \"symmetric\"}}},\n \"ignored_scopes\": [\n \"{re}.*Embeddings.*\",\n \"{re}.*__add___[0-1]\",\n \"{re}.*layer_norm_0\",\n \"{re}.*matmul_1\",\n \"{re}.*__truediv__*\",\n ],\n \"overflow_fix\": \"first_layer_only\",\n }", "def default_config(self):\n return {\n \"xtype\": \"scalar\",\n \"ytype\": \"scalar\",\n \"xtick-delta\": 50, \n \"ytick-delta\": 20, \n \"xtick-format\": '{:g}', \n \"ytick-format\": '{:g}',\n }", "def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config", "def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config", "def getDefaultConfig():\n config = {\n \"samples\": _DEFAULT_SAMPLE_COUNT,\n \"channel\": \"all\",\n \"rate\": _DEFAULT_SAMPLE_RATE,\n \"update\": 1,\n \"output\": \"data.rld\",\n \"format\": \"rld\",\n \"size\": _DEFAULT_FILE_SIZE,\n \"comment\": _DEFAULT_FILE_COMMENT,\n \"digital\": True,\n \"ambient\": False,\n \"aggregate\": \"downsample\",\n \"high-range\": [],\n \"web\": False,\n }\n return config", "def get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]", "def get_default_config(self):\n \n config = {}\n \n # default z_0_hat, zeros, flexible\n config['z_0_hat_option'] = 'flexible'\n config['initial_z_0_hat'] = np.zeros(self.dimension)\n \n # default P_0_hat, identity times a small scalar, flexible\n config['P_0_hat_option'] = 'flexible'\n config['initial_P_0_hat'] = 0.1 * np.eye(self.dimension)\n \n # default A, identity, flexible\n config['AB_option'] = 'flexible'\n config['initial_A'] = np.eye(self.dimension)\n config['initial_B'] = np.zeros((self.dimension, self.control_dimension))\n \n # default Q, identity times a small scalar, flexible\n config['Q_option'] = 'flexible'\n config['initial_Q'] = 0.1 * np.eye(self.dimension)\n \n # default R, identity times a small scalar, flexible\n config['R_option'] = 'flexible'\n config['initial_R'] = 0.1 * np.eye(self.dimension)\n \n # default stopping criteria, threshold 1e-5, num_iterations 1000\n # stop whenever either of the two critieria is reached\n config['threshold'] = 1e-5\n config['num_iterations'] = 1000\n\n return config", "def default_options(cls) -> Dict:\n options = super().default_options()\n # scaling factor for 
temperature adaptation\n options['eta'] = 100\n # controls the adaptation degeneration velocity of the temperature\n # adaption.\n options['nu'] = 1e3\n\n return options", "def get_default_visualizer_config(self):\n\n default_config = {\n\n \"format\": \"png\",\n\n \"fontsize\": 16,\n \"fontname\": 'Roboto',\n \"rankdir\": \"TB\",\n\n # Specific offsets can be specified here\n # for different shades. Change the values\n # below 0 and 1. For best results we recommend\n # to keep the range between 0.1 - 0.3\n \"layer_color_dict\": {\n \"InputLayer\": 0.1,\n \"Reshape\": 0.12,\n \"Conv1D\": 0.13,\n \"Conv2D\": 0.17,\n \"MaxPooling1D\": 0.19,\n \"MaxPooling2D\": 0.20,\n \"ZeroPadding3D\": 0.22,\n \"Flatten\": 0.25,\n \"AveragePooling2D\": 0.27,\n \"Dropout\": 0.29,\n \"Dense\": 0.3,\n \"Concatenate\": 0.32,\n \"Model\": 0.34,\n \"RepeatVector\": 0.36,\n \"Multiply\": 0.38,\n \"Add\": 0.39,\n \"Lambda\": 0.4,\n \"SpatialDropout1D\": 0.41,\n \"SpatialDropout2D\": 0.44\n },\n # Please provide as many colors\n # as many models you expect.\n # This package will\n # generate random colors incase\n # colors fall short, but there\n # is no guarantee that they will be\n # pretty\n 'default_color_package': [\n [0.586, 1.000, 1.000],\n [0.513, 0.141, 0.725],\n [0.094, 1.000, 1.000],\n [0.375, 0.739, 0.780],\n [0.967, 0.816, 0.961],\n [0.286, 1.000, 1.000],\n [0.750, 0.416, 0.961],\n [0.778, 0.631, 0.871],\n [0.613, 0.141, 0.725],\n [0.850, 0.539, 0.780],\n [0.186, 1.000, 1.000]\n ],\n \"class_names\": True,\n \"graph_label\": \"Nested SeeNN\",\n \"node_seperation_distance\": 0.4,\n\n 'module_connection_color': 'black',\n 'collapse_inputs': False,\n 'layer_height': 0.5,\n 'layer_width': 2,\n # 'condense_dropout_layer': False,\n\n # Specify if to use multiple color layers,\n # rather than shade\n 'use_multiple_colors_layers': False,\n\n # If use_multiple_colors_layers is Fa,se,\n # provide the colors\n 'multiple_colors_layer_package': {\n \"InputLayer\": \"grey\",\n \"Reshape\": \"#F5A286\",\n \"Conv1D\": \"#F7D7A8\",\n \"Conv2D\": \"#F7D7A8\",\n \"MaxPooling1D\": \"#AADFA2\",\n \"MaxPooling2D\": \"#AADFA2\",\n \"ZeroPadding3D\": \"grey\",\n \"Flatten\": \"grey\",\n \"AveragePooling2D\": \"#A8CFE7\",\n \"Dropout\": \"#9896C8\",\n \"Dense\": \"#C66AA7\",\n \"Concatenate\": \"#F5A286\",\n \"Model\": \"#292D30\",\n \"RepeatVector\": \"grey\",\n \"Multiply\": \"grey\",\n \"Add\": \"grey\",\n \"Lambda\": \"#CAAFE7\",\n \"SpatialDropout1D\": \"#FFAAEE\",\n \"SpatialDropout2D\": \"#CAAFE7\"\n },\n\n 'show_activation': False,\n 'rotate': 90,\n 'show_constant_input': False\n\n }\n\n return default_config", "def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}", "def _default_config(self):\n return {\n 'penalty': 'l1',\n 'solver': 'liblinear'\n }", "def getDefaultCurrency():", "def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}", "def get_config():\n config = dict(\n name=\"defaults\",\n # Either use geometric, zipf, or uniform i.e., data 
variable\n # can take one of \"geometric\", \"zipf\", \"uniform\".\n distribution=\"zipf\",\n lbd_geometric=0.8,\n degree_zipf=1.0,\n # Flags to indicate which methods to compare.\n run_approx_miracle=False,\n run_miracle=False,\n run_modified_miracle=True,\n run_ss=True,\n run_rhr=True,\n encoding_type=\"fast\", # Can take either fast or normal\n # Common parameters.\n num_itr=1,\n coding_cost=14,\n coding_cost_multiplier=1,\n approx_coding_cost_multiplier=3,\n approx_t=6,\n # Specific parameters (leave them as they are for now).\n delta=10**(-6),\n alpha=1.0,\n # Variation.\n vary=\"eps\", # Can take one of \"cc\", \"k\", \"n\", \"eps\".\n cc_space=[6, 8, 10, 12, 14],\n k_space=[200, 400, 600, 800, 1000],\n n_space=[2000, 4000, 6000, 8000, 10000],\n eps_space=list(range(1, 9)),\n # Defaults.\n n=5000,\n k=500,\n t=3,\n epsilon_target=6,\n )\n config = config_dict.ConfigDict(config)\n config.lock() # Prevent addition of new fields.\n return config", "def default_tune(self):\n return self._default_tune", "def _GetDefaultConfig(self) -> str:\n try:\n region = util.GetRegionFromZone(\n FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0])\n except IndexError:\n region = _DEFAULT_REGION\n return f'regional-{region}'", "def get_default_config(self):\n default_config = super(SNMPInterfacePollCollector,\n self).get_default_config()\n default_config['path'] = 'snmp.interface'\n default_config['byte_unit'] = ['bit', 'byte']\n return default_config", "def get_default_config(self):\n config = super(NumaCollector, self).get_default_config()\n config.update(\n {\n \"path\": \"numa\",\n \"bin\": self.find_binary(\"numactl\"),\n }\n )\n\n return config", "def get_default_config(self):\n config = super(SignalfxHandler, self).get_default_config()\n\n config.update({\n 'url': 'https://ingest.signalfx.com/v2/datapoint',\n 'batch': 300,\n # Don't wait more than 10 sec between pushes\n 'batch_max_interval': 10,\n 'auth_token': '',\n })\n\n return config", "def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(PowTransform, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}", "def _default_config(cls):\n return dict()", "def default_metric():\n return ProductRiemannianMetric", "def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(MatrixReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def default_parameters():\n return BackendNSParameters()", "def use_config_min_qty(self):\n return self._use_config_min_qty", "def get_default_settings(cfg):\n cfg = deepcopy(cfg)\n cfg.setdefault('metric', 'regression_slope')\n cfg.setdefault('n_jobs', 1)\n cfg.setdefault('savefig_kwargs', {\n 'bbox_inches': 'tight',\n 'dpi': 600,\n 'orientation': 'landscape',\n })\n logger.info(\"Using at most %i processes\", cfg['n_jobs'])\n return cfg", "def _default_options(cls):\n default_options = super()._default_options()\n default_options.data_processor = dp.DataProcessor(\n input_key=\"counts\",\n data_actions=[dp.Probability(\"1\"), dp.BasisExpectationValue()],\n )\n default_options.plotter.set_figure_options(\n xlabel=\"Flat top width\",\n ylabel=\"Pauli expectation values\",\n xval_unit=\"s\",\n ylim=(-1, 1),\n )\n default_options.data_subfit_map = {\n \"x\": {\"meas_basis\": \"x\"},\n 
\"y\": {\"meas_basis\": \"y\"},\n \"z\": {\"meas_basis\": \"z\"},\n }\n\n return default_options", "def default_config(cls):\n\n config = {\n \"checkpoint_path\": \"\", # path to model checkpoint\n \"separated_audio_folder\": \"\" # path to folder where to save the separated audio tracks.\n }\n return config" ]
[ "0.7033103", "0.6777367", "0.6720709", "0.65748835", "0.6455274", "0.642663", "0.63270366", "0.63183445", "0.63130504", "0.6306793", "0.62361944", "0.6232875", "0.621485", "0.6185362", "0.6169694", "0.6101184", "0.6091094", "0.6084804", "0.60460657", "0.59970856", "0.5981526", "0.5975689", "0.5931584", "0.5931163", "0.5893432", "0.5892437", "0.58827627", "0.58786976", "0.5866956", "0.5846194" ]
0.8306506
0
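A short worked example of the default-config helper above. It assumes `QuantizationPreset` is NNCF's public enum (whose `.value` is the lowercase preset name), fills in a placeholder for `DEFAULT_RANGE_TYPE`, and assumes the helper itself is in scope.

# Hedged sketch of calling the default-config helper shown above.
from nncf import QuantizationPreset  # assumed import path

DEFAULT_RANGE_TYPE = "mean_min_max"  # placeholder for the module-level constant used above

# With the "performance" preset and a 300-sample calibration subset, the preset
# string and the initializer sample counts follow directly from the arguments.
config = _get_default_quantization_config(QuantizationPreset.PERFORMANCE, subset_size=300)
assert config["preset"] == "performance"
assert config["initializer"]["range"]["num_init_samples"] == 300
assert config["initializer"]["batchnorm_adaptation"]["num_bn_adaptation_samples"] == 300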
Creates the NNCFConfig for the quantization algorithm.
def _create_nncf_config(
    preset: QuantizationPreset,
    target_device: TargetDevice,
    subset_size: int,
    model_type: Optional[ModelType],
    ignored_scope: Optional[IgnoredScope],
    advanced_parameters: Optional[AdvancedQuantizationParameters],
) -> NNCFConfig:
    if model_type is None:
        compression_config = _get_default_quantization_config(preset, subset_size)
    elif model_type == ModelType.TRANSFORMER:
        compression_config = _get_transformer_quantization_config(subset_size)

    if ignored_scope is not None:
        _ignored_scope = convert_ignored_scope_to_list(ignored_scope)
        if "ignored_scopes" in compression_config:
            compression_config["ignored_scopes"].extend(_ignored_scope)
        else:
            compression_config["ignored_scopes"] = _ignored_scope
        compression_config["validate_scopes"] = ignored_scope.validate

    if advanced_parameters is not None:
        compression_config = apply_advanced_parameters_to_config(compression_config, advanced_parameters)

    if model_type == ModelType.TRANSFORMER:
        compression_config["validate_scopes"] = False

    return NNCFConfig({"target_device": target_device.value, "compression": compression_config})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, quantized_edges_in_cfg: int, total_edges_in_cfg: int):\n self.quantized_edges_in_cfg = quantized_edges_in_cfg\n self.total_edges_in_cfg = total_edges_in_cfg", "def _add_fp_configs(CONFIG):\n CONFIG.declare(\n 'fp_cutoffdecr',\n ConfigValue(\n default=1e-1,\n domain=PositiveFloat,\n description='Additional relative decrement of cutoff value for the original objective function.',\n ),\n )\n CONFIG.declare(\n 'fp_iteration_limit',\n ConfigValue(\n default=20,\n domain=PositiveInt,\n description='Feasibility pump iteration limit',\n doc='Number of maximum iterations in the feasibility pump methods.',\n ),\n )\n # TODO: integrate this option\n CONFIG.declare(\n 'fp_projcuts',\n ConfigValue(\n default=True,\n description='Whether to add cut derived from regularization of MIP solution onto NLP feasible set.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_transfercuts',\n ConfigValue(\n default=True,\n description='Whether to transfer cuts from the Feasibility Pump MIP to main MIP in selected strategy (all except from the round in which the FP MIP became infeasible).',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_projzerotol',\n ConfigValue(\n default=1e-4,\n domain=PositiveFloat,\n description='Tolerance on when to consider optimal value of regularization problem as zero, which may trigger the solution of a Sub-NLP.',\n ),\n )\n CONFIG.declare(\n 'fp_mipgap',\n ConfigValue(\n default=1e-2,\n domain=PositiveFloat,\n description='Optimality tolerance (relative gap) to use for solving MIP regularization problem.',\n ),\n )\n CONFIG.declare(\n 'fp_discrete_only',\n ConfigValue(\n default=True,\n description='Only calculate the distance among discrete variables in regularization problems.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_main_norm',\n ConfigValue(\n default='L1',\n domain=In(['L1', 'L2', 'L_infinity']),\n description='Different forms of objective function MIP regularization problem.',\n ),\n )\n CONFIG.declare(\n 'fp_norm_constraint',\n ConfigValue(\n default=True,\n description='Whether to add the norm constraint to FP-NLP',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'fp_norm_constraint_coef',\n ConfigValue(\n default=1,\n domain=PositiveFloat,\n description='The coefficient in the norm constraint, correspond to the Beta in the paper.',\n ),\n )", "def _get_default_quantization_config(preset: QuantizationPreset, subset_size: int) -> Dict[str, Any]:\n return {\n \"algorithm\": \"quantization\",\n \"preset\": preset.value,\n \"initializer\": {\n \"range\": {\"num_init_samples\": subset_size, \"type\": DEFAULT_RANGE_TYPE},\n \"batchnorm_adaptation\": {\"num_bn_adaptation_samples\": subset_size},\n },\n \"overflow_fix\": \"first_layer_only\",\n }", "def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:\n return {\n \"algorithm\": \"quantization\",\n \"preset\": \"mixed\",\n \"initializer\": {\n \"range\": {\"num_init_samples\": subset_size, \"type\": DEFAULT_RANGE_TYPE},\n \"batchnorm_adaptation\": {\"num_bn_adaptation_samples\": 0},\n },\n \"scope_overrides\": {\"activations\": {\"{re}.*matmul_0\": {\"mode\": \"symmetric\"}}},\n \"ignored_scopes\": [\n \"{re}.*Embeddings.*\",\n \"{re}.*__add___[0-1]\",\n \"{re}.*layer_norm_0\",\n \"{re}.*matmul_1\",\n \"{re}.*__truediv__*\",\n ],\n \"overflow_fix\": \"first_layer_only\",\n }", "def create_config(self, context, mgmtport):\n pass", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def get_configspace():\n cs = CS.ConfigurationSpace()\n\n \n\n # 
Learning rate hyperparameter\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\n\n \n\n # Stochastic gradient descent momentum as parameter.\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\n\n cs.add_hyperparameters([lr, sgd_momentum])\n \n # Optimizer hyperparameters.\n #optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\n #cs.add_hyperparameters([optimizer])\n \n # Only add the sgd_momentum hyperparameter if the optimizer is stochastic gradient descent. Otherwise, it doesn't make sense.\n #cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\n #cs.add_condition(cond)\n\n ''' The below is commented out because we're not fiddling with architecture in this optimization.'''\n #num_new_fc_layers = CSH.UniformIntegerHyperparameter('num_new_fc_layers', lower=0, upper=3, default_value=0, log=False)\n #num_els_new_1 = CSH.UniformIntegerHyperparameter('num_els_new_1', lower=128, upper=4096, default_value = 1000, log=True)\n #num_els_new_2 = CSH.UniformIntegerHyperparameter('num_els_new_2', lower=128, upper=4096, default_value = 1000, log=True)\n #num_els_new_3 = CSH.UniformIntegerHyperparameter('num_els_new_3', lower=128, upper=4096, default_value = 1000, log=True)\n\n #freeze0_old = CSH.UniformIntegerHyperparameter('freeze0_cat', lower = 0, upper = 1, default_value = 1, log=False)\n #freeze1_old = CSH.UniformIntegerHyperparameter('freeze1_cat', lower=0, upper=1, default_value=1, log=False)\n\n #cs.add_hyperparameters([num_new_fc_layers, num_els_new_1, num_els_new_2, num_els_new_3, freeze0_old, freeze1_old, batchsize])\n\n dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\n\n cs.add_hyperparameters([dropout_rate])\n\n return cs", "def __init__(self, mode, cfg):\n super(DMCM, self).__init__()\n\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n\n # Matrix network does not need weight initialization because there can\n # be no vanishing gradients.\n self.conv_net.apply(_init_weights_xavier)", "def get_config():\n name = 'dynamic_pricing'\n num_products = 5\n scale = 1\n noise_var = 10\n p_max = 1\n\n agents = collections.OrderedDict(\n [('bsPricing',\n functools.partial(BootstrapDynamicPricing,\n num_products, scale, noise_var, p_max))]\n )\n\n environments = collections.OrderedDict(\n [('env',\n functools.partial(DynamicPricing,\n num_products, scale, noise_var, p_max))]\n )\n experiments = collections.OrderedDict(\n [(name, ExperimentNoAction)]\n )\n n_steps = 80\n n_seeds = 2000\n config = Config(name, agents, environments, experiments, n_steps, n_seeds)\n return config", "def __set_special_config_values(cfg: __Config, config: dict) -> \"__Config\":\n cfg.file_name_plane_masks = lambda i: str(i) + config['file_name_plane_mask_suf']\n cfg.file_name_planercnn_image = lambda i: str(i) + config['file_name_planercnn_image_suf']\n cfg.dir_results = f\"{cfg.edge_detection_type}\" # will be the output folder, create in data dir\n cfg.image_size = tuple(int(x) for x in config['image_size'].split(\" \"))\n return cfg", "def __init__(self, cfg_file,\n ckpt: str,\n output_path: str = None,\n logger: Logger = None) -> None:\n\n if logger is None:\n logger = make_logger()\n\n cfg = load_config(cfg_file)\n\n if \"test\" not in cfg[\"data\"].keys():\n raise ValueError(\"Test data must be specified in config.\")\n\n #print(cfg.keys())\n if \"dqn\" not in cfg.keys():\n 
raise ValueError(\"dqn data must be specified in config.\")\n self.model_dir = cfg[\"training\"][\"model_dir\"]\n # when checkpoint is not specified, take latest (best) from model dir\n if ckpt is None:\n model_dir = cfg[\"training\"][\"model_dir\"]\n ckpt = get_latest_checkpoint(model_dir)\n if ckpt is None:\n raise FileNotFoundError(\"No checkpoint found in directory {}.\"\n .format(model_dir))\n try:\n step = ckpt.split(model_dir+\"/\")[1].split(\".ckpt\")[0]\n except IndexError:\n step = \"best\"\n\n\n self.batch_size = 1 #**\n self.batch_type = cfg[\"training\"].get(\n \"eval_batch_type\", cfg[\"training\"].get(\"batch_type\", \"sentence\"))\n self.use_cuda = cfg[\"training\"].get(\"use_cuda\", False)\n self.level = cfg[\"data\"][\"level\"]\n self.eval_metric = cfg[\"training\"][\"eval_metric\"]\n self.max_output_length = cfg[\"training\"].get(\"max_output_length\", None)\n\n \n\n # load the data\n train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(\n data_cfg=cfg[\"data\"])\n #Loading the DQN parameters:\n self.sample_size = cfg[\"dqn\"][\"sample_size\"]\n self.lr = cfg[\"dqn\"].get(\"lr\", 0.01)\n self.egreed_max = cfg[\"dqn\"].get(\"egreed_max\", 0.9)\n self.egreed_min = cfg[\"dqn\"].get(\"egreed_min\", 0.01)\n self.gamma_max = cfg[\"dqn\"].get(\"gamma_max\", 0.9)\n self.gamma_min = cfg[\"dqn\"].get(\"gamma_min\", 0.5)\n self.nu_iter = cfg[\"dqn\"][\"nu_iter\"]\n self.mem_cap = cfg[\"dqn\"][\"mem_cap\"]\n self.beam_min = cfg[\"dqn\"][\"beam_min\"]\n self.beam_max = cfg[\"dqn\"][\"beam_max\"]\n self.state_type = cfg[\"dqn\"][\"state_type\"]\n \n if self.state_type == 'hidden':\n self.state_size = cfg[\"model\"][\"encoder\"][\"hidden_size\"]*2\n else:\n self.state_size = cfg[\"model\"][\"encoder\"][\"hidden_size\"]\n\n self.actions_size = len(src_vocab)\n self.gamma = None\n \n print(\"Sample size: \", self.sample_size )\n print(\"State size: \", self.state_size)\n print(\"Action size: \", self.actions_size)\n self.epochs = cfg[\"dqn\"][\"epochs\"]\n\n # Inii the Qnet and Qnet2\n self.eval_net = Net(self.state_size, self.actions_size)\n self.target_net = Net(self.state_size, self.actions_size)\n\n #Following the algorithm\n self.target_net.load_state_dict(self.eval_net.state_dict())\n\n self.learn_step_counter = 0\n self.memory_counter = 0\n self.size_memory1 = self.state_size * 2 + 2 + 1\n self.memory = np.zeros((self.mem_cap, self.size_memory1 ))\n self.optimizer = torch.optim.Adam(self.eval_net.parameters()\n , lr=self.lr )\n self.loss_func = nn.MSELoss()\n\n #others parameters\n self.bos_index = trg_vocab.stoi[BOS_TOKEN]\n self.eos_index = trg_vocab.stoi[EOS_TOKEN]\n self.pad_index = trg_vocab.stoi[PAD_TOKEN]\n\n self.data_to_train_dqn = {\"train\": train_data}\n \n #self.data_to_train_dqn = {\"test\": test_data}\n #self.data_to_dev = {\"dev\": dev_data}\n self.data_to_dev = {\"dev\": dev_data}\n #self.data_to_train_dqn = {\"train\": train_data\n # ,\"dev\": dev_data, \"test\": test_data}\n # load model state from disk\n model_checkpoint = load_checkpoint(ckpt, use_cuda=self.use_cuda)\n\n # build model and load parameters into it\n self.model = build_model(cfg[\"model\"], src_vocab=src_vocab, trg_vocab=trg_vocab)\n self.model.load_state_dict(model_checkpoint[\"model_state\"])\n\n if self.use_cuda:\n self.model.cuda()\n\n # whether to use beam search for decoding, 0: greedy decoding\n beam_size = 1\n beam_alpha = -1\n\n #others not important parameters\n self.index_fin = None\n path_tensroboard = self.model_dir + \"/tensorboard_DQN/\"\n self.tb_writer = 
SummaryWriter( log_dir=path_tensroboard , purge_step=0)\n self.dev_network_count = 0\n print(cfg[\"dqn\"][\"reward_type\"])\n #Reward funtion related:\n if cfg[\"dqn\"][\"reward_type\"] == \"bleu_diff\" : \n print(\"You select the reward based on the Bleu score differences\")\n self.Reward = self.Reward_bleu_diff\n elif cfg[\"dqn\"][\"reward_type\"] == \"bleu_lin\" : \n print(\"You select the reward based on the linear Bleu socres, and several punishments\")\n self.Reward = self.Reward_lin\n else:\n print(\"You select the reward based on the final score on the last state \")\n self.Reward = self.Reward_bleu_fin", "def generate_nnie_config(nnie_cfg, config, nnie_out_path='./config.json', tensor_type='float'):\n u8_start = False if tensor_type == 'float' else False\n default_config = {\n \"default_net_type_token\": \"nnie\",\n \"rand_input\": False,\n \"data_num\": 100,\n \"input_path_map\": {\n \"data\": \"./image_bins\",\n },\n \"nnie\": {\n \"max_batch\": 1,\n \"output_names\": [],\n \"mapper_version\": 11,\n \"u8_start\": u8_start,\n \"device\": \"gpu\",\n \"verbose\": False,\n \"image_path_list\": [\"./image_list.txt\"],\n \"mean\": [128, 128, 128],\n \"std\": [1, 1, 1]\n }\n }\n image_path_list = nnie_cfg['image_path_list']\n assert os.path.exists(image_path_list)\n with open(image_path_list, 'r') as f:\n image_list = [item.strip() for item in f.readlines()]\n\n mean = config.to_kestrel.get('pixel_means', [123.675, 116.28, 103.53])\n std = config.to_kestrel.get('pixel_stds', [58.395, 57.12, 57.375])\n resize_hw = config.to_kestrel.get('resize_hw', (224, 224))\n resize_hw = tuple(resize_hw)\n data_num = len(image_list)\n image_bin_path = generate_image_bins(image_list, mean, std, resize_hw)\n default_config['data_num'] = data_num\n default_config['input_path_map']['data'] = image_bin_path\n default_config['nnie']['max_batch'] = nnie_cfg.get('max_batch', 1)\n default_config['nnie']['mapper_version'] = nnie_cfg.get('mapper_version', 11)\n default_config['nnie']['image_path_list'] = [image_path_list]\n default_config['nnie']['mean'] = [128] * len(std)\n default_config['nnie']['std'] = [1] * len(std)\n with open(nnie_out_path, \"w\") as f:\n json.dump(default_config, f, indent=2)\n\n return nnie_out_path", "def construct_and_initialize_trainable_quantizers(self, quant_scheme):\n # Copy user settable attributes for outputs\n\n def _create_trainable_quantizer(bw, name, quantizer):\n \"\"\" create trainable quantizer from static grid quantizer. \"\"\"\n # Initialize trainable parameters to None\n self.register_parameter(f'{name}_encoding_min', None)\n self.register_parameter(f'{name}_encoding_max', None)\n # Pass name of tensor quantizer and reference of Wrapper to tensor quantizer\n # Input quantizer\n new_quantizer = tensor_quantizer_factory(bw, self._round_mode,\n quant_scheme,\n self._is_symmetric,\n enabled_by_default=True,\n data_type=self._data_type)\n new_quantizer.name = name\n new_quantizer.wrapper_ref = self\n new_quantizer.device = self.device\n initialize_learned_grid_quantizer_attributes(new_quantizer, quantizer)\n return new_quantizer\n\n new_grouped_quantizers = {name: _create_trainable_quantizer(self._activation_bw, name, quantizer)\n for name, quantizer in self._grouped_quantizers.items()}\n\n def create_trainable_quantizer(bw, name, quantizer):\n \"\"\" create trainable quantizer if not part of a group else reuse the group quantizer. 
\"\"\"\n if quantizer in self._grouped_quantizers.values():\n group_names = [n for n, q in self._grouped_quantizers.items() if q == quantizer]\n assert len(group_names) == 1\n # creating a param min/max references to the shared group min/max parameters.\n setattr(self, f'{name}_encoding_min', getattr(self, f'{group_names[0]}_encoding_min'))\n setattr(self, f'{name}_encoding_max', getattr(self, f'{group_names[0]}_encoding_max'))\n return new_grouped_quantizers[group_names[0]]\n\n return _create_trainable_quantizer(bw, name, quantizer)\n\n\n self._input_quantizers = {name: create_trainable_quantizer(self._activation_bw, name, quantizer)\n for name, quantizer in self.input_quantizers.items()}\n self._output_quantizers = {name: create_trainable_quantizer(self._activation_bw, name, quantizer)\n for name, quantizer in self.output_quantizers.items()}\n self._param_quantizers = {name: create_trainable_quantizer(self._weight_bw, name, quantizer)\n for name, quantizer in self._param_quantizers.items()}\n\n self._grouped_quantizers = new_grouped_quantizers\n self._mode = QcQuantizeOpMode.LEARN_ENCODINGS", "def get_config(self):\n config = {\n 'F_': self.F_,\n 'attn_heads': self.attn_heads,\n 'attn_heads_reduction': self.attn_heads_reduction,\n 'edge_type_reduction': self.edge_type_reduction,\n 'attention_type': self.attention_type,\n 'attn_dropout': self.attn_dropout,\n 'feature_dropout': self.feature_dropout,\n 'activation': self.activation,\n 'use_value_bias': self.use_value_bias,\n 'use_key_bias': self.use_key_bias,\n 'kernel_initializer': self.kernel_initializer,\n 'bias_initializer': self.bias_initializer,\n 'attn_kernel_initializer': self.attn_kernel_initializer,\n 'attn_bias_initalizer': self.attn_bias_initializer,\n 'kernel_regularizer': self.kernel_regularizer,\n 'bias_regularizer': self.bias_regularizer,\n 'attn_kernel_regularizer': self.attn_kernel_regularizer,\n 'attn_bias_regularizer': self.attn_bias_regularizer,\n 'activity_regularizer': self.activity_regularizer,\n 'kernel_constraint': self.kernel_constraint,\n 'bias_constraint': self.bias_constraint,\n 'attn_kernel_constraint': self.attn_kernel_constraint,\n 'attn_bias_constraint': self.attn_bias_constraint\n }\n base_config = super(BatchShawMultigraphAttention, self).get_config()\n return dict(list(base_config.items())) + list(config.items())", "def __init__(self, network_controller: NetworkQuantizationController, in_channels, out_channels, kernel_size,\n stride=1,\n padding=0, dilation=1,\n group=1, disable_bn=False,\n batch_norm_epsilon=1e-5, batch_norm_momentum=0.1, tf_padding=False):\n super(ConvBN, self).__init__()\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.group = group\n self.network_controller = network_controller\n self.tf_padding = tf_padding\n\n if not tf_padding:\n self.pad_tensor = Identity()\n self.padding_conv = self.padding\n\n else:\n padding = 0\n self.padding_conv = padding\n pad_h = self.padding\n pad_w = self.padding\n self.pad_tensor = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))\n\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=group,\n bias=disable_bn)\n if disable_bn:\n self.bn = Identity()\n else:\n self.bn = nn.BatchNorm2d(out_channels, eps=batch_norm_epsilon, momentum=batch_norm_momentum)\n self.q = Quantization(network_controller, is_signed=True,\n weights_values=self.conv.weight.detach())", "def __init__(self, cfg):\n\n super().__init__()\n self.K = 
cfg.queue_size\n self.output_size = cfg.ent_mention_output_size\n self.qid = 0\n self.span_pair_queue = []\n self.context_pair_queue = []\n self.span_tmp_queue = []\n self.context_tmp_queue = []\n self.temperature = cfg.temperature\n self.gama = 0.999\n self.device = cfg.device\n\n self.span_pair_encoder = PretrainedSpanEncoder(cfg, momentum=False)\n self.momentum_span_pair_encoder = PretrainedSpanEncoder(cfg, momentum=True)\n\n self.contrastive_loss = nn.CrossEntropyLoss()", "def get_configspace():\r\n cs = CS.ConfigurationSpace()\r\n\r\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\r\n\r\n # For demonstration purposes, we add different optimizers as categorical hyperparameters.\r\n # To show how to use conditional hyperparameters with ConfigSpace, we'll add the optimizers 'Adam' and 'SGD'.\r\n # SGD has a different parameter 'momentum'.\r\n optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\r\n\r\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\r\n\r\n cs.add_hyperparameters([lr, optimizer, sgd_momentum])\r\n\r\n\r\n\r\n num_conv_layers = CSH.UniformIntegerHyperparameter('num_conv_layers', lower=1, upper=3, default_value=2)\r\n\r\n num_filters_1 = CSH.UniformIntegerHyperparameter('num_filters_1', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_2 = CSH.UniformIntegerHyperparameter('num_filters_2', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_3 = CSH.UniformIntegerHyperparameter('num_filters_3', lower=4, upper=64, default_value=16, log=True)\r\n\r\n cs.add_hyperparameters([num_conv_layers, num_filters_1, num_filters_2, num_filters_3])\r\n\r\n\r\n dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\r\n num_fc_units = CSH.UniformIntegerHyperparameter('num_fc_units', lower=8, upper=256, default_value=32, log=True)\r\n\r\n cs.add_hyperparameters([dropout_rate, num_fc_units])\r\n\r\n\r\n # The hyperparameter sgd_momentum will be used,if the configuration\r\n # contains 'SGD' as optimizer.\r\n cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\r\n cs.add_condition(cond)\r\n\r\n # You can also use inequality conditions:\r\n cond = CS.GreaterThanCondition(num_filters_2, num_conv_layers, 1)\r\n cs.add_condition(cond)\r\n\r\n cond = CS.GreaterThanCondition(num_filters_3, num_conv_layers, 2)\r\n cs.add_condition(cond)\r\n\r\n return cs", "def make_config(config, out_dir=None, pism_root=pism_root):\n\n # ensure that config is a list\n if type(config) is str:\n config = [config]\n\n # initialize netCDF dataset\n nc_path = os.path.join(out_dir, 'config.nc')\n nc = Dataset(nc_path, 'w')\n var = nc.createVariable('pism_overrides', 'i1')\n\n # loop on config files\n for c in config:\n c_path = '%s/config/%s.txt' % (pism_root, c)\n\n # fill in pism overrides\n with open(c_path) as f:\n for line in f:\n\n # ignore what follows '//'\n line = line.split('//', 1)[0].strip()\n\n # parse non-empty lines and overwrite existing values\n if line:\n k, v = line.split(':', 1)\n k = k.strip()\n v = v.strip().strip('\"')\n try:\n v = float(v)\n except ValueError:\n pass\n var.setncattr(k, v)\n\n # close and return path to output file\n nc.close()\n return nc_path", "def _get_MindtPy_FP_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n CONFIG.declare(\n 'init_strategy',\n ConfigValue(\n default='FP',\n domain=In(['FP']),\n description='Initialization strategy',\n 
doc='Initialization strategy used by any method. Currently the '\n 'continuous relaxation of the MINLP (rNLP), solve a maximal '\n 'covering problem (max_binary), and fix the initial value for '\n 'the integer variables (initial_binary).',\n ),\n )\n\n _add_common_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG", "def test_quantize_training():\n config = EasyDict()\n\n config.NETWORK_CLASS = FlowNetSV3Quantized\n config.DATASET_CLASS = FlyingChairs\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [384, 512]\n config.BATCH_SIZE = 8\n config.TEST_STEPS = 200\n config.MAX_STEPS = 5000\n config.SAVE_CHECKPOINT_STEPS = 100\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 100\n config.IS_PRETRAIN = False\n config.IS_DISTRIBUTION = False\n config.TASK = Tasks.OPTICAL_FLOW_ESTIMATION\n\n # network model config\n config.NETWORK = EasyDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"beta1\": 0.9, \"beta2\": 0.999}\n config.NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant\n config.NETWORK.LEARNING_RATE_KWARGS = {\n \"values\": [0.0000125, 0.00005],\n \"boundaries\": [5000]\n }\n\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer\n config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {\n 'bit': 2,\n 'max_value': 2.0\n }\n config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer\n config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}\n\n # dataset config\n config.DATASET = EasyDict()\n config.DATASET.PRE_PROCESSOR = None\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n config.DATASET.VALIDATION_RATE = 0.2\n config.DATASET.VALIDATION_SEED = 2019\n config.DATASET.AUGMENTOR = Sequence([\n # Geometric transformation\n FlipLeftRight(0.5),\n FlipTopBottom(0.5),\n Translate(-0.2, 0.2),\n Rotate(-17, +17),\n Scale(1.0, 2.0),\n # Pixel-wise augmentation\n Brightness(0.8, 1.2),\n Contrast(0.2, 1.4),\n Color(0.5, 2.0),\n Gamma(0.7, 1.5),\n # Hue(-128.0, 128.0),\n GaussianNoise(0.0, 10.0)\n ])\n config.DATASET.PRE_PROCESSOR = Sequence([\n DevideBy255(),\n ])\n environment.init(\"test_flownet_s_v3_quantize\")\n prepare_dirs(recreate=True)\n start_training(config)", "def get_config():\n config = ml_collections.ConfigDict()\n config.seed = 42\n\n config.eval_num = 30000\n config.eval_avg_num = 3\n config.num_train_steps = -1\n config.log_loss_every_steps = 1000\n config.eval_every_steps = 1000\n config.checkpoint_every_steps = 5000\n\n config.dataset = \"mscoco\"\n config.coco_version = \"2014\"\n config.data_dir = \"data/\"\n config.return_text = False\n config.return_filename = False\n\n config.trial = 0 # dummy for repeated runs.\n config.beta1 = 0.5\n config.beta2 = 0.999\n config.d_lr = 0.0004\n config.g_lr = 0.0001\n config.polyak_decay = 0.999\n config.show_num = 64\n config.shuffle_buffer_size = 1000\n config.batch_norm_group_size = -1\n config.dtype = \"bfloat16\"\n config.train_shuffle = True\n\n config.image_size = 128\n config.batch_size = 56\n config.eval_batch_size = 7\n\n config.df_dim = 96\n config.gf_dim = 96\n config.z_dim = 128\n config.num_epochs = 500\n config.model_name = \"xmc\"\n config.d_step_per_g_step = 2\n config.g_spectral_norm = False\n config.d_spectral_norm = True\n 
config.architecture = \"xmc_net\"\n config.gamma_for_g = 15\n config.word_contrastive = True\n config.sentence_contrastive = True\n config.image_contrastive = True\n config.pretrained_image_contrastive = True\n config.cond_size = 16\n\n return config", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def createNCDF(self):\n\n rootgrp = Dataset(self.filename_out, 'w', format=self.format, clobber=True)\n\n # Create dimensions.\n if 'dimensions' in self.input_dict:\n for k, v in self.input_dict['dimensions'].items():\n rootgrp.createDimension(k, v)\n else:\n if not self.Quiet:\n print('No netCDF created:')\n print(' No dimension key found (!! has to be \\\"dimensions\\\"!!!)')\n return()\n\n # Create global attributes.\n if 'global attributes' in self.input_dict:\n for k, v in self.input_dict['global attributes'].items():\n rootgrp.setncattr(k, v)\n else:\n if not self.Quiet:\n print(' No global attribute key found (!! has to be \\\"global attributes\\\"!!!)')\n\n # Create variables.\n for k, v in self.input_dict['variables'].items():\n dims = self.input_dict['variables'][k]['dimensions']\n data = v['data']\n # Create correct data type if provided\n if 'data_type' in self.input_dict['variables'][k]:\n data_type = self.input_dict['variables'][k]['data_type']\n else:\n data_type = 'f4'\n # Check whether we've been given a fill value.\n if 'fill_value' in self.input_dict['variables'][k]:\n fill_value = self.input_dict['variables'][k]['fill_value']\n else:\n fill_value = None\n # Create ncdf variable\n if not self.Quiet:\n print(' Creating variable: {} {} {}'.format(k, data_type, dims))\n var = rootgrp.createVariable(k, data_type, dims, fill_value=fill_value)\n if len(dims) > np.ndim(data):\n # If number of dimensions given to netCDF is greater than the\n # number of dimension of the data, then fill the netCDF\n # variable accordingly.\n if 'time' in dims:\n # Check for presence of time dimension (which can be\n # unlimited variable: defined by None).\n try:\n var[:] = data\n except IndexError:\n raise(IndexError(('Supplied data shape {} does not match the specified'\n ' dimensions {}, for variable \\'{}\\'.'.format(data.shape, var.shape, k))))\n else:\n if not self.Quiet:\n print('Problem in the number of dimensions')\n else:\n try:\n var[:] = data\n except IndexError:\n raise(IndexError(('Supplied data shape {} does not match the specified'\n ' dimensions {}, for variable \\'{}\\'.'.format(data.shape, var.shape, k))))\n\n # Create attributes for variables\n if 'attributes' in self.input_dict['variables'][k]:\n for ka, va in self.input_dict['variables'][k]['attributes'].items():\n var.setncattr(ka, va)\n\n rootgrp.close()", "def __init__(self, encut, name=\"scf_settings\"):\n InputParameters.__init__(self, name=name)\n self.update_electronic_settings(\"ENCUT\", encut)", "def get_config(self):\n config = {\n \"units\": self.units,\n \"activation\": activations.serialize(self.activation),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation\n ),\n \"attention_activation\": activations.serialize(\n self.attention_activation\n ),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n 
self.recurrent_initializer\n ),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"attention_initializer\": initializers.serialize(\n self.attention_initializer\n ),\n \"use_chrono_initialization\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer\n ),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(\n self.activity_regularizer\n ),\n \"attention_regularizer\": regularizers.serialize(\n self.attention_regularizer\n ),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint\n ),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n \"attention_constraint\": constraints.serialize(\n self.attention_constraint\n ),\n \"dropout\": self.dropout,\n \"recurrent_dropout\": self.recurrent_dropout,\n \"return_attention\": self.return_attention,\n }\n base_config = super().get_config()\n del base_config[\"cell\"]\n return dict(list(base_config.items()) + list(config.items()))", "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg", "def createCfg_comp_jetToTauFakeRate(self, jobOptions):\n for charge_selection in self.charge_selections:\n lines = []\n lines.append(\"process.fwliteInput.fileNames = cms.vstring('%s')\" % jobOptions['inputFile'])\n lines.append(\"process.fwliteOutput.fileName = cms.string('%s')\" % os.path.basename(jobOptions['outputFile']))\n lines.append(\"process.comp_jetToTauFakeRate.looseRegion = cms.string('%s')\" % jobOptions['looseRegion'])\n lines.append(\"process.comp_jetToTauFakeRate.tightRegion = cms.string('%s')\" % jobOptions['tightRegion'])\n lines.append(\"process.comp_jetToTauFakeRate.processData = cms.string('data_obs')\")\n lines.append(\"process.comp_jetToTauFakeRate.processesToSubtract = cms.vstring(\")\n lines.append(\" 'TTt', 'TTl',\")\n lines.append(\" 'EWKt', 'EWKl',\")\n lines.append(\" 'Rarest', 'Raresl',\") \n lines.append(\" 'TTWt', 'TTWl', \")\n lines.append(\" 'TTZt', 'TTZl', \")\n lines.append(\" 'signalt', 'signall'\")\n lines.append(\")\")\n lines.append(\"process.comp_jetToTauFakeRate.processMC = cms.string('TTj')\")\n lines.append(\"process.comp_jetToTauFakeRate.absEtaBins = cms.vdouble(%s)\" % jobOptions['absEtaBins'])\n lines.append(\"process.comp_jetToTauFakeRate.ptBins = cms.vdouble(%s)\" % jobOptions['ptBins'])\n create_cfg(self.cfgFile_comp_jetToTauFakeRate, jobOptions['cfgFile_modified'], lines)", "def get_base_config():\n return dict(\n dim=768,\n ff_dim=3072,\n num_heads=12,\n num_layers=12,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=768,\n classifier='token'\n )", "def prepare_config(device='npu'):\n if device == 'npu':\n # config for Ascend processor\n config = tf.ConfigProto()\n custom_op = config.graph_options.rewrite_options.custom_optimizers.add()\n custom_op.name = \"NpuOptimizer\"\n custom_op.parameter_map[\"use_off_line\"].b = True\n custom_op.parameter_map[\"precision_mode\"].s = tf.compat.as_bytes(\"force_fp16\")\n custom_op.parameter_map[\"graph_run_mode\"].i = 0\n 
config.graph_options.rewrite_options.remapping = RewriterConfig.OFF\n custom_op.parameter_map[\"debug_dir\"].s = tf.compat.as_bytes(str(TMP))\n else:\n config = tf.ConfigProto()\n return config" ]
[ "0.6211365", "0.5947636", "0.5926075", "0.5917099", "0.58897614", "0.584447", "0.584447", "0.56702006", "0.5627676", "0.55820346", "0.5560999", "0.55283594", "0.54756486", "0.54677653", "0.5465148", "0.54618865", "0.5459208", "0.54586923", "0.54444957", "0.54440576", "0.5437182", "0.5412364", "0.54110235", "0.5406182", "0.5401891", "0.539887", "0.5388667", "0.53857255", "0.5381153", "0.53794265" ]
0.7439748
0
Implementation of the `compress_weights()` method for the PyTorch backend.
def compress_weights(model: torch.nn.Module, use_fake_quantize: bool = False) -> torch.nn.Module:\n    compressed_model, _ = replace_modules_by_nncf_modules(model)\n    insert_pre_compression_operations(model, use_fake_quantize)\n    return compressed_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compress(self, tensor):", "def compress(self, tensor, *args, **kwargs):\n pass", "def weight_compression(weights, bits, axis=0, quantizer=None):\n assert bits <= 8\n n = 2**bits\n index_table = []\n codebook_table = np.zeros((weights.shape[axis], n))\n km_models = [None] * weights.shape[axis]\n\n for i, w in tqdm(enumerate(np.split(weights, weights.shape[axis], axis))):\n original_shape = w.shape\n w = w.ravel()\n km = KMeans(n)\n km.fit(w.reshape(-1, 1))\n if quantizer:\n km.cluster_centers_ = quantizer(km.cluster_centers_).numpy()\n km.cluster_centers_.sort(axis=0)\n\n km_models[i] = km\n codebook_table[i, :] = km.cluster_centers_.flatten()\n preds = km.predict(w.reshape(-1, 1))\n index_table.append(preds.reshape(original_shape))\n\n index_table = np.concatenate(index_table, axis)\n return index_table, codebook_table", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def apply_compressed_sensing(self, inputs, rng):\n print('using compressed sensing!')\n train_path = os.path.join(\n self.data_dir, 'assist{0}-{1}'.format(self.which_year, 'train'))\n\n if self.which_set == 'test':\n loaded = np.load(train_path + '-compression-matrix.npz')\n self.compress_matrix = loaded['compress_matrix']\n self.compress_dim = self.compress_matrix.shape[1]\n elif self.which_set == 'train':\n self.compress_matrix = self.make_compression_matrix(train_path, rng)\n\n inputs = self.compress_inputs(inputs)\n return inputs", "def mask_weights(self, mask, weights):\n new_weights = list()\n for idx, layer in enumerate(self.model.layers):\n if len(layer.get_weights())>0:\n new_weights.append(weights[idx]*mask[idx])\n new_weights.append(layer.get_weights()[1])\n else:\n continue\n return new_weights", "def init_weights_(self):\n raise NotImplementedError", "def compress(emb):\n if params.sum_word_vecs:\n return np.sum(emb, axis=0)\n if params.max_pool_word_vecs:\n return np.amax(emb, axis=0)\n if params.concat_word_vecs:\n return concat_word_vecs(emb, params.max_transcript_len)\n if params.avg_word_vecs:\n return np.mean(emb, axis=0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def compress(self, tensor, *args, **kwargs):\n tensor_compressed = tensor\n if 'float' in str(tensor.dtype):\n # Only allow compression from other floating point types\n tensor_compressed = tensor.astype('float16', copy=False)\n return tensor_compressed, tensor.dtype", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _set_weights(self, weights):\r\n self.weights = weights.reshape(self.output_size, self.input_size+1)", "def compress(self, tensor, *args, **kwargs):\n return tensor, None", "def init_weights(self):\n with torch.no_grad():\n self._init_weights()", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def _create_weights(self):\n gate_size = self._hidden_size * self._num_gates\n # Compute the shape of weight and bias.\n matrix_shapes, bias_shapes = [], []\n for layer in range(self._num_layers):\n for direction in range(self._num_directions):\n layer_input_size = self._input_size if layer == 0 \\\n else self._hidden_size * self._num_directions\n w_ih_shape = [gate_size, layer_input_size]\n w_hh_shape = [gate_size, self._hidden_size]\n 
b_ih_shape, b_hh_shape = [gate_size], [gate_size]\n matrix_shapes.extend([w_ih_shape, w_hh_shape])\n bias_shapes.extend([b_ih_shape, b_hh_shape])\n # Create single float32 weights.\n weights_count = 0\n self._weights_shapes = matrix_shapes + bias_shapes\n for shape in self._weights_shapes:\n weights_count += math_util.prod(shape)\n self._weights = Tensor([weights_count])\n self._weights.requires_grad = True", "def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()", "def sparsify_weights(self, threshold = 1e-6):\n weights = self.list_cnn[-1].get_weights()\n sparsified_weights = []\n for w in weights:\n bool_mask = (abs(w) > threshold).astype(int)\n sparsified_weights.append(w * bool_mask)\n self.list_cnn[-1].set_weights(sparsified_weights)", "def update_weights(self):\n\t\tpass", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()", "def decompress(self, tensor, ctx, *args, **kwargs):\n if \"x\" not in kwargs:\n raise ValueError(\"x is missing\")\n\n x = kwargs[\"x\"].astype(tensor.dtype, copy=False) \n \n if not self.inited:\n self.cache = nd.zeros_like(tensor)\n if size(tensor.shape) >= self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.wdmom = True\n self.inited = True\n \n # weight decay\n nd._internal._mul_scalar(x, self.wd, out=self.cache)\n\n # weight decay momentum\n if self.wdmom:\n self.mom += self.cache\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n tensor += self.cache\n return self.compressor.decompress(tensor, ctx, *args, **kwargs)", "def compress(self, samples):\n rms = np.sqrt(np.dot(samples, samples) / window)\n power = self.power * (1.0 - self.smooth) + rms * self.smooth\n self.power = power\n if power <= 1e-40:\n samples *= 0\n return\n db_in = 10.0 * math.log10(power)\n if db_in <= self.limit:\n samples *= 0\n return\n db_out = self.cf(db_in)\n db_gain = db_out - db_in + self.postgain\n gain = 10**(0.1 * db_gain)\n samples *= gain", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)", "def make_conv_weight_image(all_weights, limit=144):\n import vtool as vt\n # Try to infer if use_color should be shown\n num, channels, height, width = all_weights.shape\n # Try to infer if use_color should be shown\n use_color = (channels == 3)\n # non-use_color features need to be flattened\n if not use_color:\n all_weights_ = all_weights.reshape(num * channels, height, width, 1)\n else:\n # convert from theano to cv2 BGR\n all_weights_ = utils.convert_theano_images_to_cv2_images(all_weights)\n # convert from BGR to RGB\n all_weights_ = all_weights_[..., ::-1]\n #cv2.cvtColor(all_weights_[-1], cv2.COLOR_BGR2RGB)\n\n # Limit all_weights_\n #num = all_weights_.shape[0]\n num, height, width, channels = all_weights_.shape\n if limit is not None and num > limit:\n all_weights_ = all_weights_[:limit]\n num = all_weights_.shape[0]\n\n # Convert weight values to image values\n normalize_individually = False\n if normalize_individually:\n # Normalize each feature individually\n all_max = vt.multiaxis_reduce(np.amax, all_weights_, startaxis=1)\n all_min = vt.multiaxis_reduce(np.amin, all_weights_, startaxis=1)\n all_domain = all_max - all_min\n extra_dims = (None,) * (len(all_weights_.shape) - 1)\n broadcaster = (slice(None),) + extra_dims\n all_features = ((all_weights_ - all_min[broadcaster]) *\n (255.0 / all_domain[broadcaster])).astype(np.uint8)\n else:\n # Normalize jointly across all 
filters\n _max = all_weights_.max()\n _min = all_weights_.min()\n _domain = _max - _min\n all_features = ((all_weights_ - _min) * (255.0 / _domain)).astype(np.uint8)\n\n #import scipy.misc\n # resize feature, give them a border, and stack them together\n new_height, new_width = max(32, height), max(32, width)\n nbp_ = 1 # num border pixels\n _resized_features = np.array([\n cv2.resize(img, (new_width, new_height),\n interpolation=cv2.INTER_NEAREST)\n for img in all_features\n ])\n resized_features = _resized_features.reshape(\n num, new_height, new_width, channels)\n border_shape = (num, new_height + (nbp_ * 2),\n new_width + (nbp_ * 2), channels)\n bordered_features = np.zeros(border_shape, dtype=resized_features.dtype)\n bordered_features[:, nbp_:-nbp_, nbp_:-nbp_, :] = resized_features\n #img_list = bordered_features\n stacked_img = vt.stack_square_images(bordered_features)\n return stacked_img", "def make_compression_matrix(self, train_path, rng):\n self.compress_dim = 100 # value used in original DKT paper\n if rng:\n compress_matrix = rng.randn(self.encoding_dim, self.compress_dim)\n else:\n compress_matrix = np.random.randn(self.encoding_dim, self.compress_dim)\n\n np.savez(train_path + '-compression-matrix', compress_matrix=compress_matrix)\n return compress_matrix", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def put_weights(self, content: ndarray, var_id: int, batch_no: int, block_id: int) -> None:\n pass", "def get_weights_tensor(self):\n return [self.hidden_layers[i].get_weight_tensor() for i in range(self.depth)] + \\\n [self.output_weight.clone()]", "def __create_conv_weights(self, conv_weights):\n\n conv_xform_weights = []\n curr_n = 32\n k = 5\n for idx, conv_w in enumerate(conv_weights):\n\n curr_n = self.n_values[idx]\n W = self.__create_W_matrix(curr_n, conv_w)\n conv_xform_weights.append(W)\n\n return conv_xform_weights" ]
[ "0.67688566", "0.6305428", "0.62857914", "0.59490013", "0.59490013", "0.57656217", "0.5713356", "0.57009125", "0.56955546", "0.56836677", "0.55329573", "0.5532722", "0.5511265", "0.5505167", "0.5443281", "0.54317117", "0.5425128", "0.54003054", "0.53999305", "0.5396085", "0.5390862", "0.53767896", "0.53749967", "0.53343743", "0.5334274", "0.5331206", "0.5327853", "0.5327203", "0.5318852", "0.5310714" ]
0.67650056
1
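A minimal usage sketch for the `compress_weights()` entry in the record above. It assumes the function and the NNCF helpers it calls (`replace_modules_by_nncf_modules`, `insert_pre_compression_operations`) are importable in scope; the toy model is illustrative only.

import torch

# Toy module standing in for a real network; compress_weights() is the function
# from the record's document field, assumed to be importable here.
model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
compressed = compress_weights(model, use_fake_quantize=True)
# After the call, supported layers carry weight-compression (or fake-quantize) operations.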
Create an embedded document instance from MongoDB data
def build_from_mongo(cls, data, use_cls=True):\n    # If a _cls is specified, we have to use this document class\n    if use_cls and '_cls' in data:\n        cls = cls.opts.instance.retrieve_embedded_document(data['_cls'])\n    doc = cls()\n    doc.from_mongo(data)\n    return doc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_mongo(data):\n if not data:\n return None\n\n data['id'] = str(data['_id'])\n return data", "def from_mongo(cls, data: dict) -> Union[\"MongoModel\", Dict]:\n if not data:\n return data\n id = data.pop('_id', None)\n return cls(**dict(data, id=id))", "def create(cls, collection, data, schema=None):\n validated = cls.validate(data, schema=schema)\n result = collection.insert_one(validated)\n return collection.find_one({\"_id\": result.inserted_id})", "def _to_document(self, document):\n obj = self.document()\n obj._set_from_db(document)\n return obj", "def _doc_create(type, data):\n doc = dict(data)\n doc.update({'model_type': type})\n return doc", "def init_doc(self, obj, update_dict=True):\n try:\n obj.essentials\n except AttributeError:\n raise AttributeError(\"An object to be saved in db is supposed to have the essentials attribute\")\n\n if obj.essentials is None:\n raise AttributeError(\"An object to be saved in db should not have NoneType as its essentials\")\n\n print(\"Saving this object into db: {}\".format(type(obj)))\n\n start = datetime.now()\n essen = self.mongo_doc_generator(obj.essentials)\n document = {\"essentials\": essen, 'datetime': start, 'filepaths': obj.filepaths}\n\n db_location = obj.db\n element = obj.decide_element()\n host = db_location[\"host\"]\n project = db_location[\"project\"]\n\n target_db = connect_collection(host, project, element)\n doc_created = target_db.insert_one(document)\n inserted_id = doc_created.inserted_id\n\n return inserted_id", "def create_document(document: DocumentIn, db: Session = Depends(get_db)):\n return add_document(db, document)", "def createDocument(self, document):\n data = self.createDocumentAll([document])\n try:\n return data[0]\n except: pass", "def new_document(klass, name=None, author=None):\n doc = Factory.new_document(klass, author)\n doc.name = name\n doc._osl.id = uuid.uuid4()\n return doc", "def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()", "def subdocument(db_type):\n if db_type != \"ephemeraldb\":\n pytest.skip(\"ephemeraldb test only\")\n yield EphemeralDocument(\n {\"_id\": 1, \"hello\": \"there\", \"mighty\": \"duck\", \"and\": {\"the\": \"drake\"}}\n )", "def create_from_dict(new_info: dict):\n doc = Db2Document(mhr_number=new_info.get('mhrNumber'),\n document_type=new_info.get('documentType'),\n document_reg_id=new_info.get('documentRegistrationId'),\n interimed=new_info.get('interimed', ''),\n owner_cross_reference=new_info.get('ownerCrossReference', ''),\n interest_denominator=new_info.get('interestDenominator', 0),\n declared_value=new_info.get('declaredValue', 0),\n own_land=new_info.get('ownLand', ''),\n routing_slip_number=new_info.get('routingSlipNumber', ''))\n doc.last_service = new_info.get('lastService', '')\n doc.bcol_account = new_info.get('bcolAccount', '')\n doc.dat_number = new_info.get('datNumber', '')\n doc.examiner_id = new_info.get('examinerId', '')\n doc.update_id = new_info.get('updateId', '')\n doc.phone_number = new_info.get('phoneNumber', '')\n doc.attention_reference = new_info.get('attentionReference', '')\n doc.name = new_info.get('name', '')\n doc.legacy_address = new_info.get('legacyAddress', '')\n doc.number_of_pages = new_info.get('numberOfPages', 0)\n doc.consideration_value = new_info.get('considerationValue', '')\n doc.affirm_by_name = new_info.get('affirmByName', '')\n doc.liens_with_consent = new_info.get('liensWithConsent', '')\n 
doc.client_reference_id = new_info.get('clientReferenceId', '')\n if new_info.get('createDateTime', None):\n doc.registration_ts = model_utils.ts_from_iso_format(new_info.get('createDateTime'))\n if new_info.get('draftDateTime', None):\n doc.draft_ts = model_utils.ts_from_iso_format(new_info.get('draftDateTime'))\n if new_info.get('transferExecutionDate', None):\n date_val: str = str(new_info.get('transferExecutionDate'))[0:10]\n doc.transfer_execution_date = model_utils.date_from_iso_format(date_val)\n return doc", "def from_db(data):\n \n return Collection(\n dbid = data['id'],\n title = data['title'],\n query = data['query'],\n priority = data['priority'],\n export = data['export'])", "def test_find_embedded(self):\n\n class User(EmbeddedDocument):\n name = StringField()\n\n class BlogPost(Document):\n content = StringField()\n author = EmbeddedDocumentField(User)\n\n BlogPost.drop_collection()\n\n user = User(name=\"Test User\")\n BlogPost.objects.create(author=user, content=\"Had a good coffee today...\")\n\n result = BlogPost.objects.first()\n assert isinstance(result.author, User)\n assert result.author.name == \"Test User\"\n\n result = BlogPost.objects.get(author__name=user.name)\n assert isinstance(result.author, User)\n assert result.author.name == \"Test User\"\n\n result = BlogPost.objects.get(author={\"name\": user.name})\n assert isinstance(result.author, User)\n assert result.author.name == \"Test User\"\n\n # Fails, since the string is not a type that is able to represent the\n # author's document structure (should be dict)\n with pytest.raises(InvalidQueryError):\n BlogPost.objects.get(author=user.name)", "def test_as_pymongo(self):\n data = []\n self.maxDiff = None\n for item in self.docs[0:2]:\n el = item.to_dict(oid_as_str=False, call_child_to_dict=False)\n el[\"_id\"] = el.pop(\"id\")\n data.append(el)\n actual = self.doc_cls.objects[0:2].as_pymongo()\n self.assertEqual(data, list(actual))", "def create(init_document: 'Document') -> 'DocumentArray':", "def create_document(content: Union[str, bytes]) -> Document:\n r = requests.post(\"https://pastecord.com/documents\", data=content)\n r.raise_for_status()\n \n return Document(r.json()['key'])", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "def _resolve_embedded_documents(document, resource, embedded_fields):\n schema = config.DOMAIN[resource]['schema']\n for field in embedded_fields:\n data_relation = schema[field]['data_relation']\n # Retrieve and serialize the requested document\n if 'version' in data_relation and data_relation['version'] is True:\n # support late versioning\n if document[field][config.VERSION] == 0:\n # there is a chance this document hasn't been saved\n # since versioning was turned on\n embedded_doc = missing_version_field(\n data_relation, document[field])\n\n if embedded_doc is None:\n # this document has been saved since the data_relation was\n # made - we basically do not have the copy of the document\n # that existed when the data relation was made, but we'll\n # try the next best thing - the first version\n document[field][config.VERSION] = 1\n embedded_doc = get_data_version_relation_document(\n data_relation, document[field])\n\n latest_embedded_doc = embedded_doc\n else:\n # grab the specific 
version\n embedded_doc = get_data_version_relation_document(\n data_relation, document[field])\n\n # grab the latest version\n latest_embedded_doc = get_data_version_relation_document(\n data_relation, document[field], latest=True)\n\n # make sure we got the documents\n if embedded_doc is None or latest_embedded_doc is None:\n # your database is not consistent!!! that is bad\n abort(404, description=debug_error_message(\n \"Unable to locate embedded documents for '%s'\" %\n field\n ))\n\n # build the response document\n _build_response_document(\n embedded_doc, data_relation['resource'],\n [], latest_embedded_doc)\n else:\n embedded_doc = app.data.find_one(\n data_relation['resource'], None,\n **{config.ID_FIELD: document[field]}\n )\n if embedded_doc:\n document[field] = embedded_doc", "def create_document(self):\n # set single values\n if len(self.field_values) > 0:\n self._set_field_values()\n\n # set multi values\n if len(self.block_field_values) > 0:\n self._set_multi_field_values()\n\n self.field_values = {}\n self.block_field_values = {}\n\n self.client.service.CreateDocument()", "def test_query_generic_embedded_document(self):\n\n class A(EmbeddedDocument):\n a_name = StringField()\n\n class B(EmbeddedDocument):\n b_name = StringField()\n\n class Doc(Document):\n document = GenericEmbeddedDocumentField(choices=(A, B))\n\n Doc.drop_collection()\n Doc(document=A(a_name=\"A doc\")).save()\n Doc(document=B(b_name=\"B doc\")).save()\n\n # Using raw in filter working fine\n assert Doc.objects(__raw__={\"document.a_name\": \"A doc\"}).count() == 1\n assert Doc.objects(__raw__={\"document.b_name\": \"B doc\"}).count() == 1\n assert Doc.objects(document__a_name=\"A doc\").count() == 1\n assert Doc.objects(document__b_name=\"B doc\").count() == 1", "def test_find_empty_embedded(self):\n\n class User(EmbeddedDocument):\n name = StringField()\n\n class BlogPost(Document):\n content = StringField()\n author = EmbeddedDocumentField(User)\n\n BlogPost.drop_collection()\n\n BlogPost.objects.create(content=\"Anonymous post...\")\n\n result = BlogPost.objects.get(author=None)\n assert result.author is None", "def transform_bson(self, value: Dict) -> Entity:\n try:\n return from_dict(\n data_class=self.python_type,\n config=self._config,\n data=value\n )\n except MissingValueError as e:\n raise e\n except DefaultValueNotFoundError as e:\n raise e", "def test_mongodb_origin_simple_with_BSONBinary(sdc_builder, sdc_executor, mongodb):\n\n ORIG_BINARY_DOCS = [\n {'data': binary.Binary(b'Binary Data Flute')},\n {'data': binary.Binary(b'Binary Data Oboe')},\n {'data': binary.Binary(b'Binary Data Violin')}\n ]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_BINARY_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. 
Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{'data': str(record.value2['data'])} for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == [{'data': str(record.get('data'))} for record in ORIG_BINARY_DOCS]\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def document(self):\n query = {\"_id\": ObjectId(self.document_id)}\n return Document(get_collection(\"documents\").find_one(query))", "def create_from_document(doc: Dict[str, Any]) -> 'Publication':\n return Publication(\n name=doc['doc_name'],\n title=doc['doc_title'][:100],\n type=doc['doc_type'],\n number=doc['doc_num'],\n is_ignored=False,\n is_revoked=False\n )", "def test_as_pymongo(self):\n data = []\n for item in self.users:\n el = item.to_dict(oid_as_str=False, call_child_to_dict=False)\n el[\"_id\"] = el.pop(\"id\")\n data.append(el)\n actual = User.objects.as_pymongo()\n self.assertEqual(data, list(actual))", "def test_convert_hwpc_report_from_mongodb_work(hwpc_model, json_input):\n hwpc_model.from_mongodb(json_input)\n assert True", "def create_instance(*, db_session, instance_in: WorkflowInstanceCreate) -> WorkflowInstance:\n instance = WorkflowInstance(\n **instance_in.dict(exclude={\"incident\", \"workflow\", \"creator\", \"artifacts\"})\n )\n\n incident = incident_service.get(db_session=db_session, incident_id=instance_in.incident[\"id\"])\n instance.incident = incident\n\n workflow = get(db_session=db_session, workflow_id=instance_in.workflow[\"id\"])\n instance.workflow = workflow\n\n creator = participant_service.get_by_incident_id_and_email(\n db_session=db_session, incident_id=incident.id, email=instance_in.creator[\"email\"]\n )\n instance.creator = creator\n\n for a in instance_in.artifacts:\n artifact_document = document_service.create(\n db_session=db_session, document_in=DocumentCreate(**a)\n )\n instance.artifacts.append(artifact_document)\n\n db_session.add(instance)\n db_session.commit()\n\n return instance", "def test_find_dict_item(self):\n\n class BlogPost(Document):\n info = DictField()\n\n BlogPost.drop_collection()\n\n post = BlogPost(info={\"title\": \"test\"})\n post.save()\n\n post_obj = BlogPost.objects(info__title=\"test\").first()\n assert post_obj.id == post.id\n\n BlogPost.drop_collection()" ]
[ "0.7066903", "0.6612481", "0.63551545", "0.6348332", "0.6091033", "0.6033079", "0.5778242", "0.5773537", "0.5751162", "0.5727225", "0.5722528", "0.5691701", "0.56832474", "0.56717724", "0.55828565", "0.55790466", "0.5539406", "0.5517863", "0.5484895", "0.54323846", "0.5430872", "0.54172623", "0.5416529", "0.54002947", "0.5398329", "0.53900117", "0.5369912", "0.5361611", "0.5354257", "0.53513414" ]
0.73655903
0
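A self-contained sketch of the hydrate-from-raw-Mongo-data pattern that the record above illustrates. The `Address` class below is a hypothetical stand-in; a real ODM generates this machinery, including the `_cls`-based class lookup shown in the document field.

class Address:
    # Minimal stand-in for an embedded-document class.
    def __init__(self):
        self.street = None
        self.city = None

    def from_mongo(self, data):
        # Copy raw MongoDB fields onto the instance.
        for key, value in data.items():
            setattr(self, key, value)

    @classmethod
    def build_from_mongo(cls, data):
        doc = cls()
        doc.from_mongo(data)
        return doc

addr = Address.build_from_mongo({"street": "221B Baker St", "city": "London"})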
Multidimensional Gaussian Fourier filter. The array is multiplied with the Fourier transform of a Gaussian kernel.
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):\n    input = numpy.asarray(input)\n    output = _get_output_fourier(output, input)\n    axis = normalize_axis_index(axis, input.ndim)\n    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)\n    sigmas = numpy.asarray(sigmas, dtype=numpy.float64)\n    if not sigmas.flags.contiguous:\n        sigmas = sigmas.copy()\n    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)\n    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def blur_fourier(im, kernel_size):\n im = im.astype(np.float64)\n # build the kernel with zero padding\n kernel_base = gaussian_kernel_factory(kernel_size)\n window = np.zeros_like(im).astype(np.float64)\n M, N = im.shape\n dx, dy = kernel_base.shape\n x_middle, y_middle = N//2, M//2\n\n window[(y_middle-dy//2):(y_middle+dy//2+1), (x_middle-dx//2):(x_middle+dx//2+1)] = kernel_base\n\n # multiply in the freq domain\n return IDFT2(DFT2(im) * DFT2(np.fft.ifftshift(window))).real", "def fourier(img):\n return fourierCV(img)", "def blur_fourier(im, kernel_size):\n kernel = gaus_kernel_calc(kernel_size)\n\n zeros = np.zeros(im.shape)\n x_mid = np.math.floor(im.shape[1] / 2)\n y_mid = np.math.floor(im.shape[0] / 2)\n distance = np.math.floor(kernel_size / 2)\n zeros[x_mid - distance: x_mid + distance + 1, y_mid - distance: y_mid + distance + 1] = kernel\n\n fourier_kernel = DFT2(np.fft.ifftshift(zeros))\n fourier_img = DFT2(im)\n fourier_blured = fourier_kernel * fourier_img\n\n return IDFT2(fourier_blured).real.astype(np.float64)", "def gaussian_filter(x):\n return _gaussian_filter(x, 3)", "def Gauss_filter(data, sigma=(0,2,2), mode='wrap'): \n import scipy.ndimage.filters as flt\n return flt.gaussian_filter(data, sigma=sigma, mode=mode)", "def gaussian_filter(stddev, array):\n\n return astropy.convolution.convolve(\n array, astropy.convolution.Gaussian2DKernel(stddev))", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def _get_fourier_filter(self):\n size = max(64, int(2 ** np.ceil(np.log2(2 * self.m[-1].item()))))\n\n pi = torch.acos(torch.zeros(1)).item() * 2.0\n n = torch.cat(\n [\n torch.arange(1, size // 2 + 1, 2, device=self.n.device),\n torch.arange(size // 2 - 1, 0, -2, device=self.n.device),\n ]\n )\n f = torch.zeros(size, device=self.n.device)\n f[0] = 0.25\n if self.flat:\n f[1::2] = -1 / (pi * n).pow(2)\n else:\n f[1::2] = -self.s_detect.abs().pow(2) / (\n pi\n * (self.d_source + self._d_detect())\n * torch.sin(\n n\n * self.s_detect.abs()\n / (self.d_source + self._d_detect())\n )\n ).pow(2)\n f = torch.stack(\n [f, torch.zeros(f.shape, device=self.n.device)], dim=-1\n )\n f = fftshift(f, dim=(-2,))\n\n filt = fft1(f)[..., 0]\n\n if self.filter_type == \"hamming\":\n # hamming filter\n fac = torch.tensor(\n np.hamming(size).astype(np.float32), device=f.device\n )\n elif self.filter_type == \"hann\":\n # hann filter\n fac = torch.tensor(\n np.hanning(size).astype(np.float32), device=f.device\n )\n elif self.filter_type == 
\"cosine\":\n # cosine filter\n fac = torch.sin(\n torch.linspace(0, pi, size + 1, device=f.device)[:-1]\n )\n else:\n # ramp / ram-lak filter\n fac = 1.0\n\n return fac * filt", "def _irfft2d(f_x) :", "def gaussian_filter(size,sigma=-1):\n\n if sigma == -1:\n sigma = np.sqrt(size)\n\n filter = np.zeros((size,size))\n\n for i,j in it.product(range(size),range(size)):\n x = j-size//2\n y = i-size//2\n filter[i,j] = 1/(2*np.pi*sigma**2) * np.exp(-(x**2+y**2)/(2*sigma**2))\n\n filter = filter/filter[0,0]\n filter = filter/filter.sum()\n\n return filter", "def filterf(self):\n from scipy.ndimage.filters import gaussian_filter as gf\n self._obj['u'] = xr.DataArray(gf(self._obj['u'],1),dims=('x','y'))\n self._obj['v'] = xr.DataArray(gf(self._obj['v'],1),dims=('x','y'))\n return self._obj", "def gauss_convolution_fft(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def gs_blur(self,k,img):\n SIG = self.sigma\n sig = [SIG,k*SIG,k*k*SIG,k*k*k*SIG,k*k*k*k*SIG]\n gsArray = [0,1,2,3,4]\n scaleImages = [0,1,2,3,4]\n \n for i in range(5):\n gsArray[i] = scipy.ndimage.filters.gaussian_filter(img,sig[i])\n\n return gsArray", "def calc_psf_fwhm_inpix_gaussian(arr):\n\tmodel = fit_gaussian(arr)\n\n\tsigma = max(model.y_stddev, model.x_stddev)\n\tfwhm = 2.355 * sigma\n\n\treturn fwhm", "def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)", "def conv_gaus(array, sigma = 1.0):\r\n arrayout = fft2(array + 0.0J)\r\n arrayout = ifft2(arrayout * gaus_Fourier(array.shape[0], sigma))\r\n arrayout = np.array(arrayout, dtype=array.dtype)\r\n return arrayout", "def fourier_transform2d(self):\n\n zerofill = np.zeros(1024 * np.array([1,1])) #so it will always be square\n zerofill[:len(self.windowed), :len(self.windowed)] = self.windowed\n transform = np.fft.fft2(zerofill)\n transform = np.fft.fftshift(transform) # shift center to zero\n transformed = np.absolute(transform)\n tmax = transformed.max()\n zdata = (transformed)/(tmax) # normalize to maximum value\n\n return zdata", "def gauss_convolution(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def gaussian(dims: Tuple[int, int], cutoff_freq: float) -> np.ndarray:\n # create grid\n m, n = [(dim - 1) / 2 for dim in dims]\n yy, xx = np.ogrid[-m : m + 1, -n : n + 1]\n\n # compute transfer function\n tf = np.exp(-(np.power(xx, 2) + np.power(yy, 2)) / (2 * np.power(cutoff_freq, 2)))\n\n # normalize and return transfer func\n return (tf - np.max(tf)) / (np.max(tf) - np.min(tf))", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers 
from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def gaussian_filter(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma))\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))", "def fftkernel(x, w):\n L = len(x)\n Lmax = L + 3 * w\n n = nextpow2(Lmax)\n X = np.fft.fft(x, n)\n f = np.arange(0, n, 1.0) / n\n f = np.concatenate((-f[:int(n / 2)], f[int(n / 2):0:-1]))\n K = np.exp(-0.5 * (w * 2 * np.pi * f) ** 2)\n y = np.fft.ifft(X * K, n)\n y = y[:L].copy()\n return y", "def _FWHMGauss(sigma, pixel=12):\n return sigma*2*np.sqrt(2*np.log(2))*pixel", "def flatten(img,sigma=20.) :\n\n for i in range(img.shape[0]) :\n img[i] /= np.median(img[i])\n for i in range(img.shape[1]) :\n img[:,i] /= np.median(img[:,i])\n\n hw=int(3*sigma)\n u=np.linspace(-hw,hw,2*hw+1)\n x=np.tile(u,(2*hw+1,1))\n y=x.T\n k=np.exp(-x**2/2/sigma**2-y**2/2/sigma**2)\n k /= np.sum(k)\n smooth=convolve2d(img,k,weight=None)\n img /= smooth\n\n return img", "def apply_gaussian_filter(mat, sigma_x, sigma_y, pad=None, mode=None):\n if mode is None:\n # Default for a sinogram image.\n mode1 = \"edge\"\n mode2 = \"mean\"\n else:\n if isinstance(mode, list) or isinstance(mode, tuple):\n mode1 = mode[0]\n mode2 = mode[1]\n else:\n mode1 = mode2 = mode\n if pad is None:\n pad = min(150, int(0.1 * min(mat.shape)))\n mat_pad = np.pad(mat, ((0, 0), (pad, pad)), mode=mode1)\n mat_pad = np.pad(mat_pad, ((pad, pad), (0, 0)), mode=mode2)\n (nrow, ncol) = mat_pad.shape\n window = make_2d_gaussian_window(nrow, ncol, sigma_x, sigma_y)\n xlist = np.arange(0, ncol)\n ylist = np.arange(0, nrow)\n x, y = np.meshgrid(xlist, ylist)\n mat_sign = np.power(-1.0, x + y)\n mat_filt = np.real(\n fft.ifft2(fft.fft2(mat_pad * mat_sign) * window) * mat_sign)\n return mat_filt[pad:nrow - pad, pad:ncol - pad]", "def tomography_filter(arr, axis=2, filter_type='hamming', backend='autograd'):\n func = getattr(scipy.signal.windows, filter_type)\n filter = func(arr.shape[axis])\n if axis != len(arr.shape) - 1:\n arr = swap_axes(arr, [axis, len(arr.shape) - 1])\n if backend == 'pytorch':\n args = {'device': arr.device}\n else:\n args = {}\n arr_r, arr_i = fft(arr, zeros_like(arr, requires_grad=False, **args))\n arr_r = arr_r * filter\n arr_i = arr_i * filter\n arr, _ = ifft(arr_r, arr_i)\n if axis != len(arr.shape) - 1:\n arr = swap_axes(arr, [axis, len(arr.shape) - 1])\n return arr" ]
[ "0.6724297", "0.6515853", "0.64436597", "0.64298147", "0.6300525", "0.62142223", "0.6133565", "0.61210185", "0.60772467", "0.6005786", "0.59797704", "0.58723", "0.58492655", "0.5830647", "0.575321", "0.56844056", "0.5630864", "0.56108207", "0.55966944", "0.5580227", "0.55787235", "0.55725807", "0.55296147", "0.551112", "0.550536", "0.54819894", "0.5481008", "0.54772764", "0.546164", "0.54606855" ]
0.66539884
1
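A usage sketch for the Gaussian Fourier filter documented above, via SciPy's public `scipy.ndimage.fourier_gaussian`; the random test image and the sigma value are illustrative assumptions.

import numpy as np
from scipy import ndimage

image = np.random.default_rng(0).random((64, 64))
freq = np.fft.fft2(image)                        # forward transform of the image
freq = ndimage.fourier_gaussian(freq, sigma=4)   # multiply by the Gaussian transfer function
blurred = np.fft.ifft2(freq).real                # back to the spatial domain: a Gaussian blur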
Multidimensional uniform Fourier filter. The array is multiplied with the Fourier transform of a box of given size.
def fourier_uniform(input, size, n=-1, axis=-1, output=None):\n    input = numpy.asarray(input)\n    output = _get_output_fourier(output, input)\n    axis = normalize_axis_index(axis, input.ndim)\n    sizes = _ni_support._normalize_sequence(size, input.ndim)\n    sizes = numpy.asarray(sizes, dtype=numpy.float64)\n    if not sizes.flags.contiguous:\n        sizes = sizes.copy()\n    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)\n    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_fourier_filter(self):\n size = max(64, int(2 ** np.ceil(np.log2(2 * self.m[-1].item()))))\n\n pi = torch.acos(torch.zeros(1)).item() * 2.0\n n = torch.cat(\n [\n torch.arange(1, size // 2 + 1, 2, device=self.n.device),\n torch.arange(size // 2 - 1, 0, -2, device=self.n.device),\n ]\n )\n f = torch.zeros(size, device=self.n.device)\n f[0] = 0.25\n if self.flat:\n f[1::2] = -1 / (pi * n).pow(2)\n else:\n f[1::2] = -self.s_detect.abs().pow(2) / (\n pi\n * (self.d_source + self._d_detect())\n * torch.sin(\n n\n * self.s_detect.abs()\n / (self.d_source + self._d_detect())\n )\n ).pow(2)\n f = torch.stack(\n [f, torch.zeros(f.shape, device=self.n.device)], dim=-1\n )\n f = fftshift(f, dim=(-2,))\n\n filt = fft1(f)[..., 0]\n\n if self.filter_type == \"hamming\":\n # hamming filter\n fac = torch.tensor(\n np.hamming(size).astype(np.float32), device=f.device\n )\n elif self.filter_type == \"hann\":\n # hann filter\n fac = torch.tensor(\n np.hanning(size).astype(np.float32), device=f.device\n )\n elif self.filter_type == \"cosine\":\n # cosine filter\n fac = torch.sin(\n torch.linspace(0, pi, size + 1, device=f.device)[:-1]\n )\n else:\n # ramp / ram-lak filter\n fac = 1.0\n\n return fac * filt", "def _irfft2d(f_x) :", "def smoothfield( d , boxsize, Rth ):\n\n ng = N.shape(d)[0]\n dk = N.fft.rfftn(d)\n kgrid = getkgrid(boxsize,ng)\n dk = dk * N.exp( -(kgrid * Rth) ** 2. / 2. )\n d = N.fft.irfftn(dk)\n\n return d", "def blur_fourier(im, kernel_size):\n im = im.astype(np.float64)\n # build the kernel with zero padding\n kernel_base = gaussian_kernel_factory(kernel_size)\n window = np.zeros_like(im).astype(np.float64)\n M, N = im.shape\n dx, dy = kernel_base.shape\n x_middle, y_middle = N//2, M//2\n\n window[(y_middle-dy//2):(y_middle+dy//2+1), (x_middle-dx//2):(x_middle+dx//2+1)] = kernel_base\n\n # multiply in the freq domain\n return IDFT2(DFT2(im) * DFT2(np.fft.ifftshift(window))).real", "def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):\n input = numpy.asarray(input)\n if input.ndim > 3:\n raise NotImplementedError(\"Only 1d, 2d and 3d inputs are supported\")\n output = _get_output_fourier(output, input)\n axis = normalize_axis_index(axis, input.ndim)\n sizes = _ni_support._normalize_sequence(size, input.ndim)\n sizes = numpy.asarray(sizes, dtype=numpy.float64)\n if not sizes.flags.contiguous:\n sizes = sizes.copy()\n _nd_image.fourier_filter(input, sizes, n, axis, output, 2)\n return output", "def blur_fourier(im, kernel_size):\n kernel = gaus_kernel_calc(kernel_size)\n\n zeros = np.zeros(im.shape)\n x_mid = np.math.floor(im.shape[1] / 2)\n y_mid = np.math.floor(im.shape[0] / 2)\n distance = np.math.floor(kernel_size / 2)\n zeros[x_mid - distance: x_mid + distance + 1, y_mid - distance: y_mid + distance + 1] = kernel\n\n fourier_kernel = DFT2(np.fft.ifftshift(zeros))\n fourier_img = DFT2(im)\n fourier_blured = fourier_kernel * fourier_img\n\n return IDFT2(fourier_blured).real.astype(np.float64)", "def fourier(img):\n return fourierCV(img)", "def fourier_transform2d(self):\n\n zerofill = np.zeros(1024 * np.array([1,1])) #so it will always be square\n zerofill[:len(self.windowed), :len(self.windowed)] = self.windowed\n transform = np.fft.fft2(zerofill)\n transform = np.fft.fftshift(transform) # shift center to zero\n transformed = np.absolute(transform)\n tmax = transformed.max()\n zdata = (transformed)/(tmax) # normalize to maximum value\n\n return zdata", "def process( fids, ndim=2 ):\n\timg = np.empty_like( fids )\n\tax = -1*(np.array( range(ndim) )+1)\n\t\n\timg = 
np.fft.fftshift( np.fft.fftn( fids, axes=ax, ).astype( np.complex64), axes=ax )\n\t\n\treturn np.squeeze(img)", "def fourier_spectra(x, axis=1, flip=True, duplicate=True, **kwargs):\n\n x = forward_fourier(x, axis=axis, duplicate=duplicate)\n ns = x.shape[axis]\n nw = np.int32(np.floor(ns/2) + 1)\n slc = [slice(None)] * len(x.shape)\n slc[axis] = slice(0, nw)\n if flip:\n x = np.flip(x[tuple(slc)], axis=axis)\n return x", "def urfft2(inarray):\n return urfftn(inarray, 2)", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def IDFT2D_slow(x):\r\n x = np.asarray(x, dtype=complex)\r\n \r\n M = len(x)\r\n N = len(x[0])\r\n mat = np.zeros((M, N), np.complex)\r\n \r\n for row in range(M):\r\n mat[row] = IDFT_slow(x[row])\r\n\r\n for col in range(N):\r\n mat[:,col] = IDFT_slow(mat[:,col])\r\n\r\n return mat", "def fftn_1d(a):\r\n b = np.fft.fftpack.fftn(a, axes=(len(a.shape)-1, )) \r\n return np.divide(b, np.sqrt(a.shape[-1]))", "def ufft2(inarray):\n return ufftn(inarray, 2)", "def SpectrumGen(data, bin_size):\n nfft = bin_size\n mask = np.isfinite(data)\n data = data-np.nanmean(data)\n \n\n spectrum = scipy.fftpack.fft(data[mask], n=nfft)\n \n\n return spectrum", "def medianfilter(data, filtersize, threshold=None,verbose=False):\n # 2006/02/01 IJC at the Jet Propulsion Laboratory\n # 2010-02-18 13:52 IJC: Converted to python\n from numpy import zeros, median, abs, std\n\n print \"Just use scipy.signal.medfilt !!!\"\n print \"Just use scipy.signal.medfilt !!!\"\n print \"Just use scipy.signal.medfilt !!!\"\n\n if len(filtersize)<1:\n print 'medianfilter2 requires that filtersize be a 1- or 2-element vector'\n return -1\n elif len(filtersize)==1:\n filtersize = [filtersize[0], filtersize[0]]\n else:\n filtersize = filtersize[0:2]\n\n npix = data.shape[0]\n npiy = data.shape[1]\n bigsize = npix+2*(filtersize[0]-1)\n bigdata = zeros((bigsize,bigsize),float)\n ind = filtersize[0]-1\n if ind==0:\n bigdata = data\n else:\n bigdata[ind:(bigsize-ind), ind:(bigsize-ind)] = data\n\n\n # FOR NOW, WE ASSUME FILTERSIZE IS 
ODD!!\n # AND THAT DATA IS SQUARE!\n niter_x = npix + (filtersize[0]-1)\n niter_y = npiy + (filtersize[1]-1)\n filt = zeros((niter_x,niter_y), float)\n\n for ii in range(niter_x):\n for jj in range(niter_y):\n if verbose>1:\n print \"ii,jj>>\",ii,jj\n if filtersize[0]==1:\n indi = 1\n else:\n indi = filtersize[0]-1\n if filtersize[1]==1:\n indj = 1\n else:\n indj = filtersize[1]-1\n select = bigdata[ii:(ii+indi),jj:(jj+indj)].ravel()\n select = select[isfinite(select)]\n #residualSelection = abs(select - median(select))\n\n if verbose: \n print \"select.shape>>\",select.shape\n print \"threshold>>\",threshold\n\n if threshold is not None:\n if threshold >= 0: # raw threshold\n doFilter = abs(bigdata[ii,jj]-median(select))/std(select)>=threshold\n elif threshold<0: # remove outliers before applying threshold\n npts_init = len(select)\n select = removeoutliers(select, abs(threshold), center='median')\n npts_final = len(select)\n if verbose>1:\n print \"threshold=\",threshold,\", removed %i points\" % (npts_init-npts_final)\n \n doFilter = abs(bigdata[ii,jj]-median(select))/std(select)>=threshold \n else: # filter everything; threshold not set.\n doFilter = True\n\n if verbose:\n print \"doFilter?>>\",doFilter\n if verbose>1:\n print \"select>>\",select\n\n if doFilter: \n newval = median( select )\n else:\n newval = bigdata[ii,jj]\n\n if verbose>1:\n print \"newval>>\",newval\n\n filt[ii,jj] = newval\n\n print filt.shape, [(filtersize[0]-1)/2,niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2,niter_y-(filtersize[0]-1)/2]\n return filt[(filtersize[0]-1)/2:niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2:niter_y-(filtersize[0]-1)/2]", "def test_fft_complex_2d():\n\ta, b, c = np.meshgrid([0, 1, 0, 0], [0, 1j, 1j], [0, 1, 1, 1])\n\tdummy_array = xr.DataArray(a * b * c, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'y': 6, 'z': 8}, dim=['y', 'z'],\n\t\t dx={'y': 0.01, 'z': 0.02})\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.fftn(a * b * c, s=(8, 6), axes=(2, 1)))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.fftfreq(6, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(8, d=0.02))\n\tassert ('x', 'f_y', 'f_z') == spectrum_dims", "def get_fft(u: np.ndarray, n: int) -> np.ndarray:\n\n # We get the fft of each time slice.\n num_slices = u.shape[0]\n ut = np.empty(u.shape, dtype=complex) # shape (20, 262144)\n for s in range(num_slices):\n # We reshape each slice into a 3D cube.\n u_slice = np.reshape(u[s,:], (n, n, n)) # shape (64, 64, 64)\n # We then take the fft of the 3D cube and add it to ut.\n ut_slice = np.fft.fftshift(np.fft.fftn(u_slice)) # shape (64, 64, 64)\n ut[s, :] = ut_slice.flatten()\n\n return ut", "def forward_fft(self, array_in):\r\n # Find side length, as real array may or may not be doubled for\r\n # aliasing control\r\n side = array_in.shape[0]\r\n div_side = 1.0/side**2\r\n\r\n out = np.fft.fft2(self.sign_mat[0:side, 0:side]*array_in)*div_side\r\n return out", "def denoise_frequency_domain(ut: np.ndarray, \n filter_3d: np.ndarray) -> np.ndarray:\n\n num_rows = ut.shape[0]\n ut_denoised = np.empty(ut.shape, dtype=complex)\n for row in range(num_rows):\n ut_slice_cube = np.reshape(ut[row, :], filter_3d.shape)\n ut_slice_cube_denoised = ut_slice_cube*filter_3d\n ut_denoised[row, :] = ut_slice_cube_denoised.flatten()\n\n return ut_denoised", "def DFT2D_slow(x):\r\n x = np.asarray(x, dtype=complex)\r\n \r\n 
M = len(x)\r\n N = len(x[0])\r\n mat = np.zeros((M, N), np.complex)\r\n \r\n for row in range(M):\r\n mat[row] = DFT_slow(x[row])\r\n\r\n for col in range(N):\r\n mat[:,col] = DFT_slow(mat[:,col])\r\n\r\n return mat", "def stdfilt2d(data, filtersize, threshold=None,verbose=False):\n # 2012-08-07 13:42 IJMC: Created from medianfilter\n from numpy import zeros, median, abs, std, isfinite\n\n if not hasattr(filtersize, '__iter__'):\n filtersize = [filtersize]\n\n if len(filtersize)<1:\n print 'medianfilter2 requires that filtersize be a 1- or 2-element vector'\n return -1\n elif len(filtersize)==1:\n filtersize = [filtersize[0], filtersize[0]]\n else:\n filtersize = filtersize[0:2]\n\n npix = data.shape[0]\n npiy = data.shape[1]\n bigsize_x = npix+2*(filtersize[0]-1)\n bigsize_y = npiy+2*(filtersize[1]-1)\n bigdata = zeros((bigsize_x,bigsize_y),float)\n ind = filtersize[0]-1\n if ind==0:\n bigdata = data\n else:\n bigdata[ind:(bigsize_x-ind), ind:(bigsize_y-ind)] = data\n\n\n # FOR NOW, WE ASSUME FILTERSIZE IS ODD!!\n # AND THAT DATA IS SQUARE!\n niter_x = npix + (filtersize[0]-1)\n niter_y = npiy + (filtersize[1]-1)\n filt = zeros((niter_x,niter_y), float)\n\n for ii in range(niter_x):\n for jj in range(niter_y):\n if verbose>1:\n print \"ii,jj>>\",ii,jj\n if filtersize[0]==1:\n indi = 1\n else:\n indi = filtersize[0]-1\n if filtersize[1]==1:\n indj = 1\n else:\n indj = filtersize[1]-1\n select = bigdata[ii:(ii+indi),jj:(jj+indj)].ravel()\n #select = select[isfinite(select)]\n #residualSelection = abs(select - median(select))\n\n doFilter = True\n\n if verbose:\n print \"doFilter?>>\",doFilter\n if verbose>1:\n print \"select>>\",select\n\n if doFilter: \n newval = ( select ).std()\n else:\n newval = bigdata[ii,jj]\n\n if verbose>1:\n print \"newval>>\",newval\n\n filt[ii,jj] = newval\n\n #print filt.shape, [(filtersize[0]-1)/2,niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2,niter_y-(filtersize[0]-1)/2]\n return filt[(filtersize[0]-1)/2:niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2:niter_y-(filtersize[0]-1)/2]", "def fourier_coefficient(t, freq, n=1, axis=-1):\n t, freq, n = map(np.asanyarray, (t, freq, n))\n return np.exp(1j * 2 * np.pi * freq * n * t).sum(axis=axis)", "def test_fft_complex_1d():\n\ta = np.exp(2j * np.pi * np.arange(8) / 8)\n\tdummy_array = xr.DataArray(a, dims=['x'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'x': 16}, dim=['x'], dx={'x': 0.5})\n\tassert np.array_equal(spectrum_array.compute(), np.fft.fft(a, n=16))\n\tassert np.array_equal(spectrum_coords['f_x'], np.fft.fftfreq(16, d=0.5))\n\tassert 'f_x' in spectrum_dims", "def stft(x, fft_size, hopsamp):\n w = np.hamming(fft_size)\n return np.array([np.fft.rfft(w*x[i:i+fft_size]) \n for i in range(0, len(x)-fft_size, hopsamp)])", "def FFT(x):\n x = np.asarray(x, dtype=float)\n N = x.shape[0]\n \n if N % 2 > 0:\n raise ValueError(\"size of x must be a power of 2\")\n elif N <= 32: # this cutoff should be optimized\n return DFT(x)\n else:\n X_even = FFT(x[::2])\n X_odd = FFT(x[1::2])\n factor = np.exp(-2j * np.pi * np.arange(N) / N)\n return np.concatenate([X_even + factor[:int(N / 2)] * X_odd,\n X_even + factor[int(N / 2):] * X_odd])", "def tomography_filter(arr, axis=2, filter_type='hamming', backend='autograd'):\n func = getattr(scipy.signal.windows, filter_type)\n filter = func(arr.shape[axis])\n if axis != len(arr.shape) - 1:\n arr = swap_axes(arr, [axis, len(arr.shape) - 1])\n if backend == 'pytorch':\n args = 
{'device': arr.device}\n else:\n args = {}\n arr_r, arr_i = fft(arr, zeros_like(arr, requires_grad=False, **args))\n arr_r = arr_r * filter\n arr_i = arr_i * filter\n arr, _ = ifft(arr_r, arr_i)\n if axis != len(arr.shape) - 1:\n arr = swap_axes(arr, [axis, len(arr.shape) - 1])\n return arr", "def test_fft_real_2d():\n\ta = np.mgrid[:5, :5, :5][0]\n\tdummy_array = xr.DataArray(a, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'y': 14, 'z': 18}, dim=['y', 'z'],\n\t\t dx={'y': 0.01, 'z': 0.02}, sym=False)\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.rfftn(a, s=(18, 14), axes=(2, 1)))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.rfftfreq(14, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(18, d=0.02))\n\tassert ('x', 'f_y', 'f_z') == spectrum_dims", "def IDFT2(fourier_image):\n return IDFT(IDFT(fourier_image).transpose()).transpose()" ]
[ "0.6378481", "0.6309765", "0.62861556", "0.62434644", "0.61714876", "0.60929567", "0.6092913", "0.6006388", "0.59960955", "0.59704673", "0.57724774", "0.57447904", "0.5739366", "0.5689734", "0.5677718", "0.5673864", "0.56690097", "0.5644021", "0.56193554", "0.56177664", "0.56163305", "0.5598021", "0.5587617", "0.5584242", "0.55652815", "0.5549149", "0.5548605", "0.55480033", "0.55172896", "0.5505413" ]
0.672973
0
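The uniform variant follows the same pattern; a sketch using `scipy.ndimage.fourier_uniform` (random image and box size are illustrative). For real-valued input one can also feed the output of `np.fft.rfft2` and pass the pre-transform length of the last axis via the `n` argument.

import numpy as np
from scipy import ndimage

image = np.random.default_rng(1).random((64, 64))
freq = np.fft.fft2(image)
freq = ndimage.fourier_uniform(freq, size=5)     # transfer function of a 5x5 box (mean) filter
smoothed = np.fft.ifft2(freq).real               # comparable to a uniform/box blur of the image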
Multidimensional ellipsoid Fourier filter. The array is multiplied with the Fourier transform of an ellipsoid of given sizes.
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):\n    input = numpy.asarray(input)\n    if input.ndim > 3:\n        raise NotImplementedError("Only 1d, 2d and 3d inputs are supported")\n    output = _get_output_fourier(output, input)\n    axis = normalize_axis_index(axis, input.ndim)\n    sizes = _ni_support._normalize_sequence(size, input.ndim)\n    sizes = numpy.asarray(sizes, dtype=numpy.float64)\n    if not sizes.flags.contiguous:\n        sizes = sizes.copy()\n    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)\n    return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _irfft2d(f_x) :", "def process( fids, ndim=2 ):\n\timg = np.empty_like( fids )\n\tax = -1*(np.array( range(ndim) )+1)\n\t\n\timg = np.fft.fftshift( np.fft.fftn( fids, axes=ax, ).astype( np.complex64), axes=ax )\n\t\n\treturn np.squeeze(img)", "def _get_fourier_filter(self):\n size = max(64, int(2 ** np.ceil(np.log2(2 * self.m[-1].item()))))\n\n pi = torch.acos(torch.zeros(1)).item() * 2.0\n n = torch.cat(\n [\n torch.arange(1, size // 2 + 1, 2, device=self.n.device),\n torch.arange(size // 2 - 1, 0, -2, device=self.n.device),\n ]\n )\n f = torch.zeros(size, device=self.n.device)\n f[0] = 0.25\n if self.flat:\n f[1::2] = -1 / (pi * n).pow(2)\n else:\n f[1::2] = -self.s_detect.abs().pow(2) / (\n pi\n * (self.d_source + self._d_detect())\n * torch.sin(\n n\n * self.s_detect.abs()\n / (self.d_source + self._d_detect())\n )\n ).pow(2)\n f = torch.stack(\n [f, torch.zeros(f.shape, device=self.n.device)], dim=-1\n )\n f = fftshift(f, dim=(-2,))\n\n filt = fft1(f)[..., 0]\n\n if self.filter_type == \"hamming\":\n # hamming filter\n fac = torch.tensor(\n np.hamming(size).astype(np.float32), device=f.device\n )\n elif self.filter_type == \"hann\":\n # hann filter\n fac = torch.tensor(\n np.hanning(size).astype(np.float32), device=f.device\n )\n elif self.filter_type == \"cosine\":\n # cosine filter\n fac = torch.sin(\n torch.linspace(0, pi, size + 1, device=f.device)[:-1]\n )\n else:\n # ramp / ram-lak filter\n fac = 1.0\n\n return fac * filt", "def blur_fourier(im, kernel_size):\n im = im.astype(np.float64)\n # build the kernel with zero padding\n kernel_base = gaussian_kernel_factory(kernel_size)\n window = np.zeros_like(im).astype(np.float64)\n M, N = im.shape\n dx, dy = kernel_base.shape\n x_middle, y_middle = N//2, M//2\n\n window[(y_middle-dy//2):(y_middle+dy//2+1), (x_middle-dx//2):(x_middle+dx//2+1)] = kernel_base\n\n # multiply in the freq domain\n return IDFT2(DFT2(im) * DFT2(np.fft.ifftshift(window))).real", "def make_quad_frs_imag(dims,numlevels,numorientations,bandwidth):\n \n freq_resps_imag = make_steer_frs(dims,numlevels,numorientations,bandwidth)\n freq_resps_imag[0] = np.zeros(dims)\n freq_resps_imag[2] = np.zeros(dims)\n return freq_resps_imag", "def make_ellipsoid_faces(center, dims, nsubdivs=2):\n t = (1+sqrt(5))/2.0\n s = sqrt(1+t**2)\n \n vertices = [(t/s,1/s,0), (-t/s,1/s,0), (t/s,-1/s,0),\\\n (-t/s,-1/s,0), (1/s,0,t/s), (1/s,0,-t/s), (-1/s,0,t/s),(-1/s,0,-t/s),\\\n (0,t/s,1/s), (0,-t/s,1/s), (0,t/s,-1/s), (0,-t/s,-1/s)]\n\n v = vertices\n faces = [(v[0],v[8],v[4]),(v[1],v[10],v[7]),(v[2],v[9],v[11]),(v[7],v[3],v[1]),(v[0],v[5],v[10]),(v[3],v[9],v[6]),\\\n (v[3],v[11],v[9]),(v[8],v[6],v[4]),(v[2],v[4],v[9]),(v[3],v[7],v[11]),(v[4],v[2],v[0]),\\\n (v[9],v[4],v[6]),(v[2],v[11],v[5]),(v[0],v[10],v[8]),(v[5],v[0],v[2]),(v[10],v[5],v[7]),(v[1],v[6],v[8]),\\\n (v[1],v[8],v[10]),(v[6],v[1],v[3]),(v[11],v[7],v[5])]\n \n #subdivide each of the faces into 9 faces\n for i in xrange(nsubdivs):\n new_faces = []\n for face in faces:\n new_faces.extend(subdivide(face[0], face[1], face[2]))\n faces = new_faces\n faces = scale_faces(dims[0], dims[1], dims[2], faces)\n faces = translate_faces(center, faces)\n return faces", "def smoothfield( d , boxsize, Rth ):\n\n ng = N.shape(d)[0]\n dk = N.fft.rfftn(d)\n kgrid = getkgrid(boxsize,ng)\n dk = dk * N.exp( -(kgrid * Rth) ** 2. / 2. 
)\n d = N.fft.irfftn(dk)\n\n return d", "def blur_fourier(im, kernel_size):\n kernel = gaus_kernel_calc(kernel_size)\n\n zeros = np.zeros(im.shape)\n x_mid = np.math.floor(im.shape[1] / 2)\n y_mid = np.math.floor(im.shape[0] / 2)\n distance = np.math.floor(kernel_size / 2)\n zeros[x_mid - distance: x_mid + distance + 1, y_mid - distance: y_mid + distance + 1] = kernel\n\n fourier_kernel = DFT2(np.fft.ifftshift(zeros))\n fourier_img = DFT2(im)\n fourier_blured = fourier_kernel * fourier_img\n\n return IDFT2(fourier_blured).real.astype(np.float64)", "def make_steer_frs(dims, numlevels, numorientations, bandwidth):\n \n result = []\n bands=[]\n p = numorientations-1\n const = math.sqrt(float(math.pow(2,(2*p))*math.pow(math.factorial(p),2)) / float(math.factorial(2*p)*(p+1)))\n f1 = freqspace(dims[0])\n f2 = freqspace(dims[1])\n wx, wy = np.meshgrid(f1, f2)\n size = wx.shape\n r = np.sqrt(wx**2 + wy**2)\n theta = np.arctan2(wy, wx) \n \n bands = np.full((numlevels, numorientations, dims[0], dims[1]), const*1j)\n for level in range(numlevels):\n for orientation in range(numorientations):\n theta_offset = orientation * np.pi / numorientations\n ctrfreq = pi / math.pow(2, (level+1)*bandwidth)\n band = np.cos(theta - theta_offset)**p * log_raised_cos(r, ctrfreq, bandwidth)\n bands[level,orientation,:,:] *= band\n \n hi = log_raised_coshi(r, pi / math.pow(2, bandwidth), bandwidth)\n\n lo = log_raised_coslo(r, pi / math.pow(2, bandwidth * numlevels), bandwidth)\n \n result.append(hi)\n result.append(bands)\n result.append(lo)\n return result", "def vvc_filters_2d(kernel_size):\n vvc_filters = []\n half_kernel = (kernel_size - 8) // 2\n for frac_pos in frac_positions():\n filter_x = filter_coefficients(int(frac_pos.split(\",\")[0]))\n filter_y = filter_coefficients(int(frac_pos.split(\",\")[1]))\n\n filter_vvc = np.tile(filter_x, 8).reshape((8, 8))\n for index in range(len(filter_y)):\n filter_vvc[index, :] *= filter_y[index]\n filter_vvc = filter_vvc / (64 * 64)\n\n vvc_filters.append(np.pad(filter_vvc, ((half_kernel + 1, half_kernel), (half_kernel + 1, half_kernel)),\n 'constant', constant_values=0))\n return vvc_filters", "def IDFT2D_slow(x):\r\n x = np.asarray(x, dtype=complex)\r\n \r\n M = len(x)\r\n N = len(x[0])\r\n mat = np.zeros((M, N), np.complex)\r\n \r\n for row in range(M):\r\n mat[row] = IDFT_slow(x[row])\r\n\r\n for col in range(N):\r\n mat[:,col] = IDFT_slow(mat[:,col])\r\n\r\n return mat", "def test_fft_complex_2d():\n\ta, b, c = np.meshgrid([0, 1, 0, 0], [0, 1j, 1j], [0, 1, 1, 1])\n\tdummy_array = xr.DataArray(a * b * c, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'y': 6, 'z': 8}, dim=['y', 'z'],\n\t\t dx={'y': 0.01, 'z': 0.02})\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.fftn(a * b * c, s=(8, 6), axes=(2, 1)))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.fftfreq(6, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(8, d=0.02))\n\tassert ('x', 'f_y', 'f_z') == spectrum_dims", "def _fourier_extp(cls, series=None, max_trun=None, forecast_period=None):\n import numpy as np\n import copy\n n = len(series)\n\n smoothing_loc = np.where((series < np.mean(series) - 3 * np.std(series)) | (series > np.mean(series)\n + 3 * np.std(series)))\n smoothed_series = copy.deepcopy(series)\n if len(smoothing_loc[0]) > 0:\n for idx in smoothing_loc[0]:\n smoothed_series[idx] = np.mean(smoothed_series[max(0, idx - 6): 
max(0, idx - 1)]) if idx > 5 \\\n else smoothed_series[idx]\n\n iyf = []\n\n # Generating the indices based on odd and event number of terms in the time series\n if int(n) % 2 != 0:\n all_idx = np.arange(1, n // 2 + 1)\n else:\n all_idx = np.arange(1, n // 2)\n\n # Performing Fourier transformation\n yf = np.fft.rfft(smoothed_series)\n\n # Spectral density for the fourier transformation (to identify the significant frequencies)\n psd = abs(yf[all_idx]) ** 2 + abs(yf[-all_idx]) ** 2\n psd_sorted = np.copy(psd)\n psd_sorted[::-1].sort()\n\n max_trun = min(max_trun, max(len(psd_sorted) - 1, 0))\n\n # Computing inverse Fourier transformation by appending next two significant frequencies up to (2 * max_trun)\n # frequencies\n\n idx = all_idx[np.where(psd > psd_sorted[max_trun])[0]]\n idx = np.concatenate((np.array([0]), idx), axis=0)\n a = yf[idx]\n\n # Storing the inverse Fourier transformations with (2 * trun) many frequencies\n iyf.append(cls._inv_fft(n + forecast_period, n, idx, a))\n\n return np.array(iyf)", "def idft(X):\n N = len(X)\n x = np.zeros(N, 'complex')\n \n K = np.arange(0, N, 1)\n for n in range(0, N, 1):\n x[n] = np.dot(X, np.exp(-1j * 2 * np.pi * K * n / N))\n return x / N", "def fourier_transform2d(self):\n\n zerofill = np.zeros(1024 * np.array([1,1])) #so it will always be square\n zerofill[:len(self.windowed), :len(self.windowed)] = self.windowed\n transform = np.fft.fft2(zerofill)\n transform = np.fft.fftshift(transform) # shift center to zero\n transformed = np.absolute(transform)\n tmax = transformed.max()\n zdata = (transformed)/(tmax) # normalize to maximum value\n\n return zdata", "def test_fft_complex_1d():\n\ta = np.exp(2j * np.pi * np.arange(8) / 8)\n\tdummy_array = xr.DataArray(a, dims=['x'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'x': 16}, dim=['x'], dx={'x': 0.5})\n\tassert np.array_equal(spectrum_array.compute(), np.fft.fft(a, n=16))\n\tassert np.array_equal(spectrum_coords['f_x'], np.fft.fftfreq(16, d=0.5))\n\tassert 'f_x' in spectrum_dims", "def fourier(img):\n return fourierCV(img)", "def test_fft_complex_3d():\n\ta, b, c = np.meshgrid([0, 1, 0, 0], [0, 1j, 1j], [0, 1, 1, 1])\n\tdummy_array = xr.DataArray(a * b * c, dims=['x', 'y', 'z'])\n\tchunked_array = dummy_array.chunk(chunks={'x': 2, 'y': 2, 'z': 2})\n\tspectrum_array, spectrum_coords, spectrum_dims = \\\n\t\txfft._fft(chunked_array, nfft={'x': 8, 'y': 6, 'z': 8},\n\t\t dim=['x', 'y', 'z'], dx={'x':12, 'y': 0.01, 'z': 0.02})\n\tassert np.allclose(spectrum_array.compute(),\n\t np.fft.fftn(a * b * c, s=(8, 6, 8)))\n\tassert np.array_equal(spectrum_coords['f_x'], np.fft.fftfreq(8, d=12))\n\tassert np.array_equal(spectrum_coords['f_y'], np.fft.fftfreq(6, d=0.01))\n\tassert np.array_equal(spectrum_coords['f_z'], np.fft.fftfreq(8, d=0.02))\n\tassert ('f_x', 'f_y', 'f_z') == spectrum_dims", "def fourier_der(im):\n ft_img = DFT2(im)\n ft_img = np.fft.fftshift(ft_img)\n\n n_x = im.shape[1]\n coeff_x = (2 * np.pi * 1j)/n_x\n u_freq = np.array([n if n < int(n_x/2) else (n-n_x) for n in range(n_x)]) * 1j\n u_freq = np.array([np.fft.fftshift(u_freq)]*im.shape[0]).transpose()\n dx_ft = coeff_x * IDFT2(np.fft.ifftshift(u_freq.transpose() * ft_img))\n\n m_y = im.shape[0]\n coeff_y = (2 * np.pi * 1j)/m_y\n v_freq = np.array([m if m < int(m_y/2) else (m-m_y) for m in range(m_y)]) * 1j\n v_freq = np.array([np.fft.fftshift(v_freq)] * im.shape[1]).transpose()\n tr = IDFT2(np.fft.ifftshift(v_freq * ft_img))\n dy_ft = 
coeff_y * tr\n\n magnitude = np.sqrt(np.abs(dx_ft)**2 + np.abs(dy_ft)**2)\n return magnitude.real.astype(np.float64)", "def fft_wrapping_illustration(ez=10, en=20, sev_clause='', small2=0, cmap='plasma'):\n fig, axs = plt.subplots(1, 3, figsize=(3 * FIG_W, FIG_H + 0.3), constrained_layout=True)\n ax0, ax1, ax2 = axs.flat\n\n if ez == 0 or sev_clause != '':\n if sev_clause == '':\n raise ValueError('Must input one of ez>0 or a valid DecL sev_clause')\n sev = build(f'agg Junk 1 claim {sev_clause} fixed')\n ez = sev.sev_m\n q1 = sev.q(1)\n if small2 == 0:\n small2 = int(np.ceil(np.log2(q1)))\n xs = np.hstack((-np.inf, np.arange(1 << small2) + 0.5))\n z = np.diff(sev.sev.cdf(xs))\n # enough space for aggregate\n big2 = int(np.ceil(np.log2(q1 * en)))\n elif ez == 1:\n assert small2, 'Need to input small2 in Poisson mode'\n z = np.zeros(1 << small2)\n z[1] = 1\n # full space\n sigma = np.sqrt(en)\n big2 = int(np.ceil(np.log2(en + 5 * sigma)))\n else:\n # enough space for severity and make sev\n if small2 == 0:\n small2 = int(np.ceil(np.log2(2 * ez)))\n z = np.zeros(1 << small2)\n z[:ez*2] = 1 / ez / 2\n # enough space for aggregate\n big2 = int(np.ceil(np.log2(2 * ez * en)))\n\n if big2 <= 8:\n ds = 'steps-post'\n else:\n ds = 'default'\n if ez == 1:\n wrapped = irfft( np.exp(en * (rfft(z) - 1)))\n full = irfft( np.exp(en * (rfft(z, 1 << big2) - 1)))\n else:\n wrapped = irfft( rfft(z) ** en )\n full = irfft( rfft(z, 1 << big2) ** en )\n\n ax0.plot(wrapped, c='C0', drawstyle=ds)\n ax0.xaxis.set_major_locator(mpl.ticker.MultipleLocator(4))\n ax0.set(title=f'Wrapped distribution\\nlog2={small2}')\n lm = ax0.get_ylim()\n lm = (-lm[1] / 20, lm[1]* 1.1)\n ax0.set(ylim=lm)\n\n norm = mpl.colors.Normalize(0, 1, clip=True)\n cmappable = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)\n mapper = cmappable.to_rgba\n cc = list(map(mapper, np.linspace(0, 1, 1 << big2-small2)))\n ax1.plot(full, label='Full computation', c='w', alpha=1, lw=3, drawstyle=ds)\n ax1.xaxis.set_major_locator(mpl.ticker.MultipleLocator(32))\n for n, (s, c) in enumerate(zip(full.reshape((1<<big2-small2, 1<<small2)), cc)):\n ax1.plot(s, c=c, label=f'Part {n}', drawstyle=ds)\n ax1.plot(np.arange((1<<small2) * n, (1<<small2) * (n+1)), s, c=c, lw=2,\n drawstyle=ds, label=None)\n for n in range(1 << big2-small2):\n ax1.axvline(n * (1 << small2), lw=.25, c='C7')\n for n in range(1 << big2-small2):\n ax1.axvline(n * (1 << small2), lw=.25, c='C7')\n if big2 - small2 <= 3:\n ax1.legend(loc='center right')\n ax1.set(title=f'Full distribution\\nlog2={big2}, {1<<big2-small2} components')\n\n wrapped_from_full = full.reshape((1<<big2-small2, 1<<small2))\n ax2.plot(wrapped_from_full.T, label=None, c='C7', lw=.5, drawstyle=ds)\n ax2.plot(wrapped_from_full.sum(0), lw=3\n , drawstyle=ds, label='Wrapped from full', c='C1')\n ax2.plot(wrapped, lw=1, label='Wrapped', c='C0', drawstyle=ds)\n ax2.xaxis.set_major_locator(mpl.ticker.MultipleLocator(4))\n ax2.set(title='Wrapping components (grey)\\nSums (blue, organge as left)')\n # ax2.legend(loc)\n ax2.set(ylim=lm)\n\n assert np.allclose(wrapped_from_full.sum(0), wrapped)", "def numpyInverseFourierTransform(self,coefficients,**kwargs):\n return [(z.real,z.imag) for z in np.fft.ifft(coefficients,**kwargs)]", "def elliptic_mesh(F):\n # draw sphere\n u, v = np.mgrid[0:2*np.pi:40j, 0:np.pi:20j]\n x = np.cos(u)*np.sin(v)\n y = np.sin(u)*np.sin(v)\n z = np.cos(v)\n # put into array\n XYZ = np.vstack([x.flatten(),y.flatten(),z.flatten()])\n # deform according to F\n XYZ = np.dot(F.Fij,XYZ)\n # put back into meshgrid\n 
a, b, c = np.vsplit(XYZ,3)\n x = np.reshape(a,x.shape)\n y = np.reshape(b,y.shape)\n z = np.reshape(c,z.shape)\n return x, y, z", "def IDFT(fourier_signal):\n fourier_signal = fourier_signal.astype(np.complex128)\n # find the length of the signal\n N = fourier_signal.shape[0]\n if fourier_signal.ndim == 2:\n M, N = fourier_signal.shape\n\n # calculate IDFT matrix\n u, v = np.meshgrid(np.arange(N), np.arange(N))\n omega = np.exp(2 * np.pi * 1j / N)\n idft_matrix = np.power(omega, u*v)\n\n # if it is a matrix of fourier signals\n if fourier_signal.ndim == 2:\n # calculate the Fourier Transform\n signal = np.dot(idft_matrix, fourier_signal.transpose())\n return 1/N * signal.transpose()\n\n # calculate the inverse Fourier Transform\n signal = np.dot(idft_matrix, fourier_signal)\n return 1/N * signal", "def fourier_der(im):\n im = im.astype(np.float64)\n # constants\n M, N = im.shape\n u = np.meshgrid(np.arange(N), np.arange(M))[0] - N//2\n v = np.meshgrid(np.arange(N), np.arange(M))[1] - M//2\n u_der, v_der = (2 * np.pi * 1j / N), (2 * np.pi * 1j / M)\n\n # calculate dx, dy\n dx = u_der * IDFT2(np.fft.fftshift(u) * DFT2(im))\n dy = v_der * IDFT2(np.fft.fftshift(v) * DFT2(im))\n\n return np.sqrt(np.abs(dx)**2 + np.abs(dy)**2) # = magnitude", "def wave_vectors_2D(nx, ny, d=1):\n\n return 2*np.pi*vector_vector_grid(\n np.fft.fftfreq(nx, d=d),\n np.fft.fftfreq(ny, d=d))", "def spherefcn(x: np.ndarray) -> np.ndarray:\n if x.ndim == 1:\n x = x.reshape(-1, len(x))\n f = np.sum(x**2, axis=1)\n return f.reshape(-1, 1)", "def half_hermitian_to_real_inverse_fft_image_filter(*args, **kwargs):\n import itk\n instance = itk.HalfHermitianToRealInverseFFTImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def fourier_uniform(input, size, n=-1, axis=-1, output=None):\n input = numpy.asarray(input)\n output = _get_output_fourier(output, input)\n axis = normalize_axis_index(axis, input.ndim)\n sizes = _ni_support._normalize_sequence(size, input.ndim)\n sizes = numpy.asarray(sizes, dtype=numpy.float64)\n if not sizes.flags.contiguous:\n sizes = sizes.copy()\n _nd_image.fourier_filter(input, sizes, n, axis, output, 1)\n return output", "def IDFT(fourier_signal):\n n = fourier_signal.shape[0]\n omega = np.exp((((2 * np.pi)*1j) / n))\n\n e_items = np.vander(omega**np.arange(n), n, True)\n org_signal = np.dot(e_items, fourier_signal)/n\n\n return org_signal", "def _inv_fft(cls, n_extp, n, idx, a):\n import numpy as np\n ts = []\n for i in range(0, n_extp):\n # Sinusoid for the ith frequency\n s_array = cls._signals(idx, i, n)\n\n # Computing the inverse Fouries transformation term for the significant coefficients obtained from the\n # spectral density\n ts.append(np.sum(a * s_array) // n)\n return np.array(ts)" ]
[ "0.5919297", "0.5817323", "0.5366147", "0.5307928", "0.52779627", "0.5270385", "0.5188855", "0.5180645", "0.51758784", "0.5060478", "0.5059644", "0.5033383", "0.50284445", "0.5026053", "0.5017994", "0.50149393", "0.5008967", "0.4998893", "0.4998793", "0.49833018", "0.49821383", "0.49507245", "0.4931019", "0.49096128", "0.4901749", "0.48875242", "0.48857862", "0.48810938", "0.4865674", "0.48550248" ]
0.6894624
0
Safe conversion of page to utf
def __init__(self, page): try: self.page = page.encode("utf8") except UnicodeDecodeError: self.page = page.decode('iso-8859-1').encode('utf8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convertFromUnicode(content):\n return content", "def process_page(page):\n content = utils.any2unicode(page, 'utf8').strip()\n content = re.sub(r\"[^a-zA-Z]\", \" \", content)\n \n return content", "def fix_unicode_encode_error(cls, safe=False):\n from .path9 import Path\n from .file9 import File\n from .print9 import Print\n lockfile = Path.combine(Path.commands(), \".windows_codepage_lock\")\n if File.exist(lockfile):\n cp = cls.get_cmd_code_page()\n if not safe:\n raise IOError(f\"Cannot use codepage 65001, continue using {cp}, \"\n f\"you can set other by Windows.set_cmd_code_page\")\n return cp\n previous_codepage = cls.get_cmd_code_page()\n try:\n if previous_codepage != 65001:\n cls.set_cmd_code_page(65001)\n import os\n with Print.s_print_lock:\n command = r'''python3 -c \"print('йЙ\\r', end='')\"'''\n Print(\"йЙ\\r\", end=\"\")\n Print(\" \\r\", end=\"\")\n os.system(command)\n Print(\" \\r\", end=\"\")\n return cls.get_cmd_code_page()\n except Exception:\n if int(previous_codepage) >= 0:\n if previous_codepage != 65001:\n cls.set_cmd_code_page(previous_codepage)\n else:\n cls.set_cmd_code_page(437)\n Print(\" \\r\", end=\"\")\n from .os9 import OS\n OS._cyrillic_support = False\n File.create(lockfile)\n if not safe:\n raise IOError(f\"Cannot use codepage 65001, returning to {previous_codepage}, \"\n f\"you can set other by Windows.set_cmd_code_page\")\n return previous_codepage", "def removeThreeByteUtf(html):\n entRe = re.compile('&#x[0-9ABCDEabcde]{5,9}')\n return entRe.sub('<WideUnicodeChar>', html)", "def __get_utl_charset(self, url_content):\n pass", "def _force_unicode(data):\n try:\n data = unicode(data, \"utf-8\")\n except UnicodeDecodeError:\n data = unicode(data, \"latin1\")\n return data", "def download_to_utf_string(url: str) -> str:\n request = get(url)\n content = request.content.decode(\"utf-8\")\n return content", "def decode_to_unicode(content):\n if content:\n try:\n # Try to decode ISO-8859-1 to unicode\n return content.decode(\"ISO-8859-1\")\n except UnicodeEncodeError:\n # Assume content is unicode already\n return content", "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "def unicode2html(_unicrap):\n xlate = {u'\\u0022': '&quot;',\nu'\\u0026': '&amp;',\nu'\\u0027': '&apos;',\nu'\\u003C': '&lt;',\nu'\\u003E': '&gt;',\nu'\\u00A0': '&nbsp;',\nu'\\u00A1': '&iexcl;',\nu'\\u00A2': '&cent;',\nu'\\u00A3': '&pound;',\nu'\\u00A4': '&curren;',\nu'\\u00A5': '&yen;',\nu'\\u00A6': '&brvbar;',\nu'\\u00A7': '&sect;',\nu'\\u00A8': '&uml;',\nu'\\u00A9': '&copy;',\nu'\\u00AA': '&ordf;',\nu'\\u00AB': '&laquo;',\nu'\\u00AC': '&not;',\nu'\\u00AD': '&shy;',\nu'\\u00AE': '&reg;',\nu'\\u00AF': '&macr;',\nu'\\u00B0': '&deg;',\nu'\\u00B1': '&plusmn;',\nu'\\u00B2': '&sup2;',\nu'\\u00B3': '&sup3;',\nu'\\u00B4': '&acute;',\nu'\\u00B5': '&micro;',\nu'\\u00B6': '&para;',\nu'\\u00B7': '&middot;',\nu'\\u00B8': '&cedil;',\nu'\\u00B9': '&sup1;',\nu'\\u00BA': '&ordm;',\nu'\\u00BB': '&raquo;',\nu'\\u00BC': '&frac14;',\nu'\\u00BD': '&frac12;',\nu'\\u00BE': '&frac34;',\nu'\\u00BF': '&iquest;',\nu'\\u00C0': '&Agrave;',\nu'\\u00C1': 
'&Aacute;',\nu'\\u00C2': '&Acirc;',\nu'\\u00C3': '&Atilde;',\nu'\\u00C4': '&Auml;',\nu'\\u00C5': '&Aring;',\nu'\\u00C6': '&AElig;',\nu'\\u00C7': '&Ccedil;',\nu'\\u00C8': '&Egrave;',\nu'\\u00C9': '&Eacute;',\nu'\\u00CA': '&Ecirc;',\nu'\\u00CB': '&Euml;',\nu'\\u00CC': '&Igrave;',\nu'\\u00CD': '&Iacute;',\nu'\\u00CE': '&Icirc;',\nu'\\u00CF': '&Iuml;',\nu'\\u00D0': '&ETH;',\nu'\\u00D1': '&Ntilde;',\nu'\\u00D2': '&Ograve;',\nu'\\u00D3': '&Oacute;',\nu'\\u00D4': '&Ocirc;',\nu'\\u00D5': '&Otilde;',\nu'\\u00D6': '&Ouml;',\nu'\\u00D7': '&times;',\nu'\\u00D8': '&Oslash;',\nu'\\u00D9': '&Ugrave;',\nu'\\u00DA': '&Uacute;',\nu'\\u00DB': '&Ucirc;',\nu'\\u00DC': '&Uuml;',\nu'\\u00DD': '&Yacute;',\nu'\\u00DE': '&THORN;',\nu'\\u00DF': '&szlig;',\nu'\\u00E0': '&agrave;',\nu'\\u00E1': '&aacute;',\nu'\\u00E2': '&acirc;',\nu'\\u00E3': '&atilde;',\nu'\\u00E4': '&auml;',\nu'\\u00E5': '&aring;',\nu'\\u00E6': '&aelig;',\nu'\\u00E7': '&ccedil;',\nu'\\u00E8': '&egrave;',\nu'\\u00E9': '&eacute;',\nu'\\u00EA': '&ecirc;',\nu'\\u00EB': '&euml;',\nu'\\u00EC': '&igrave;',\nu'\\u00ED': '&iacute;',\nu'\\u00EE': '&icirc;',\nu'\\u00EF': '&iuml;',\nu'\\u00F0': '&eth;',\nu'\\u00F1': '&ntilde;',\nu'\\u00F2': '&ograve;',\nu'\\u00F3': '&oacute;',\nu'\\u00F4': '&ocirc;',\nu'\\u00F5': '&otilde;',\nu'\\u00F6': '&ouml;',\nu'\\u00F7': '&divide;',\nu'\\u00F8': '&oslash;',\nu'\\u00F9': '&ugrave;',\nu'\\u00FA': '&uacute;',\nu'\\u00FB': '&ucirc;',\nu'\\u00FC': '&uuml;',\nu'\\u00FD': '&yacute;',\nu'\\u00FE': '&thorn;',\nu'\\u00FF': '&yuml;',\nu'\\u0152': '&OElig;',\nu'\\u0153': '&oelig;',\nu'\\u0160': '&Scaron;',\nu'\\u0161': '&scaron;',\nu'\\u0178': '&Yuml;',\nu'\\u0192': '&fnof;',\nu'\\u02C6': '&circ;',\nu'\\u02DC': '&tilde;',\nu'\\u0391': '&Alpha;',\nu'\\u0392': '&Beta;',\nu'\\u0393': '&Gamma;',\nu'\\u0394': '&Delta;',\nu'\\u0395': '&Epsilon;',\nu'\\u0396': '&Zeta;',\nu'\\u0397': '&Eta;',\nu'\\u0398': '&Theta;',\nu'\\u0399': '&Iota;',\nu'\\u039A': '&Kappa;',\nu'\\u039B': '&Lambda;',\nu'\\u039C': '&Mu;',\nu'\\u039D': '&Nu;',\nu'\\u039E': '&Xi;',\nu'\\u039F': '&Omicron;',\nu'\\u03A0': '&Pi;',\nu'\\u03A1': '&Rho;',\nu'\\u03A3': '&Sigma;',\nu'\\u03A4': '&Tau;',\nu'\\u03A5': '&Upsilon;',\nu'\\u03A6': '&Phi;',\nu'\\u03A7': '&Chi;',\nu'\\u03A8': '&Psi;',\nu'\\u03A9': '&Omega;',\nu'\\u03B1': '&alpha;',\nu'\\u03B2': '&beta;',\nu'\\u03B3': '&gamma;',\nu'\\u03B4': '&delta;',\nu'\\u03B5': '&epsilon;',\nu'\\u03B6': '&zeta;',\nu'\\u03B7': '&eta;',\nu'\\u03B8': '&theta;',\nu'\\u03B9': '&iota;',\nu'\\u03BA': '&kappa;',\nu'\\u03BB': '&lambda;',\nu'\\u03BC': '&mu;',\nu'\\u03BD': '&nu;',\nu'\\u03BE': '&xi;',\nu'\\u03BF': '&omicron;',\nu'\\u03C0': '&pi;',\nu'\\u03C1': '&rho;',\nu'\\u03C2': '&sigmaf;',\nu'\\u03C3': '&sigma;',\nu'\\u03C4': '&tau;',\nu'\\u03C5': '&upsilon;',\nu'\\u03C6': '&phi;',\nu'\\u03C7': '&chi;',\nu'\\u03C8': '&psi;',\nu'\\u03C9': '&omega;',\nu'\\u03D1': '&thetasym;',\nu'\\u03D2': '&upsih;',\nu'\\u03D6': '&piv;',\nu'\\u2002': '&ensp;',\nu'\\u2003': '&emsp;',\nu'\\u2009': '&thinsp;',\nu'\\u200C': '&zwnj;',\nu'\\u200D': '&zwj;',\nu'\\u200E': '&lrm;',\nu'\\u200F': '&rlm;',\nu'\\u2013': '&ndash;',\nu'\\u2014': '&mdash;',\nu'\\u2018': '&lsquo;',\nu'\\u2019': '&rsquo;',\nu'\\u201A': '&sbquo;',\nu'\\u201C': '&ldquo;',\nu'\\u201D': '&rdquo;',\nu'\\u201E': '&bdquo;',\nu'\\u2020': '&dagger;',\nu'\\u2021': '&Dagger;',\nu'\\u2022': '&bull;',\nu'\\u2026': '&hellip;',\nu'\\u2030': '&permil;',\nu'\\u2032': '&prime;',\nu'\\u2033': '&Prime;',\nu'\\u2039': '&lsaquo;',\nu'\\u203A': '&rsaquo;',\nu'\\u203E': '&oline;',\nu'\\u2044': '&frasl;',\nu'\\u20AC': 
'&euro;',\nu'\\u2111': '&image;',\nu'\\u2118': '&weierp;',\nu'\\u211C': '&real;',\nu'\\u2122': '&trade;',\nu'\\u2135': '&alefsym;',\nu'\\u2190': '&larr;',\nu'\\u2191': '&uarr;',\nu'\\u2192': '&rarr;',\nu'\\u2193': '&darr;',\nu'\\u2194': '&harr;',\nu'\\u21B5': '&crarr;',\nu'\\u21D0': '&lArr;',\nu'\\u21D1': '&uArr;',\nu'\\u21D2': '&rArr;',\nu'\\u21D3': '&dArr;',\nu'\\u21D4': '&hArr;',\nu'\\u2200': '&forall;',\nu'\\u2202': '&part;',\nu'\\u2203': '&exist;',\nu'\\u2205': '&empty;',\nu'\\u2207': '&nabla;',\nu'\\u2208': '&isin;',\nu'\\u2209': '&notin;',\nu'\\u220B': '&ni;',\nu'\\u220F': '&prod;',\nu'\\u2211': '&sum;',\nu'\\u2212': '&minus;',\nu'\\u2217': '&lowast;',\nu'\\u221A': '&radic;',\nu'\\u221D': '&prop;',\nu'\\u221E': '&infin;',\nu'\\u2220': '&ang;',\nu'\\u2227': '&and;',\nu'\\u2228': '&or;',\nu'\\u2229': '&cap;',\nu'\\u222A': '&cup;',\nu'\\u222B': '&int;',\nu'\\u2234': '&there4;',\nu'\\u223C': '&sim;',\nu'\\u2245': '&cong;',\nu'\\u2248': '&asymp;',\nu'\\u2260': '&ne;',\nu'\\u2261': '&equiv;',\nu'\\u2264': '&le;',\nu'\\u2265': '&ge;',\nu'\\u2282': '&sub;',\nu'\\u2283': '&sup;',\nu'\\u2284': '&nsub;',\nu'\\u2286': '&sube;',\nu'\\u2287': '&supe;',\nu'\\u2295': '&oplus;',\nu'\\u2297': '&otimes;',\nu'\\u22A5': '&perp;',\nu'\\u22C5': '&sdot;',\nu'\\u2308': '&lceil;',\nu'\\u2309': '&rceil;',\nu'\\u230A': '&lfloor;',\nu'\\u230B': '&rfloor;',\nu'\\u27E8': '&lang;',\nu'\\u27E9': '&rang;',\nu'\\u25CA': '&loz;',\nu'\\u2660': '&spades;',\nu'\\u2663': '&clubs;',\nu'\\u2665': '&hearts;',\nu'\\u2666': '&diams;'}\n\n strOut = \"\"\n if _unicrap is not None:\n for i in _unicrap:\n if i in xlate:\n strOut += xlate[i]\n else:\n strOut += str(i)\n return strOut", "def escapeDecode(s: unicode) -> unicode:\n ...", "def _hidden_in_unicode(self, txt):", "def decode_to_utf8(text) -> bytes: # pragma: no cover\n try:\n return text.decode(\"utf-8\")\n except (AttributeError, UnicodeEncodeError):\n return text", "def test_unicode_io(self):\r\n tm1 = TestModel.create(count=9, text=u'4567ë9989')\r\n tm2 = TestModel.get(tm1.vid)", "def textify(read_pdf,spage,epage):\n\n page_text = \"\"\n for page in range(spage, epage):\n page_content = read_pdf.getPage(page)\n page_text += page_content.extractText()\n\n full_text = page_text #.encode('utf-8')\n return full_text", "def test_file_utf8_write_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(self.unicode_string, unicode_text)", "def su(value):\n return safe_unicode(value, encoding=get_charset())", "def test_file_utf8_readwrite_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read()\n self.assertEqual(self.unicode_string, unicode_text)", "def fixstring(str):\n\tstr = str.replace(u\"“\",u'\"').replace(u\"’\",u\"'\").replace(u\"â€\",u'\"')\n\tstr = cf.convert_entities(str)\n\tstr = cf.convert_unicode_u(str)\n\tstr = html_to_segments(str)\n\treturn str.strip()", "def force_utf8(text):\n if isinstance(text, binary_type):\n return text\n else:\n return text.encode('utf-8')", "def test_file_utf8_readwrite(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(unicode_text, self.unicode_string)", "def cp1251ToUtf8(text):\n return unicode(text.decode('cp1251'))", "def test_unquote(self):\n fwa = FakeWikiArchivo('abcd <a href=\"/wiki/f%C3%B3u\">FooBar</a> dcba')\n _, r = self.peishranc(fwa)\n 
self.assertEqual(r, [(u'fóu', SCORE_PEISHRANC)])", "def test_non_utf8_message_will_fail(self):\n self.getPage(\"/echo?message=A+bient%F4t\",\n headers=[\n ('Accept-Charset', 'ISO-8859-1,utf-8'),\n ('Content-Type', 'text/html;charset=ISO-8859-1')\n ]\n )\n self.assertStatus('404 Not Found')", "def _convertUnicodeForCPS(self, value):\n try:\n value = str(value)\n except UnicodeEncodeError:\n try:\n value = str(value.encode('ISO-8859-15'))\n except UnicodeEncodeError:\n value = repr(value)\n return value", "def test_file_utf8_readas_writeas(self):\n FileWriter(self.unicode2_path).write_as(self.unicode_string, \"utf-8\")\n unicode_text = FileReader(self.unicode2_path).read_as(\"utf-8\")\n self.assertEqual(unicode_text, self.unicode_string)", "def __init__(self, encoding):\n self.trans = {}\n for char in u\"ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ\":\n self.trans[char] = u\"A\"\n for char in u\"ȀǞ\":\n self.trans[char] = u\"Ä\"\n self.trans[u\"Ǻ\"] = u\"Å\"\n self.trans[u\"Ä\"] = u\"Ae\"\n self.trans[u\"Å\"] = u\"Aa\"\n for char in u\"àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ\":\n self.trans[char] = u\"a\"\n for char in u\"ȁǟ\":\n self.trans[char] = u\"ä\"\n self.trans[u\"ǻ\"] = u\"å\"\n self.trans[u\"ä\"] = u\"ae\"\n self.trans[u\"å\"] = u\"aa\"\n for char in u\"ḂḄḆƁƂ\":\n self.trans[char] = u\"B\"\n for char in u\"ḃḅḇƀɓƃ\":\n self.trans[char] = u\"b\"\n for char in u\"ĆĈĊÇČƇ\":\n self.trans[char] = u\"C\"\n for char in u\"ćĉċçčƈȼ\":\n self.trans[char] = u\"c\"\n self.trans[u\"Ḉ\"] = u\"Ç\"\n self.trans[u\"ḉ\"] = u\"ç\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ĎḊḌḎḐḒĐƉƊƋ\":\n self.trans[char] = u\"D\"\n for char in u\"ďḋḍḏḑḓđɖɗƌ\":\n self.trans[char] = u\"d\"\n for char in u\"ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ\":\n self.trans[char] = u\"E\"\n for char in u\"ỀẾỄỆỂ\":\n self.trans[char] = u\"Ê\"\n for char in u\"èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ\":\n self.trans[char] = u\"e\"\n for char in u\"ềếễệể\":\n self.trans[char] = u\"ê\"\n for char in u\"ḞƑ\":\n self.trans[char] = u\"F\"\n for char in u\"ḟƒ\":\n self.trans[char] = u\"f\"\n for char in u\"ǴḠĞĠĢǦǤƓ\":\n self.trans[char] = u\"G\"\n for char in u\"ǵḡğġģǧǥɠ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ĝ\"] = u\"Gx\"\n self.trans[u\"ĝ\"] = u\"gx\"\n for char in u\"ḢḤḦȞḨḪH̱ĦǶ\":\n self.trans[char] = u\"H\"\n for char in u\"ḣḥḧȟḩḫ̱ẖħƕ\":\n self.trans[char] = u\"h\"\n for char in u\"IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ\":\n self.trans[char] = u\"I\"\n for char in u\"ıìȉíîĩḭïḯīĭȋįǐiịỉɨ\":\n self.trans[char] = u\"i\"\n for char in u\"ĴJ\":\n self.trans[char] = u\"J\"\n for char in u\"ɟĵ̌ǰ\":\n self.trans[char] = u\"j\"\n for char in u\"ḰǨĶḲḴƘ\":\n self.trans[char] = u\"K\"\n for char in u\"ḱǩķḳḵƙ\":\n self.trans[char] = u\"k\"\n for char in u\"ĹĻĽḶḸḺḼȽŁ\":\n self.trans[char] = u\"L\"\n for char in u\"ĺļľḷḹḻḽƚłɫ\":\n self.trans[char] = u\"l\"\n for char in u\"ḾṀṂ\":\n self.trans[char] = u\"M\"\n for char in u\"ḿṁṃɱ\":\n self.trans[char] = u\"m\"\n for char in u\"ǸŃÑŅŇṄṆṈṊŊƝɲȠ\":\n self.trans[char] = u\"N\"\n for char in u\"ǹńñņňṅṇṉṋŋɲƞ\":\n self.trans[char] = u\"n\"\n for char in u\"ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ\":\n self.trans[char] = u\"O\"\n for char in u\"òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ\":\n self.trans[char] = u\"o\"\n for char in u\"ȌŐȪ\":\n self.trans[char] = u\"Ö\"\n for char in u\"ȍőȫ\":\n self.trans[char] = u\"ö\"\n for char in u\"ỒỐỖỘỔȎ\":\n self.trans[char] = u\"Ô\"\n for char in u\"ồốỗộổȏ\":\n self.trans[char] = u\"ô\"\n for char in u\"ṔṖƤ\":\n self.trans[char] = u\"P\"\n for char in u\"ṕṗƥ\":\n self.trans[char] = u\"p\"\n self.trans[u\"ᵽ\"] = u\"q\"\n for char in 
u\"ȐŔŖŘȒṘṚṜṞ\":\n self.trans[char] = u\"R\"\n for char in u\"ȑŕŗřȓṙṛṝṟɽ\":\n self.trans[char] = u\"r\"\n for char in u\"ŚṤŞȘŠṦṠṢṨ\":\n self.trans[char] = u\"S\"\n for char in u\"śṥşșšṧṡṣṩȿ\":\n self.trans[char] = u\"s\"\n self.trans[u\"Ŝ\"] = u\"Sx\"\n self.trans[u\"ŝ\"] = u\"sx\"\n for char in u\"ŢȚŤṪṬṮṰŦƬƮ\":\n self.trans[char] = u\"T\"\n for char in u\"ţțťṫṭṯṱŧȾƭʈ\":\n self.trans[char] = u\"t\"\n for char in u\"ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ\":\n self.trans[char] = u\"U\"\n for char in u\"ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ\":\n self.trans[char] = u\"u\"\n for char in u\"ȔŰǛǗǕǙ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ȕűǜǘǖǚ\":\n self.trans[char] = u\"ü\"\n self.trans[u\"Û\"] = u\"Ux\"\n self.trans[u\"û\"] = u\"ux\"\n self.trans[u\"Ȗ\"] = u\"Û\"\n self.trans[u\"ȗ\"] = u\"û\"\n self.trans[u\"Ừ\"] = u\"Ù\"\n self.trans[u\"ừ\"] = u\"ù\"\n self.trans[u\"Ứ\"] = u\"Ú\"\n self.trans[u\"ứ\"] = u\"ú\"\n for char in u\"ṼṾ\":\n self.trans[char] = u\"V\"\n for char in u\"ṽṿ\":\n self.trans[char] = u\"v\"\n for char in u\"ẀẂŴẄẆẈ\":\n self.trans[char] = u\"W\"\n for char in u\"ẁẃŵẅẇẉ\":\n self.trans[char] = u\"w\"\n for char in u\"ẊẌ\":\n self.trans[char] = u\"X\"\n for char in u\"ẋẍ\":\n self.trans[char] = u\"x\"\n for char in u\"ỲÝŶŸỸȲẎỴỶƳ\":\n self.trans[char] = u\"Y\"\n for char in u\"ỳýŷÿỹȳẏỵỷƴ\":\n self.trans[char] = u\"y\"\n for char in u\"ŹẐŻẒŽẔƵȤ\":\n self.trans[char] = u\"Z\"\n for char in u\"źẑżẓžẕƶȥ\":\n self.trans[char] = u\"z\"\n self.trans[u\"ɀ\"] = u\"zv\"\n\n # Latin: extended Latin alphabet\n self.trans[u\"ɑ\"] = u\"a\"\n for char in u\"ÆǼǢ\":\n self.trans[char] = u\"AE\"\n for char in u\"æǽǣ\":\n self.trans[char] = u\"ae\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ƎƏƐ\":\n self.trans[char] = u\"E\"\n for char in u\"ǝəɛ\":\n self.trans[char] = u\"e\"\n for char in u\"ƔƢ\":\n self.trans[char] = u\"G\"\n for char in u\"ᵷɣƣᵹ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ƅ\"] = u\"H\"\n self.trans[u\"ƅ\"] = u\"h\"\n self.trans[u\"Ƕ\"] = u\"Wh\"\n self.trans[u\"ƕ\"] = u\"wh\"\n self.trans[u\"Ɩ\"] = u\"I\"\n self.trans[u\"ɩ\"] = u\"i\"\n self.trans[u\"Ŋ\"] = u\"Ng\"\n self.trans[u\"ŋ\"] = u\"ng\"\n self.trans[u\"Œ\"] = u\"OE\"\n self.trans[u\"œ\"] = u\"oe\"\n self.trans[u\"Ɔ\"] = u\"O\"\n self.trans[u\"ɔ\"] = u\"o\"\n self.trans[u\"Ȣ\"] = u\"Ou\"\n self.trans[u\"ȣ\"] = u\"ou\"\n self.trans[u\"Ƽ\"] = u\"Q\"\n for char in u\"ĸƽ\":\n self.trans[char] = u\"q\"\n self.trans[u\"ȹ\"] = u\"qp\"\n self.trans[u\"\"] = u\"r\"\n self.trans[u\"ſ\"] = u\"s\"\n self.trans[u\"ß\"] = u\"ss\"\n self.trans[u\"Ʃ\"] = u\"Sh\"\n for char in u\"ʃᶋ\":\n self.trans[char] = u\"sh\"\n self.trans[u\"Ʉ\"] = u\"U\"\n self.trans[u\"ʉ\"] = u\"u\"\n self.trans[u\"Ʌ\"] = u\"V\"\n self.trans[u\"ʌ\"] = u\"v\"\n for char in u\"ƜǷ\":\n self.trans[char] = u\"W\"\n for char in u\"ɯƿ\":\n self.trans[char] = u\"w\"\n self.trans[u\"Ȝ\"] = u\"Y\"\n self.trans[u\"ȝ\"] = u\"y\"\n self.trans[u\"IJ\"] = u\"IJ\"\n self.trans[u\"ij\"] = u\"ij\"\n self.trans[u\"Ƨ\"] = u\"Z\"\n for char in u\"ʮƨ\":\n self.trans[char] = u\"z\"\n self.trans[u\"Ʒ\"] = u\"Zh\"\n self.trans[u\"ʒ\"] = u\"zh\"\n self.trans[u\"Ǯ\"] = u\"Dzh\"\n self.trans[u\"ǯ\"] = u\"dzh\"\n for char in u\"ƸƹʔˀɁɂ\":\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in u\"Cʗǃ\":\n self.trans[char] = u\"!\"\n\n # Punctuation and typography\n for char in u\"«»“”„¨\":\n self.trans[char] = u'\"'\n for char in u\"‘’′\":\n self.trans[char] = u\"'\"\n self.trans[u\"•\"] = u\"*\"\n self.trans[u\"@\"] = u\"(at)\"\n 
self.trans[u\"¤\"] = u\"$\"\n self.trans[u\"¢\"] = u\"c\"\n self.trans[u\"€\"] = u\"E\"\n self.trans[u\"£\"] = u\"L\"\n self.trans[u\"¥\"] = u\"yen\"\n self.trans[u\"†\"] = u\"+\"\n self.trans[u\"‡\"] = u\"++\"\n self.trans[u\"°\"] = u\":\"\n self.trans[u\"¡\"] = u\"!\"\n self.trans[u\"¿\"] = u\"?\"\n self.trans[u\"‰\"] = u\"o/oo\"\n self.trans[u\"‱\"] = u\"o/ooo\"\n for char in u\"¶§\":\n self.trans[char] = u\">\"\n self.trans['…'] = '...'\n for char in u\"‒–—―\":\n self.trans[char] = u\"-\"\n self.trans['·'] = ' '\n self.trans[u\"¦\"] = u\"|\"\n self.trans[u\"⁂\"] = u\"***\"\n self.trans[u\"◊\"] = u\"<>\"\n self.trans[u\"‽\"] = u\"?!\"\n self.trans[u\"؟\"] = u\";-)\"\n self.trans[u\"¹\"] = u\"1\"\n self.trans[u\"²\"] = u\"2\"\n self.trans[u\"³\"] = u\"3\"\n\n # Cyrillic\n self.trans.update({u\"А\": u\"A\", u\"а\": u\"a\", u\"Б\": u\"B\", u\"б\": u\"b\",\n u\"В\": u\"V\", u\"в\": u\"v\", u\"Г\": u\"G\", u\"г\": u\"g\",\n u\"Д\": u\"D\", u\"д\": u\"d\", u\"Е\": u\"E\", u\"е\": u\"e\",\n u\"Ж\": u\"Zh\", u\"ж\": u\"zh\", u\"З\": u\"Z\", u\"з\": u\"z\",\n u\"И\": u\"I\", u\"и\": u\"i\", u\"Й\": u\"J\", u\"й\": u\"j\",\n u\"К\": u\"K\", u\"к\": u\"k\", u\"Л\": u\"L\", u\"л\": u\"l\",\n u\"М\": u\"M\", u\"м\": u\"m\", u\"Н\": u\"N\", u\"н\": u\"n\",\n u\"О\": u\"O\", u\"о\": u\"o\", u\"П\": u\"P\", u\"п\": u\"p\",\n u\"Р\": u\"R\", u\"р\": u\"r\", u\"С\": u\"S\", u\"с\": u\"s\",\n u\"Т\": u\"T\", u\"т\": u\"t\", u\"У\": u\"U\", u\"у\": u\"u\",\n u\"Ф\": u\"F\", u\"ф\": u\"f\", u\"х\": u\"kh\", u\"Ц\": u\"C\",\n u\"ц\": u\"c\", u\"Ч\": u\"Ch\", u\"ч\": u\"ch\", u\"Ш\": u\"Sh\",\n u\"ш\": u\"sh\", u\"Щ\": u\"Shch\", u\"щ\": u\"shch\", u\"Ь\": u\"'\",\n u\"ь\": \"'\", u\"Ъ\": u'\"', u\"ъ\": '\"', u\"Ю\": u\"Yu\",\n u\"ю\": u\"yu\", u\"Я\": u\"Ya\", u\"я\": u\"ya\", u\"Х\": u\"Kh\",\n u\"Χ\": u\"Kh\"})\n\n # Additional Cyrillic letters, most occuring in only one or a few languages\n self.trans.update({u\"Ы\": u\"Y\", u\"ы\": u\"y\", u\"Ё\": u\"Ë\", u\"ё\": u\"ë\",\n u\"Э\": u\"È\", u\"Ѐ\": u\"È\", u\"э\": u\"è\", u\"ѐ\": u\"è\",\n u\"І\": u\"I\", u\"і\": u\"i\", u\"Ї\": u\"Ji\", u\"ї\": u\"ji\",\n u\"Є\": u\"Je\", u\"є\": u\"je\", u\"Ґ\": u\"G\", u\"Ҝ\": u\"G\",\n u\"ґ\": u\"g\", u\"ҝ\": u\"g\", u\"Ђ\": u\"Dj\", u\"ђ\": u\"dj\",\n \"Љ\": \"Lj\", \"љ\": \"lj\",\n u\"Њ\": u\"Nj\", u\"њ\": u\"nj\", u\"Ћ\": u\"Cj\", u\"ћ\": u\"cj\",\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n u\"Ќ\": u\"Kj\", u\"ќ\": u\"kj\", u\"Ӣ\": u\"Ii\", u\"ӣ\": u\"ii\",\n \"Ҳ\": \"H\", \"ҳ\": \"h\",\n u\"Ҷ\": u\"Dz\", u\"ҷ\": u\"dz\", u\"Ө\": u\"Ô\", u\"Ӫ\": u\"Ô\",\n u\"ө\": u\"ô\", u\"ӫ\": u\"ô\", u\"Ү\": u\"Y\", u\"ү\": u\"y\", u\"Һ\": u\"H\",\n u\"һ\": u\"h\", u\"Ә\": u\"AE\", u\"Ӕ\": u\"AE\", u\"ә\": u\"ae\",\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n u\"ѝ\": u\"ì\", u\"Ѝ\": u\"Ì\", u\"Ӑ\": u\"A\", u\"ă\": u\"a\", u\"Ӓ\": u\"Ä\",\n \"Ҽ\": \"Ts\", \"Ҿ\": \"Ts\", \"ҽ\": \"ts\", \"ҿ\": \"ts\",\n u\"Ҙ\": u\"Dh\", u\"ҙ\": u\"dh\", u\"Ӏ\": u\"\", u\"ӏ\": u\"\", u\"Ӆ\": u\"L\",\n u\"ӆ\": u\"l\", u\"Ӎ\": u\"M\", u\"ӎ\": u\"m\", u\"Ӧ\": u\"Ö\", u\"ӧ\": u\"ö\",\n u\"Ҩ\": u\"u\", u\"ҩ\": u\"u\", u\"Ҧ\": u\"Ph\", u\"ҧ\": u\"ph\", u\"Ҏ\": u\"R\",\n u\"ҏ\": u\"r\", u\"Ҫ\": u\"Th\", u\"ҫ\": u\"th\", u\"Ҭ\": u\"T\", u\"ҭ\": u\"t\",\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n u\"ӹ\": u\"u\", u\"Ҵ\": u\"Tts\", u\"ҵ\": u\"tts\", u\"Ӵ\": u\"Ch\", u\"ӵ\": u\"ch\"})\n\n for char in u\"ЈӤҊ\":\n self.trans[char] = u\"J\"\n for char in u\"јӥҋ\":\n self.trans[char] = u\"j\"\n for char in u\"ЏӁӜҶ\":\n 
self.trans[char] = u\"Dzh\"\n for char in u\"џӂӝҷ\":\n self.trans[char] = u\"dzh\"\n for char in u\"ЅӞӠӋҸ\":\n self.trans[char] = u\"Dz\"\n for char in u\"ѕӟӡӌҹ\":\n self.trans[char] = u\"dz\"\n for char in u\"ҒӶҔ\":\n self.trans[char] = u\"G\"\n for char in u\"ғӷҕ\":\n self.trans[char] = u\"g\"\n for char in u\"ҚҞҠӃ\":\n self.trans[char] = u\"Q\"\n for char in u\"қҟҡӄ\":\n self.trans[char] = u\"q\"\n for char in u\"ҢҤӉӇ\":\n self.trans[char] = u\"Ng\"\n for char in u\"ңҥӊӈ\":\n self.trans[char] = u\"ng\"\n for char in u\"ӖѢҌ\":\n self.trans[char] = u\"E\"\n for char in u\"ӗѣҍ\":\n self.trans[char] = u\"e\"\n for char in u\"ӲӰҮ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ӳӱү\":\n self.trans[char] = u\"ü\"\n\n # Archaic Cyrillic letters\n self.trans.update({u\"Ѹ\": u\"Ou\", u\"ѹ\": u\"ou\", u\"Ѡ\": u\"O\", u\"Ѻ\": u\"O\", u\"ѡ\": u\"o\",\n u\"ѻ\": u\"o\", u\"Ѿ\": u\"Ot\", u\"ѿ\": u\"ot\", u\"Ѣ\": u\"E\", u\"ѣ\": u\"e\",\n u\"Ѥ\": u\"Ei\", u\"Ѧ\": u\"Ei\", u\"ѥ\": u\"ei\", u\"ѧ\": u\"ei\", u\"Ѫ\": u\"Ai\",\n u\"ѫ\": u\"ai\", u\"Ѯ\": u\"X\", u\"ѯ\": u\"x\", u\"Ѱ\": u\"Ps\", u\"ѱ\": u\"ps\",\n u\"Ѳ\": u\"Th\", u\"ѳ\": u\"th\", u\"Ѵ\": u\"Ü\", u\"Ѷ\": u\"Ü\", u\"ѵ\": u\"ü\"})\n\n # Hebrew alphabet\n for char in u\"אע\":\n self.trans[char] = u\"'\"\n self.trans[u\"ב\"] = u\"b\"\n self.trans[u\"ג\"] = u\"g\"\n self.trans[u\"ד\"] = u\"d\"\n self.trans[u\"ה\"] = u\"h\"\n self.trans[u\"ו\"] = u\"v\"\n self.trans[u\"ז\"] = u\"z\"\n self.trans[u\"ח\"] = u\"kh\"\n self.trans[u\"ט\"] = u\"t\"\n self.trans[u\"י\"] = u\"y\"\n for char in u\"ךכ\":\n self.trans[char] = u\"k\"\n self.trans[u\"ל\"] = u\"l\"\n for char in u\"םמ\":\n self.trans[char] = u\"m\"\n for char in u\"ןנ\":\n self.trans[char] = u\"n\"\n self.trans[u\"ס\"] = u\"s\"\n for char in u\"ףפ\":\n self.trans[char] = u\"ph\"\n for char in u\"ץצ\":\n self.trans[char] = u\"ts\"\n self.trans[u\"ק\"] = u\"q\"\n self.trans[u\"ר\"] = u\"r\"\n self.trans[u\"ש\"] = u\"sh\"\n self.trans[u\"ת\"] = u\"th\"\n\n # Arab alphabet\n for char in u\"اﺍﺎ\":\n self.trans[char] = u\"a\"\n for char in u\"بﺏﺐﺒﺑ\":\n self.trans[char] = u\"b\"\n for char in u\"تﺕﺖﺘﺗ\":\n self.trans[char] = u\"t\"\n for char in u\"ثﺙﺚﺜﺛ\":\n self.trans[char] = u\"th\"\n for char in u\"جﺝﺞﺠﺟ\":\n self.trans[char] = u\"g\"\n for char in u\"حﺡﺢﺤﺣ\":\n self.trans[char] = u\"h\"\n for char in u\"خﺥﺦﺨﺧ\":\n self.trans[char] = u\"kh\"\n for char in u\"دﺩﺪ\":\n self.trans[char] = u\"d\"\n for char in u\"ذﺫﺬ\":\n self.trans[char] = u\"dh\"\n for char in u\"رﺭﺮ\":\n self.trans[char] = u\"r\"\n for char in u\"زﺯﺰ\":\n self.trans[char] = u\"z\"\n for char in u\"سﺱﺲﺴﺳ\":\n self.trans[char] = u\"s\"\n for char in u\"شﺵﺶﺸﺷ\":\n self.trans[char] = u\"sh\"\n for char in u\"صﺹﺺﺼﺻ\":\n self.trans[char] = u\"s\"\n for char in u\"ضﺽﺾﻀﺿ\":\n self.trans[char] = u\"d\"\n for char in u\"طﻁﻂﻄﻃ\":\n self.trans[char] = u\"t\"\n for char in u\"ظﻅﻆﻈﻇ\":\n self.trans[char] = u\"z\"\n for char in u\"عﻉﻊﻌﻋ\":\n self.trans[char] = u\"'\"\n for char in u\"غﻍﻎﻐﻏ\":\n self.trans[char] = u\"gh\"\n for char in u\"فﻑﻒﻔﻓ\":\n self.trans[char] = u\"f\"\n for char in u\"قﻕﻖﻘﻗ\":\n self.trans[char] = u\"q\"\n for char in u\"كﻙﻚﻜﻛک\":\n self.trans[char] = u\"k\"\n for char in u\"لﻝﻞﻠﻟ\":\n self.trans[char] = u\"l\"\n for char in u\"مﻡﻢﻤﻣ\":\n self.trans[char] = u\"m\"\n for char in u\"نﻥﻦﻨﻧ\":\n self.trans[char] = u\"n\"\n for char in u\"هﻩﻪﻬﻫ\":\n self.trans[char] = u\"h\"\n for char in u\"وﻭﻮ\":\n self.trans[char] = u\"w\"\n for char in u\"یيﻱﻲﻴﻳ\":\n self.trans[char] = u\"y\"\n # Arabic - additional letters, modified letters 
and ligatures\n self.trans[u\"ﺀ\"] = u\"'\"\n for char in u\"آﺁﺂ\":\n self.trans[char] = u\"'a\"\n for char in u\"ةﺓﺔ\":\n self.trans[char] = u\"th\"\n for char in u\"ىﻯﻰ\":\n self.trans[char] = u\"á\"\n for char in u\"یﯼﯽﯿﯾ\":\n self.trans[char] = u\"y\"\n self.trans[u\"؟\"] = u\"?\"\n # Arabic - ligatures\n for char in u\"ﻻﻼ\":\n self.trans[char] = u\"la\"\n self.trans[u\"ﷲ\"] = u\"llah\"\n for char in u\"إأ\":\n self.trans[char] = u\"a'\"\n self.trans[u\"ؤ\"] = u\"w'\"\n self.trans[u\"ئ\"] = u\"y'\"\n for char in u\"◌◌\":\n self.trans[char] = u\"\" # indicates absence of vowels\n # Arabic vowels\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"i\"\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"iy\"\n # Arab numerals\n for char in u\"٠۰\":\n self.trans[char] = u\"0\"\n for char in u\"١۱\":\n self.trans[char] = u\"1\"\n for char in u\"٢۲\":\n self.trans[char] = u\"2\"\n for char in u\"٣۳\":\n self.trans[char] = u\"3\"\n for char in u\"٤۴\":\n self.trans[char] = u\"4\"\n for char in u\"٥۵\":\n self.trans[char] = u\"5\"\n for char in u\"٦۶\":\n self.trans[char] = u\"6\"\n for char in u\"٧۷\":\n self.trans[char] = u\"7\"\n for char in u\"٨۸\":\n self.trans[char] = u\"8\"\n for char in u\"٩۹\":\n self.trans[char] = u\"9\"\n # Perso-Arabic\n for char in u\"پﭙﭙپ\":\n self.trans[char] = u\"p\"\n for char in u\"چچچچ\":\n self.trans[char] = u\"ch\"\n for char in u\"ژژ\":\n self.trans[char] = u\"zh\"\n for char in u\"گﮔﮕﮓ\":\n self.trans[char] = u\"g\"\n\n # Greek\n self.trans.update({u\"Α\": u\"A\", u\"α\": u\"a\", u\"Β\": u\"B\", u\"β\": u\"b\", u\"Γ\": u\"G\",\n u\"γ\": u\"g\", u\"Δ\": u\"D\", u\"δ\": u\"d\", u\"Ε\": u\"E\", u\"ε\": u\"e\",\n u\"Ζ\": u\"Z\", u\"ζ\": u\"z\", u\"Η\": u\"I\", u\"η\": u\"i\", u\"θ\": u\"th\",\n u\"Θ\": u\"Th\", u\"Ι\": u\"I\", u\"ι\": u\"i\", u\"Κ\": u\"K\", u\"κ\": u\"k\",\n u\"Λ\": u\"L\", u\"λ\": u\"l\", u\"Μ\": u\"M\", u\"μ\": u\"m\", u\"Ν\": u\"N\",\n u\"ν\": u\"n\", u\"Ξ\": u\"X\", u\"ξ\": u\"x\", u\"Ο\": u\"O\", u\"ο\": u\"o\",\n u\"Π\": u\"P\", u\"π\": u\"p\", u\"Ρ\": u\"R\", u\"ρ\": u\"r\", u\"Σ\": u\"S\",\n u\"σ\": u\"s\", u\"ς\": u\"s\", u\"Τ\": u\"T\", u\"τ\": u\"t\", u\"Υ\": u\"Y\",\n u\"υ\": u\"y\", u\"Φ\": u\"F\", u\"φ\": u\"f\", u\"Ψ\": u\"Ps\", u\"ψ\": u\"ps\",\n u\"Ω\": u\"O\", u\"ω\": u\"o\", u\"ϗ\": u\"&\", u\"Ϛ\": u\"St\", u\"ϛ\": u\"st\",\n u\"Ϙ\": u\"Q\", u\"Ϟ\": u\"Q\", u\"ϙ\": u\"q\", u\"ϟ\": u\"q\", u\"Ϻ\": u\"S\",\n u\"ϻ\": u\"s\", u\"Ϡ\": u\"Ss\", u\"ϡ\": u\"ss\", u\"Ϸ\": u\"Sh\", u\"ϸ\": u\"sh\",\n u\"·\": u\":\", u\"Ά\": u\"Á\", u\"ά\": u\"á\", u\"Έ\": u\"É\", u\"Ή\": u\"É\",\n u\"έ\": u\"é\", u\"ή\": u\"é\", u\"Ί\": u\"Í\", u\"ί\": u\"í\", u\"Ϊ\": u\"Ï\",\n u\"ϊ\": u\"ï\", u\"ΐ\": u\"ï\", u\"Ό\": u\"Ó\", u\"ό\": u\"ó\", u\"Ύ\": u\"Ý\",\n u\"ύ\": u\"ý\", u\"Ϋ\": u\"Y\", u\"ϋ\": u\"ÿ\", u\"ΰ\": u\"ÿ\", u\"Ώ\": u\"Ó\",\n u\"ώ\": u\"ó\"})\n\n # Japanese (katakana and hiragana)\n for char in u\"アァあ\":\n self.trans[char] = u\"a\"\n for char in u\"イィい\":\n self.trans[char] = u\"i\"\n for char in u\"ウう\":\n self.trans[char] = u\"u\"\n for char in u\"エェえ\":\n self.trans[char] = u\"e\"\n for char in u\"オォお\":\n self.trans[char] = u\"o\"\n for char in u\"ャや\":\n self.trans[char] = u\"ya\"\n for char in u\"ュゆ\":\n self.trans[char] = u\"yu\"\n for char in u\"ョよ\":\n self.trans[char] = u\"yo\"\n for char in u\"カか\":\n self.trans[char] = u\"ka\"\n for char in u\"キき\":\n self.trans[char] = u\"ki\"\n for char in 
u\"クく\":\n self.trans[char] = u\"ku\"\n for char in u\"ケけ\":\n self.trans[char] = u\"ke\"\n for char in u\"コこ\":\n self.trans[char] = u\"ko\"\n for char in u\"サさ\":\n self.trans[char] = u\"sa\"\n for char in u\"シし\":\n self.trans[char] = u\"shi\"\n for char in u\"スす\":\n self.trans[char] = u\"su\"\n for char in u\"セせ\":\n self.trans[char] = u\"se\"\n for char in u\"ソそ\":\n self.trans[char] = u\"so\"\n for char in u\"タた\":\n self.trans[char] = u\"ta\"\n for char in u\"チち\":\n self.trans[char] = u\"chi\"\n for char in u\"ツつ\":\n self.trans[char] = u\"tsu\"\n for char in u\"テて\":\n self.trans[char] = u\"te\"\n for char in u\"トと\":\n self.trans[char] = u\"to\"\n for char in u\"ナな\":\n self.trans[char] = u\"na\"\n for char in u\"ニに\":\n self.trans[char] = u\"ni\"\n for char in u\"ヌぬ\":\n self.trans[char] = u\"nu\"\n for char in u\"ネね\":\n self.trans[char] = u\"ne\"\n for char in u\"ノの\":\n self.trans[char] = u\"no\"\n for char in u\"ハは\":\n self.trans[char] = u\"ha\"\n for char in u\"ヒひ\":\n self.trans[char] = u\"hi\"\n for char in u\"フふ\":\n self.trans[char] = u\"fu\"\n for char in u\"ヘへ\":\n self.trans[char] = u\"he\"\n for char in u\"ホほ\":\n self.trans[char] = u\"ho\"\n for char in u\"マま\":\n self.trans[char] = u\"ma\"\n for char in u\"ミみ\":\n self.trans[char] = u\"mi\"\n for char in u\"ムむ\":\n self.trans[char] = u\"mu\"\n for char in u\"メめ\":\n self.trans[char] = u\"me\"\n for char in u\"モも\":\n self.trans[char] = u\"mo\"\n for char in u\"ラら\":\n self.trans[char] = u\"ra\"\n for char in u\"リり\":\n self.trans[char] = u\"ri\"\n for char in u\"ルる\":\n self.trans[char] = u\"ru\"\n for char in u\"レれ\":\n self.trans[char] = u\"re\"\n for char in u\"ロろ\":\n self.trans[char] = u\"ro\"\n for char in u\"ワわ\":\n self.trans[char] = u\"wa\"\n for char in u\"ヰゐ\":\n self.trans[char] = u\"wi\"\n for char in u\"ヱゑ\":\n self.trans[char] = u\"we\"\n for char in u\"ヲを\":\n self.trans[char] = u\"wo\"\n for char in u\"ンん\":\n self.trans[char] = u\"n\"\n for char in u\"ガが\":\n self.trans[char] = u\"ga\"\n for char in u\"ギぎ\":\n self.trans[char] = u\"gi\"\n for char in u\"グぐ\":\n self.trans[char] = u\"gu\"\n for char in u\"ゲげ\":\n self.trans[char] = u\"ge\"\n for char in u\"ゴご\":\n self.trans[char] = u\"go\"\n for char in u\"ザざ\":\n self.trans[char] = u\"za\"\n for char in u\"ジじ\":\n self.trans[char] = u\"ji\"\n for char in u\"ズず\":\n self.trans[char] = u\"zu\"\n for char in u\"ゼぜ\":\n self.trans[char] = u\"ze\"\n for char in u\"ゾぞ\":\n self.trans[char] = u\"zo\"\n for char in u\"ダだ\":\n self.trans[char] = u\"da\"\n for char in u\"ヂぢ\":\n self.trans[char] = u\"dji\"\n for char in u\"ヅづ\":\n self.trans[char] = u\"dzu\"\n for char in u\"デで\":\n self.trans[char] = u\"de\"\n for char in u\"ドど\":\n self.trans[char] = u\"do\"\n for char in u\"バば\":\n self.trans[char] = u\"ba\"\n for char in u\"ビび\":\n self.trans[char] = u\"bi\"\n for char in u\"ブぶ\":\n self.trans[char] = u\"bu\"\n for char in u\"ベべ\":\n self.trans[char] = u\"be\"\n for char in u\"ボぼ\":\n self.trans[char] = u\"bo\"\n for char in u\"パぱ\":\n self.trans[char] = u\"pa\"\n for char in u\"ピぴ\":\n self.trans[char] = u\"pi\"\n for char in u\"プぷ\":\n self.trans[char] = u\"pu\"\n for char in u\"ペぺ\":\n self.trans[char] = u\"pe\"\n for char in u\"ポぽ\":\n self.trans[char] = u\"po\"\n for char in u\"ヴゔ\":\n self.trans[char] = u\"vu\"\n self.trans[u\"ヷ\"] = u\"va\"\n self.trans[u\"ヸ\"] = u\"vi\"\n self.trans[u\"ヹ\"] = u\"ve\"\n self.trans[u\"ヺ\"] = u\"vo\"\n\n # Japanese and Chinese punctuation and typography\n for char in u\"・·\":\n self.trans[char] = u\" \"\n for 
char in u\"〃『』《》\":\n self.trans[char] = u'\"'\n for char in u\"「」〈〉〘〙〚〛\":\n self.trans[char] = u\"'\"\n for char in u\"(〔\":\n self.trans[char] = u\"(\"\n for char in u\")〕\":\n self.trans[char] = u\")\"\n for char in u\"[【〖\":\n self.trans[char] = u\"[\"\n for char in u\"]】〗\":\n self.trans[char] = u\"]\"\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. '\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in u\"•◦\":\n self.trans[char] = u\"_\"\n for char in u\"※*\":\n self.trans[char] = u\"*\"\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in u\",、\":\n self.trans[char] = u\",\"\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in u\"ეჱ\":\n self.trans[char] = u\"e\"\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in u\"ყ\":\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in u\"წ\":\n self.trans[char] = u\"ts'\"\n for char in u\"ჭ\":\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in u\"पप\":\n self.trans[char] = u\"p\"\n self.trans['अ'] = 'a'\n for char in u\"आा\":\n self.trans[char] = u\"aa\"\n self.trans['प'] = 'pa'\n for char in u\"इि\":\n self.trans[char] = u\"i\"\n for char in u\"ईी\":\n self.trans[char] = u\"ii\"\n for char in u\"उु\":\n self.trans[char] = u\"u\"\n for char in u\"ऊू\":\n self.trans[char] = u\"uu\"\n for char in u\"एे\":\n self.trans[char] = u\"e\"\n for char in u\"ऐै\":\n self.trans[char] = u\"ai\"\n for char in u\"ओो\":\n self.trans[char] = u\"o\"\n for char in u\"औौ\":\n self.trans[char] = u\"au\"\n for char in u\"ऋृर\":\n self.trans[char] = u\"r\"\n for char in u\"ॠॄ\":\n self.trans[char] = u\"rr\"\n for char in u\"ऌॢल\":\n self.trans[char] = u\"l\"\n for char in u\"ॡॣ\":\n self.trans[char] = u\"ll\"\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in u\"टत\":\n self.trans[char] = u\"t\"\n for char in u\"ठथ\":\n self.trans[char] = u\"th\"\n for char in u\"डद\":\n self.trans[char] = u\"d\"\n for char in u\"ढध\":\n self.trans[char] = u\"dh\"\n for char in u\"णन\":\n self.trans[char] = u\"n\"\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in u\"षस\":\n self.trans[char] = u\"s\"\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n 
self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in u\"क़\":\n self.trans[char] = u\"q\"\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in u\"डढ\":\n self.trans[char] = u\"r\"\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in u\"ख्\":\n self.trans[char] = u\"khn\"\n self.trans['त'] = 'tn'\n for char in u\"द्\":\n self.trans[char] = u\"dn\"\n self.trans['श'] = 'cn'\n for char in u\"ह्\":\n self.trans[char] = u\"fn\"\n for char in u\"अँ\":\n self.trans[char] = u\"m\"\n for char in u\"॒॑\":\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in u\"Տ\":\n self.trans[char] = u\"T'\"\n for char in u\"տ\":\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in u\"க்\":\n self.trans[char] = u\"k\"\n for char in u\"ஙண்ந்ன்\":\n self.trans[char] = u\"n\"\n self.trans['ச'] = 'c'\n for char in u\"ஞ்\":\n self.trans[char] = u\"ñ\"\n for char in u\"ட்\":\n self.trans[char] = u\"th\"\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in u\"ம்\":\n self.trans[char] = u\"m\"\n for char in u\"ய்\":\n self.trans[char] = u\"y\"\n for char in u\"ர்ழ்ற\":\n self.trans[char] = u\"r\"\n for char in u\"ல்ள\":\n self.trans[char] = u\"l\"\n for char in u\"வ்\":\n self.trans[char] = u\"v\"\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n 
for char in u\"க்ஷ\":\n self.trans[char] = u\"x\"\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in u\"আা\":\n self.trans[char] = u\"a\"\n for char in u\"ইিঈী\":\n self.trans[char] = u\"i\"\n for char in u\"উুঊূ\":\n self.trans[char] = u\"u\"\n for char in u\"ঋৃ\":\n self.trans[char] = u\"ri\"\n for char in u\"এেয়\":\n self.trans[char] = u\"e\"\n for char in u\"ঐৈ\":\n self.trans[char] = u\"oi\"\n for char in u\"ওো\":\n self.trans[char] = u\"o\"\n for char in u\"ঔৌ\":\n self.trans[char] = \"ou\"\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in u\"টত\":\n self.trans[char] = u\"t\"\n for char in u\"ঠথ\":\n self.trans[char] = u\"th\"\n for char in u\"ডদ\":\n self.trans[char] = u\"d\"\n for char in u\"ঢধ\":\n self.trans[char] = u\"dh\"\n for char in u\"ণন\":\n self.trans[char] = u\"n\"\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in u\"য়\":\n self.trans[char] = u\"-\"\n for char in u\"ড়\":\n self.trans[char] = u\"r\"\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in u\"ขฃคฅฆ\":\n self.trans[char] = u\"kh\"\n self.trans['ง'] = 'ng'\n for char in u\"จฉชฌ\":\n self.trans[char] = u\"ch\"\n for char in u\"ซศษส\":\n self.trans[char] = u\"s\"\n for char in u\"ญย\":\n self.trans[char] = u\"y\"\n for char in u\"ฎด\":\n self.trans[char] = u\"d\"\n for char in u\"ฏต\":\n self.trans[char] = u\"t\"\n for char in u\"ฐฑฒถทธ\":\n self.trans[char] = u\"th\"\n for char in u\"ณน\":\n self.trans[char] = u\"n\"\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in u\"ผพภ\":\n self.trans[char] = u\"ph\"\n for char in u\"ฝฟ\":\n self.trans[char] = u\"f\"\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in u\"ลฬ\":\n self.trans[char] = u\"l\"\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in u\"หฮ\":\n self.trans[char] = u\"h\"\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in u\"อวโิ\":\n self.trans[char] = u\"o\"\n for char in u\"ะัา\":\n self.trans[char] = u\"a\"\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in u\"เ็\":\n self.trans[char] = u\"e\"\n self.trans['แ'] = 'ae'\n for char in u\"ใไ\":\n self.trans[char] = u\"ai\"\n for char in 
u\"่้๊๋็์\":\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans[u\"ಅ\"] = u\"a\"\n for char in u\"ಆಾ\":\n self.trans[char] = u\"aa\"\n for char in u\"ಇಿ\":\n self.trans[char] = u\"i\"\n for char in u\"ಈೀ\":\n self.trans[char] = u\"ii\"\n for char in u\"ಉು\":\n self.trans[char] = u\"u\"\n for char in u\"ಊೂ\":\n self.trans[char] = u\"uu\"\n for char in u\"ಋೂ\":\n self.trans[char] = u\"r'\"\n for char in u\"ಎೆ\":\n self.trans[char] = u\"e\"\n for char in u\"ಏೇ\":\n self.trans[char] = u\"ee\"\n for char in u\"ಐೈ\":\n self.trans[char] = u\"ai\"\n for char in u\"ಒೊ\":\n self.trans[char] = u\"o\"\n for char in u\"ಓೋ\":\n self.trans[char] = u\"oo\"\n for char in u\"ಔೌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ಂ\"] = u\"m'\"\n self.trans[u\"ಃ\"] = u\"h'\"\n self.trans[u\"ಕ\"] = u\"k\"\n self.trans[u\"ಖ\"] = u\"kh\"\n self.trans[u\"ಗ\"] = u\"g\"\n self.trans[u\"ಘ\"] = u\"gh\"\n self.trans[u\"ಙ\"] = u\"ng\"\n self.trans[u\"ಚ\"] = u\"c\"\n self.trans[u\"ಛ\"] = u\"ch\"\n self.trans[u\"ಜ\"] = u\"j\"\n self.trans[u\"ಝ\"] = u\"ny\"\n self.trans[u\"ಟ\"] = u\"tt\"\n self.trans[u\"ಠ\"] = u\"tth\"\n self.trans[u\"ಡ\"] = u\"dd\"\n self.trans[u\"ಢ\"] = u\"ddh\"\n self.trans[u\"ಣ\"] = u\"nn\"\n self.trans[u\"ತ\"] = u\"t\"\n self.trans[u\"ಥ\"] = u\"th\"\n self.trans[u\"ದ\"] = u\"d\"\n self.trans[u\"ಧ\"] = u\"dh\"\n self.trans[u\"ನ\"] = u\"n\"\n self.trans[u\"ಪ\"] = u\"p\"\n self.trans[u\"ಫ\"] = u\"ph\"\n self.trans[u\"ಬ\"] = u\"b\"\n self.trans[u\"ಭ\"] = u\"bh\"\n self.trans[u\"ಮ\"] = u\"m\"\n self.trans[u\"ಯ\"] = u\"y\"\n self.trans[u\"ರ\"] = u\"r\"\n self.trans[u\"ಲ\"] = u\"l\"\n self.trans[u\"ವ\"] = u\"v\"\n self.trans[u\"ಶ\"] = u\"sh\"\n self.trans[u\"ಷ\"] = u\"ss\"\n self.trans[u\"ಸ\"] = u\"s\"\n self.trans[u\"ಹ\"] = u\"h\"\n self.trans[u\"ಳ\"] = u\"ll\"\n self.trans[u\"೦\"] = u\"0\"\n self.trans[u\"೧\"] = u\"1\"\n self.trans[u\"೨\"] = u\"2\"\n self.trans[u\"೩\"] = u\"3\"\n self.trans[u\"೪\"] = u\"4\"\n self.trans[u\"೫\"] = u\"5\"\n self.trans[u\"೬\"] = u\"6\"\n self.trans[u\"೭\"] = u\"7\"\n self.trans[u\"೮\"] = u\"8\"\n self.trans[u\"೯\"] = u\"9\"\n # Telugu\n self.trans['అ'] = 'a'\n for char in u\"ఆా\":\n self.trans[char] = u\"aa\"\n for char in u\"ఇి\":\n self.trans[char] = u\"i\"\n for char in u\"ఈీ\":\n self.trans[char] = u\"ii\"\n for char in u\"ఉు\":\n self.trans[char] = u\"u\"\n for char in 
u\"ఊూ\":\n self.trans[char] = u\"uu\"\n for char in u\"ఋృ\":\n self.trans[char] = u\"r'\"\n for char in u\"ౠౄ\":\n self.trans[char] = u'r\"'\n self.trans[u\"ఌ\"] = u\"l'\"\n self.trans[u\"ౡ\"] = u'l\"'\n for char in u\"ఎె\":\n self.trans[char] = u\"e\"\n for char in u\"ఏే\":\n self.trans[char] = u\"ee\"\n for char in u\"ఐై\":\n self.trans[char] = u\"ai\"\n for char in u\"ఒొ\":\n self.trans[char] = u\"o\"\n for char in u\"ఓో\":\n self.trans[char] = u\"oo\"\n for char in u\"ఔౌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ం\"] = u\"'\"\n self.trans[u\"ః\"] = u'\"'\n self.trans[u\"క\"] = u\"k\"\n self.trans[u\"ఖ\"] = u\"kh\"\n self.trans[u\"గ\"] = u\"g\"\n self.trans[u\"ఘ\"] = u\"gh\"\n self.trans[u\"ఙ\"] = u\"ng\"\n self.trans[u\"చ\"] = u\"ts\"\n self.trans[u\"ఛ\"] = u\"tsh\"\n self.trans[u\"జ\"] = u\"j\"\n self.trans[u\"ఝ\"] = u\"jh\"\n self.trans[u\"ఞ\"] = u\"ñ\"\n for char in u\"టత\":\n self.trans[char] = u\"t\"\n for char in u\"ఠథ\":\n self.trans[char] = u\"th\"\n for char in u\"డద\":\n self.trans[char] = u\"d\"\n for char in u\"ఢధ\":\n self.trans[char] = u\"dh\"\n for char in u\"ణన\":\n self.trans[char] = u\"n\"\n self.trans[u\"ప\"] = u\"p\"\n self.trans[u\"ఫ\"] = u\"ph\"\n self.trans[u\"బ\"] = u\"b\"\n self.trans[u\"భ\"] = u\"bh\"\n self.trans[u\"మ\"] = u\"m\"\n self.trans[u\"య\"] = u\"y\"\n for char in u\"రఱ\":\n self.trans[char] = u\"r\"\n for char in u\"లళ\":\n self.trans[char] = u\"l\"\n self.trans[u\"వ\"] = u\"v\"\n self.trans[u\"శ\"] = u\"sh\"\n for char in u\"షస\":\n self.trans[char] = u\"s\"\n self.trans[u\"హ\"] = u\"h\"\n self.trans[u\"్\"] = \"\"\n for char in u\"ంఁ\":\n self.trans[char] = u\"^\"\n self.trans[u\"ః\"] = u\"-\"\n self.trans[u\"౦\"] = u\"0\"\n self.trans[u\"౧\"] = u\"1\"\n self.trans[u\"౨\"] = u\"2\"\n self.trans[u\"౩\"] = u\"3\"\n self.trans[u\"౪\"] = u\"4\"\n self.trans[u\"౫\"] = u\"5\"\n self.trans[u\"౬\"] = u\"6\"\n self.trans[u\"౭\"] = u\"7\"\n self.trans[u\"౮\"] = u\"8\"\n self.trans[u\"౯\"] = u\"9\"\n self.trans[u\"౹\"] = u\"1/4\"\n self.trans[u\"౺\"] = u\"1/2\"\n self.trans[u\"౻\"] = u\"3/4\"\n self.trans[u\"౼\"] = u\"1/16\"\n self.trans[u\"౽\"] = u\"1/8\"\n self.trans[u\"౾\"] = u\"3/16\"\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans[u\"ກ\"] = \"k\"\n for char in u\"ຂຄ\":\n self.trans[char] = \"kh\"\n self.trans[u\"ງ\"] = \"ng\"\n self.trans[u\"ຈ\"] = \"ch\"\n for char in u\"ສຊ\":\n self.trans[char] = \"s\"\n self.trans[u\"ຍ\"] = \"ny\"\n self.trans[u\"ດ\"] = \"d\"\n self.trans[u\"ຕ\"] = \"t\"\n for char in u\"ຖທ\":\n self.trans[char] = \"th\"\n self.trans[u\"ນ\"] = \"n\"\n self.trans[u\"ບ\"] = \"b\"\n self.trans[u\"ປ\"] = \"p\"\n for char in u\"ຜພ\":\n self.trans[char] = \"ph\"\n for char in u\"ຝຟ\":\n self.trans[char] = \"f\"\n for char in u\"ມໝ\":\n self.trans[char] = \"m\"\n self.trans[u\"ຢ\"] = \"y\"\n for char in u\"ຣຼ\":\n self.trans[char] = \"r\"\n for char in u\"ລຼ\":\n self.trans[char] = \"l\"\n self.trans[u\"ວ\"] = \"v\"\n self.trans['ຮ'] = 'h'\n self.trans[u\"ອ\"] = \"'\"\n for char in u\"ະັ\":\n self.trans[char] = \"a\"\n self.trans[u\"ິ\"] = \"i\"\n self.trans[u\"ຶ\"] = \"ue\"\n self.trans[u\"ຸ\"] = \"u\"\n self.trans[u\"ເ\"] = u\"é\"\n self.trans[u\"ແ\"] = u\"è\"\n for char in u\"ໂົາໍ\":\n self.trans[char] = \"o\"\n self.trans[u\"ຽ\"] = \"ia\"\n self.trans[u\"ເຶ\"] = \"uea\"\n self.trans[u\"ຍ\"] = \"i\"\n for char in u\"ໄໃ\":\n self.trans[char] = \"ai\"\n self.trans[u\"ຳ\"] = \"am\"\n self.trans[u\"າ\"] = \"aa\"\n self.trans[u\"ີ\"] = \"ii\"\n 
self.trans[u\"ື\"] = \"yy\"\n self.trans[u\"ູ\"] = \"uu\"\n self.trans[u\"ເ\"] = \"e\"\n self.trans[u\"ແ\"] = \"ei\"\n self.trans[u\"໐\"] = \"0\"\n self.trans[u\"໑\"] = \"1\"\n self.trans[u\"໒\"] = \"2\"\n self.trans[u\"໓\"] = \"3\"\n self.trans[u\"໔\"] = \"4\"\n self.trans[u\"໕\"] = \"5\"\n self.trans[u\"໖\"] = \"6\"\n self.trans[u\"໗\"] = \"7\"\n self.trans[u\"໘\"] = \"8\"\n self.trans[u\"໙\"] = \"9\"\n # Chinese -- note: incomplete\n for char in u\"埃挨哎唉哀皑癌蔼矮艾碍爱隘\":\n self.trans[char] = u\"ai\"\n for char in u\"鞍氨安俺按暗岸胺案\":\n self.trans[char] = u\"an\"\n for char in u\"肮昂盎\":\n self.trans[char] = u\"ang\"\n for char in u\"凹敖熬翱袄傲奥懊澳\":\n self.trans[char] = u\"ao\"\n for char in u\"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸\":\n self.trans[char] = u\"ba\"\n for char in u\"白柏百摆佰败拜稗\":\n self.trans[char] = u\"bai\"\n for char in u\"斑班搬扳般颁板版扮拌伴瓣半办绊\":\n self.trans[char] = u\"ban\"\n for char in u\"邦帮梆榜膀绑棒磅蚌镑傍谤\":\n self.trans[char] = u\"bang\"\n for char in u\"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆\":\n self.trans[char] = u\"bao\"\n for char in u\"杯碑悲卑北辈背贝钡倍狈备惫焙被\":\n self.trans[char] = u\"bei\"\n for char in u\"奔苯本笨\":\n self.trans[char] = u\"ben\"\n for char in u\"崩绷甭泵蹦迸\":\n self.trans[char] = u\"beng\"\n for char in u\"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛\":\n self.trans[char] = u\"bi\"\n for char in u\"鞭边编贬扁便变卞辨辩辫遍\":\n self.trans[char] = u\"bian\"\n for char in u\"标彪膘表\":\n self.trans[char] = u\"biao\"\n for char in u\"鳖憋别瘪\":\n self.trans[char] = u\"bie\"\n for char in u\"彬斌濒滨宾摈\":\n self.trans[char] = u\"bin\"\n for char in u\"兵冰柄丙秉饼炳病并\":\n self.trans[char] = u\"bing\"\n for char in u\"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳\":\n self.trans[char] = u\"bo\"\n for char in u\"哺补埠不布步簿部怖\":\n self.trans[char] = u\"bu\"\n for char in u\"猜裁材才财睬踩采彩菜蔡\":\n self.trans[char] = u\"cai\"\n for char in u\"餐参蚕残惭惨灿\":\n self.trans[char] = u\"can\"\n for char in u\"苍舱仓沧藏\":\n self.trans[char] = u\"cang\"\n for char in u\"操糙槽曹草\":\n self.trans[char] = u\"cao\"\n for char in u\"厕策侧册测\":\n self.trans[char] = u\"ce\"\n for char in u\"层蹭\":\n self.trans[char] = u\"ceng\"\n for char in u\"插叉茬茶查碴搽察岔差诧\":\n self.trans[char] = u\"cha\"\n for char in u\"拆柴豺\":\n self.trans[char] = u\"chai\"\n for char in u\"搀掺蝉馋谗缠铲产阐颤\":\n self.trans[char] = u\"chan\"\n for char in u\"昌猖场尝常长偿肠厂敞畅唱倡\":\n self.trans[char] = u\"chang\"\n for char in u\"超抄钞朝嘲潮巢吵炒\":\n self.trans[char] = u\"chao\"\n for char in u\"车扯撤掣彻澈\":\n self.trans[char] = u\"che\"\n for char in u\"郴臣辰尘晨忱沉陈趁衬\":\n self.trans[char] = u\"chen\"\n for char in u\"撑称城橙成呈乘程惩澄诚承逞骋秤\":\n self.trans[char] = u\"cheng\"\n for char in u\"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽\":\n self.trans[char] = u\"chi\"\n for char in u\"充冲虫崇宠\":\n self.trans[char] = u\"chong\"\n for char in u\"抽酬畴踌稠愁筹仇绸瞅丑臭\":\n self.trans[char] = u\"chou\"\n for char in u\"初出橱厨躇锄雏滁除楚储矗搐触处\":\n self.trans[char] = u\"chu\"\n self.trans['揣'] = 'chuai'\n for char in u\"川穿椽传船喘串\":\n self.trans[char] = u\"chuan\"\n for char in u\"疮窗幢床闯创\":\n self.trans[char] = u\"chuang\"\n for char in u\"吹炊捶锤垂\":\n self.trans[char] = u\"chui\"\n for char in u\"春椿醇唇淳纯蠢\":\n self.trans[char] = u\"chun\"\n for char in u\"戳绰\":\n self.trans[char] = u\"chuo\"\n for char in u\"疵茨磁雌辞慈瓷词此刺赐次\":\n self.trans[char] = u\"ci\"\n for char in u\"聪葱囱匆从丛\":\n self.trans[char] = u\"cong\"\n self.trans['凑'] = 'cou'\n for char in u\"粗醋簇促\":\n self.trans[char] = u\"cu\"\n for char in u\"蹿篡窜\":\n self.trans[char] = u\"cuan\"\n for char in u\"摧崔催脆瘁粹淬翠\":\n self.trans[char] = u\"cui\"\n for char in u\"村存寸\":\n self.trans[char] = u\"cun\"\n for char in u\"磋撮搓措挫错\":\n self.trans[char] = u\"cuo\"\n for char in u\"搭达答瘩打大\":\n self.trans[char] 
= u\"da\"\n for char in u\"呆歹傣戴带殆代贷袋待逮怠\":\n self.trans[char] = u\"dai\"\n for char in u\"耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋\":\n self.trans[char] = u\"dan\"\n for char in u\"当挡党荡档\":\n self.trans[char] = u\"dang\"\n for char in u\"刀捣蹈倒岛祷导到稻悼道盗\":\n self.trans[char] = u\"dao\"\n for char in u\"德得的\":\n self.trans[char] = u\"de\"\n for char in u\"蹬灯登等瞪凳邓\":\n self.trans[char] = u\"deng\"\n for char in u\"堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔\":\n self.trans[char] = u\"di\"\n for char in u\"颠掂滇碘点典靛垫电佃甸店惦奠淀殿\":\n self.trans[char] = u\"dian\"\n for char in u\"碉叼雕凋刁掉吊钓调\":\n self.trans[char] = u\"diao\"\n for char in u\"跌爹碟蝶迭谍叠\":\n self.trans[char] = u\"die\"\n for char in u\"丁盯叮钉顶鼎锭定订\":\n self.trans[char] = u\"ding\"\n self.trans['丢'] = 'diu'\n for char in u\"东冬董懂动栋侗恫冻洞\":\n self.trans[char] = u\"dong\"\n for char in u\"兜抖斗陡豆逗痘\":\n self.trans[char] = u\"dou\"\n for char in u\"都督毒犊独读堵睹赌杜镀肚度渡妒\":\n self.trans[char] = u\"du\"\n for char in u\"端短锻段断缎\":\n self.trans[char] = u\"duan\"\n for char in u\"堆兑队对\":\n self.trans[char] = u\"dui\"\n for char in u\"墩吨蹲敦顿囤钝盾遁\":\n self.trans[char] = u\"dun\"\n for char in u\"掇哆多夺垛躲朵跺舵剁惰堕\":\n self.trans[char] = u\"duo\"\n for char in u\"蛾峨鹅俄额讹娥恶厄扼遏鄂饿\":\n self.trans[char] = u\"e\"\n for char in u\"恩嗯\":\n self.trans[char] = u\"en\"\n for char in u\"而儿耳尔饵洱二贰\":\n self.trans[char] = u\"er\"\n for char in u\"发罚筏伐乏阀法珐\":\n self.trans[char] = u\"fa\"\n for char in u\"藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛\":\n self.trans[char] = u\"fan\"\n for char in u\"坊芳方肪房防妨仿访纺放\":\n self.trans[char] = u\"fang\"\n for char in u\"菲非啡飞肥匪诽吠肺废沸费\":\n self.trans[char] = u\"fei\"\n for char in u\"芬酚吩氛分纷坟焚汾粉奋份忿愤粪\":\n self.trans[char] = u\"fen\"\n for char in u\"丰封枫蜂峰锋风疯烽逢冯缝讽奉凤\":\n self.trans[char] = u\"feng\"\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in u\"夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐\":\n self.trans[char] = u\"fu\"\n for char in u\"噶嘎\":\n self.trans[char] = u\"ga\"\n for char in u\"该改概钙盖溉\":\n self.trans[char] = u\"gai\"\n for char in u\"干甘杆柑竿肝赶感秆敢赣\":\n self.trans[char] = u\"gan\"\n for char in u\"冈刚钢缸肛纲岗港杠\":\n self.trans[char] = u\"gang\"\n for char in u\"篙皋高膏羔糕搞镐稿告\":\n self.trans[char] = u\"gao\"\n for char in u\"哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各\":\n self.trans[char] = u\"ge\"\n self.trans['给'] = 'gei'\n for char in u\"根跟\":\n self.trans[char] = u\"gen\"\n for char in u\"耕更庚羹埂耿梗\":\n self.trans[char] = u\"geng\"\n for char in u\"工攻功恭龚供躬公宫弓巩汞拱贡共\":\n self.trans[char] = u\"gong\"\n for char in u\"钩勾沟苟狗垢构购够\":\n self.trans[char] = u\"gou\"\n for char in u\"辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇\":\n self.trans[char] = u\"gu\"\n for char in u\"刮瓜剐寡挂褂\":\n self.trans[char] = u\"gua\"\n for char in u\"乖拐怪\":\n self.trans[char] = u\"guai\"\n for char in u\"棺关官冠观管馆罐惯灌贯\":\n self.trans[char] = u\"guan\"\n for char in u\"光广逛\":\n self.trans[char] = u\"guang\"\n for char in u\"瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽\":\n self.trans[char] = u\"gui\"\n for char in u\"辊滚棍\":\n self.trans[char] = u\"gun\"\n for char in u\"锅郭国果裹过\":\n self.trans[char] = u\"guo\"\n self.trans['哈'] = 'ha'\n for char in u\"骸孩海氦亥害骇\":\n self.trans[char] = u\"hai\"\n for char in u\"酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉\":\n self.trans[char] = u\"han\"\n for char in u\"夯杭航\":\n self.trans[char] = u\"hang\"\n for char in u\"壕嚎豪毫郝好耗号浩\":\n self.trans[char] = u\"hao\"\n for char in u\"呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺\":\n self.trans[char] = u\"he\"\n for char in u\"嘿黑\":\n self.trans[char] = u\"hei\"\n for char in u\"痕很狠恨\":\n self.trans[char] = u\"hen\"\n for char in u\"哼亨横衡恒\":\n self.trans[char] = u\"heng\"\n for char in u\"轰哄烘虹鸿洪宏弘红\":\n self.trans[char] = u\"hong\"\n for char in u\"喉侯猴吼厚候后\":\n self.trans[char] = 
u\"hou\"\n for char in u\"呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户\":\n self.trans[char] = u\"hu\"\n for char in u\"花哗华猾滑画划化话\":\n self.trans[char] = u\"hua\"\n for char in u\"槐徊怀淮坏\":\n self.trans[char] = u\"huai\"\n for char in u\"欢环桓还缓换患唤痪豢焕涣宦幻\":\n self.trans[char] = u\"huan\"\n for char in u\"荒慌黄磺蝗簧皇凰惶煌晃幌恍谎\":\n self.trans[char] = u\"huang\"\n for char in u\"灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘\":\n self.trans[char] = u\"hui\"\n for char in u\"荤昏婚魂浑混\":\n self.trans[char] = u\"hun\"\n for char in u\"豁活伙火获或惑霍货祸\":\n self.trans[char] = u\"huo\"\n for char in u\"击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪\":\n self.trans[char] = u\"ji\"\n for char in u\"嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁\":\n self.trans[char] = u\"jia\"\n for char in u\"歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建\":\n self.trans[char] = u\"jian\"\n for char in u\"僵姜将浆江疆蒋桨奖讲匠酱降\":\n self.trans[char] = u\"jiang\"\n for char in u\"蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖\":\n self.trans[char] = u\"jiao\"\n for char in u\"揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届\":\n self.trans[char] = u\"jie\"\n for char in u\"巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲\":\n self.trans[char] = u\"jin\"\n for char in u\"荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净\":\n self.trans[char] = u\"jing\"\n for char in u\"囧炯窘\":\n self.trans[char] = u\"jiong\"\n for char in u\"揪究纠玖韭久灸九酒厩救旧臼舅咎就疚\":\n self.trans[char] = u\"jiu\"\n for char in u\"鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧\":\n self.trans[char] = u\"ju\"\n for char in u\"捐鹃娟倦眷卷绢\":\n self.trans[char] = u\"juan\"\n for char in u\"撅攫抉掘倔爵觉决诀绝\":\n self.trans[char] = u\"jue\"\n for char in u\"均菌钧军君峻俊竣浚郡骏\":\n self.trans[char] = u\"jun\"\n for char in u\"喀咖卡咯\":\n self.trans[char] = u\"ka\"\n for char in u\"开揩楷凯慨\":\n self.trans[char] = u\"kai\"\n for char in u\"刊堪勘坎砍看\":\n self.trans[char] = u\"kan\"\n for char in u\"康慷糠扛抗亢炕\":\n self.trans[char] = u\"kang\"\n for char in u\"考拷烤靠\":\n self.trans[char] = u\"kao\"\n for char in u\"坷苛柯棵磕颗科壳咳可渴克刻客课\":\n self.trans[char] = u\"ke\"\n for char in u\"肯啃垦恳\":\n self.trans[char] = u\"ken\"\n for char in u\"坑吭\":\n self.trans[char] = u\"keng\"\n for char in u\"空恐孔控\":\n self.trans[char] = u\"kong\"\n for char in u\"抠口扣寇\":\n self.trans[char] = u\"kou\"\n for char in u\"枯哭窟苦酷库裤\":\n self.trans[char] = u\"ku\"\n for char in u\"夸垮挎跨胯\":\n self.trans[char] = u\"kua\"\n for char in u\"块筷侩快\":\n self.trans[char] = u\"kuai\"\n for char in u\"宽款\":\n self.trans[char] = u\"kuan\"\n for char in u\"匡筐狂框矿眶旷况\":\n self.trans[char] = u\"kuang\"\n for char in u\"亏盔岿窥葵奎魁傀馈愧溃\":\n self.trans[char] = u\"kui\"\n for char in u\"坤昆捆困\":\n self.trans[char] = u\"kun\"\n for char in u\"括扩廓阔\":\n self.trans[char] = u\"kuo\"\n for char in u\"垃拉喇蜡腊辣啦\":\n self.trans[char] = u\"la\"\n for char in u\"莱来赖\":\n self.trans[char] = u\"lai\"\n for char in u\"蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥\":\n self.trans[char] = u\"lan\"\n for char in u\"琅榔狼廊郎朗浪\":\n self.trans[char] = u\"lang\"\n for char in u\"捞劳牢老佬姥酪烙涝\":\n self.trans[char] = u\"lao\"\n for char in u\"勒乐\":\n self.trans[char] = u\"le\"\n for char in u\"雷镭蕾磊累儡垒擂肋类泪\":\n self.trans[char] = u\"lei\"\n for char in u\"棱楞冷\":\n self.trans[char] = u\"leng\"\n for char in u\"厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力璃哩\":\n self.trans[char] = u\"li\"\n self.trans['俩'] = 'lia'\n for char in u\"联莲连镰廉怜涟帘敛脸链恋炼练\":\n self.trans[char] = u\"lian\"\n for char in u\"粮凉梁粱良两辆量晾亮谅\":\n self.trans[char] = u\"liang\"\n for char in u\"撩聊僚疗燎寥辽潦了撂镣廖料\":\n self.trans[char] = u\"liao\"\n for char in u\"列裂烈劣猎\":\n self.trans[char] = u\"lie\"\n for char in u\"琳林磷霖临邻鳞淋凛赁吝拎\":\n self.trans[char] = u\"lin\"\n for char in u\"玲菱零龄铃伶羚凌灵陵岭领另令\":\n self.trans[char] = u\"ling\"\n for char in u\"溜琉榴硫馏留刘瘤流柳六\":\n 
self.trans[char] = u\"liu\"\n for char in u\"龙聋咙笼窿隆垄拢陇\":\n self.trans[char] = u\"long\"\n for char in u\"楼娄搂篓漏陋\":\n self.trans[char] = u\"lou\"\n for char in u\"芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸\":\n self.trans[char] = u\"lu\"\n for char in u\"峦挛孪滦卵乱\":\n self.trans[char] = u\"luan\"\n for char in u\"掠略\":\n self.trans[char] = u\"lue\"\n for char in u\"抡轮伦仑沦纶论\":\n self.trans[char] = u\"lun\"\n for char in u\"萝螺罗逻锣箩骡裸落洛骆络漯\":\n self.trans[char] = u\"luo\"\n for char in u\"驴吕铝侣旅履屡缕虑氯律率滤绿\":\n self.trans[char] = u\"lv\"\n for char in u\"妈麻玛码蚂马骂嘛吗\":\n self.trans[char] = u\"ma\"\n for char in u\"埋买麦卖迈脉\":\n self.trans[char] = u\"mai\"\n for char in u\"瞒馒蛮满蔓曼慢漫谩\":\n self.trans[char] = u\"man\"\n for char in u\"芒茫盲氓忙莽\":\n self.trans[char] = u\"mang\"\n for char in u\"猫茅锚毛矛铆卯茂冒帽貌贸\":\n self.trans[char] = u\"mao\"\n self.trans['么'] = 'me'\n for char in u\"玫枚梅酶霉煤没眉媒镁每美昧寐妹媚\":\n self.trans[char] = u\"mei\"\n for char in u\"门闷们\":\n self.trans[char] = u\"men\"\n for char in u\"萌蒙檬盟锰猛梦孟\":\n self.trans[char] = u\"meng\"\n for char in u\"眯醚靡糜迷谜弥米秘觅泌蜜密幂\":\n self.trans[char] = u\"mi\"\n for char in u\"棉眠绵冕免勉娩缅面\":\n self.trans[char] = u\"mian\"\n for char in u\"苗描瞄藐秒渺庙妙\":\n self.trans[char] = u\"miao\"\n for char in u\"蔑灭\":\n self.trans[char] = u\"mie\"\n for char in u\"民抿皿敏悯闽\":\n self.trans[char] = u\"min\"\n for char in u\"明螟鸣铭名命\":\n self.trans[char] = u\"ming\"\n self.trans['谬'] = 'miu'\n for char in u\"摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌\":\n self.trans[char] = u\"mo\"\n for char in u\"谋牟某\":\n self.trans[char] = u\"mou\"\n for char in u\"拇牡亩姆母墓暮幕募慕木目睦牧穆\":\n self.trans[char] = u\"mu\"\n for char in u\"拿哪呐钠那娜纳\":\n self.trans[char] = u\"na\"\n for char in u\"氖乃奶耐奈\":\n self.trans[char] = u\"nai\"\n for char in u\"南男难\":\n self.trans[char] = u\"nan\"\n self.trans['囊'] = 'nang'\n for char in u\"挠脑恼闹淖\":\n self.trans[char] = u\"nao\"\n self.trans['呢'] = 'ne'\n for char in u\"馁内\":\n self.trans[char] = u\"nei\"\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in u\"妮霓倪泥尼拟你匿腻逆溺\":\n self.trans[char] = u\"ni\"\n for char in u\"蔫拈年碾撵捻念\":\n self.trans[char] = u\"nian\"\n for char in u\"娘酿\":\n self.trans[char] = u\"niang\"\n for char in u\"鸟尿\":\n self.trans[char] = u\"niao\"\n for char in u\"捏聂孽啮镊镍涅\":\n self.trans[char] = u\"nie\"\n self.trans['您'] = 'nin'\n for char in u\"柠狞凝宁拧泞\":\n self.trans[char] = u\"ning\"\n for char in u\"牛扭钮纽\":\n self.trans[char] = u\"niu\"\n for char in u\"脓浓农弄\":\n self.trans[char] = u\"nong\"\n for char in u\"奴努怒\":\n self.trans[char] = u\"nu\"\n self.trans['暖'] = 'nuan'\n for char in u\"虐疟\":\n self.trans[char] = u\"nue\"\n for char in u\"挪懦糯诺\":\n self.trans[char] = u\"nuo\"\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in u\"欧鸥殴藕呕偶沤\":\n self.trans[char] = u\"ou\"\n for char in u\"啪趴爬帕怕琶\":\n self.trans[char] = u\"pa\"\n for char in u\"拍排牌徘湃派\":\n self.trans[char] = u\"pai\"\n for char in u\"攀潘盘磐盼畔判叛\":\n self.trans[char] = u\"pan\"\n for char in u\"乓庞旁耪胖\":\n self.trans[char] = u\"pang\"\n for char in u\"抛咆刨炮袍跑泡\":\n self.trans[char] = u\"pao\"\n for char in u\"呸胚培裴赔陪配佩沛\":\n self.trans[char] = u\"pei\"\n for char in u\"喷盆\":\n self.trans[char] = u\"pen\"\n for char in u\"砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰\":\n self.trans[char] = u\"peng\"\n for char in u\"坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬\":\n self.trans[char] = u\"pi\"\n for char in u\"篇偏片骗\":\n self.trans[char] = u\"pian\"\n for char in u\"飘漂瓢票\":\n self.trans[char] = u\"piao\"\n for char in u\"撇瞥\":\n self.trans[char] = u\"pie\"\n for char in u\"拼频贫品聘\":\n self.trans[char] = u\"pin\"\n for char in u\"乒坪苹萍平凭瓶评屏\":\n self.trans[char] = 
u\"ping\"\n for char in u\"坡泼颇婆破魄迫粕剖\":\n self.trans[char] = u\"po\"\n for char in u\"扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮\":\n self.trans[char] = u\"pu\"\n for char in u\"期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫\":\n self.trans[char] = u\"qi\"\n for char in u\"掐恰洽\":\n self.trans[char] = u\"qia\"\n for char in u\"牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉\":\n self.trans[char] = u\"qian\"\n for char in u\"枪呛腔羌墙蔷强抢\":\n self.trans[char] = u\"qiang\"\n for char in u\"橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍\":\n self.trans[char] = u\"qiao\"\n for char in u\"切茄且怯窃\":\n self.trans[char] = u\"qie\"\n for char in u\"钦侵亲秦琴勤芹擒禽寝沁\":\n self.trans[char] = u\"qin\"\n for char in u\"青轻氢倾卿清擎晴氰情顷请庆\":\n self.trans[char] = u\"qing\"\n for char in u\"琼穷\":\n self.trans[char] = u\"qiong\"\n for char in u\"秋丘邱球求囚酋泅\":\n self.trans[char] = u\"qiu\"\n for char in u\"趋区蛆曲躯屈驱渠取娶龋趣去\":\n self.trans[char] = u\"qu\"\n for char in u\"圈颧权醛泉全痊拳犬券劝\":\n self.trans[char] = u\"quan\"\n for char in u\"缺炔瘸却鹊榷确雀\":\n self.trans[char] = u\"que\"\n for char in u\"裙群\":\n self.trans[char] = u\"qun\"\n for char in u\"然燃冉染\":\n self.trans[char] = u\"ran\"\n for char in u\"瓤壤攘嚷让\":\n self.trans[char] = u\"rang\"\n for char in u\"饶扰绕\":\n self.trans[char] = u\"rao\"\n for char in u\"惹热\":\n self.trans[char] = u\"re\"\n for char in u\"壬仁人忍韧任认刃妊纫\":\n self.trans[char] = u\"ren\"\n for char in u\"扔仍\":\n self.trans[char] = u\"reng\"\n self.trans['日'] = 'ri'\n for char in u\"戎茸蓉荣融熔溶容绒冗\":\n self.trans[char] = u\"rong\"\n for char in u\"揉柔肉\":\n self.trans[char] = u\"rou\"\n for char in u\"茹蠕儒孺如辱乳汝入褥\":\n self.trans[char] = u\"ru\"\n for char in u\"软阮\":\n self.trans[char] = u\"ruan\"\n for char in u\"蕊瑞锐\":\n self.trans[char] = u\"rui\"\n for char in u\"闰润\":\n self.trans[char] = u\"run\"\n for char in u\"若弱\":\n self.trans[char] = u\"ruo\"\n for char in u\"撒洒萨\":\n self.trans[char] = u\"sa\"\n for char in u\"腮鳃塞赛\":\n self.trans[char] = u\"sai\"\n for char in u\"三叁伞散\":\n self.trans[char] = u\"san\"\n for char in u\"桑嗓丧\":\n self.trans[char] = u\"sang\"\n for char in u\"搔骚扫嫂\":\n self.trans[char] = u\"sao\"\n for char in u\"瑟色涩\":\n self.trans[char] = u\"se\"\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in u\"莎砂杀刹沙纱傻啥煞\":\n self.trans[char] = u\"sha\"\n for char in u\"筛晒\":\n self.trans[char] = u\"shai\"\n for char in u\"珊苫杉山删煽衫闪陕擅赡膳善汕扇缮\":\n self.trans[char] = u\"shan\"\n for char in u\"墒伤商赏晌上尚裳\":\n self.trans[char] = u\"shang\"\n for char in u\"梢捎稍烧芍勺韶少哨邵绍\":\n self.trans[char] = u\"shao\"\n for char in u\"奢赊蛇舌舍赦摄射慑涉社设\":\n self.trans[char] = u\"she\"\n for char in u\"砷申呻伸身深娠绅神沈审婶甚肾慎渗\":\n self.trans[char] = u\"shen\"\n for char in u\"声生甥牲升绳省盛剩胜圣\":\n self.trans[char] = u\"sheng\"\n for char in u\"师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试\":\n self.trans[char] = u\"shi\"\n for char in u\"收手首守寿授售受瘦兽\":\n self.trans[char] = u\"shou\"\n for char in u\"蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕\":\n self.trans[char] = u\"shu\"\n for char in u\"刷耍\":\n self.trans[char] = u\"shua\"\n for char in u\"摔衰甩帅\":\n self.trans[char] = u\"shuai\"\n for char in u\"栓拴\":\n self.trans[char] = u\"shuan\"\n for char in u\"霜双爽\":\n self.trans[char] = u\"shuang\"\n for char in u\"谁水睡税\":\n self.trans[char] = u\"shui\"\n for char in u\"吮瞬顺舜\":\n self.trans[char] = u\"shun\"\n for char in u\"说硕朔烁\":\n self.trans[char] = u\"shuo\"\n for char in u\"斯撕嘶思私司丝死肆寺嗣四伺似饲巳\":\n self.trans[char] = u\"si\"\n for char in u\"松耸怂颂送宋讼诵\":\n self.trans[char] = u\"song\"\n for char in u\"搜艘擞\":\n self.trans[char] = u\"sou\"\n for char in u\"嗽苏酥俗素速粟僳塑溯宿诉肃\":\n self.trans[char] = u\"su\"\n for char in u\"酸蒜算\":\n 
self.trans[char] = u\"suan\"\n for char in u\"虽隋随绥髓碎岁穗遂隧祟\":\n self.trans[char] = u\"sui\"\n for char in u\"孙损笋\":\n self.trans[char] = u\"sun\"\n for char in u\"蓑梭唆缩琐索锁所\":\n self.trans[char] = u\"suo\"\n for char in u\"塌他它她塔獭挞蹋踏\":\n self.trans[char] = u\"ta\"\n for char in u\"胎苔抬台泰酞太态汰\":\n self.trans[char] = u\"tai\"\n for char in u\"坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭\":\n self.trans[char] = u\"tan\"\n for char in u\"汤塘搪堂棠膛唐糖倘躺淌趟烫\":\n self.trans[char] = u\"tang\"\n for char in u\"掏涛滔绦萄桃逃淘陶讨套\":\n self.trans[char] = u\"tao\"\n self.trans['特'] = 'te'\n for char in u\"藤腾疼誊\":\n self.trans[char] = u\"teng\"\n for char in u\"梯剔踢锑提题蹄啼体替嚏惕涕剃屉\":\n self.trans[char] = u\"ti\"\n for char in u\"兲天添填田甜恬舔腆\":\n self.trans[char] = u\"tian\"\n for char in u\"挑条迢眺跳\":\n self.trans[char] = u\"tiao\"\n for char in u\"贴铁帖\":\n self.trans[char] = u\"tie\"\n for char in u\"厅听烃汀廷停亭庭挺艇\":\n self.trans[char] = u\"ting\"\n for char in u\"通桐酮瞳同铜彤童桶捅筒统痛\":\n self.trans[char] = u\"tong\"\n for char in u\"偷投头透\":\n self.trans[char] = u\"tou\"\n for char in u\"凸秃突图徒途涂屠土吐兔\":\n self.trans[char] = u\"tu\"\n for char in u\"湍团\":\n self.trans[char] = u\"tuan\"\n for char in u\"推颓腿蜕褪退\":\n self.trans[char] = u\"tui\"\n for char in u\"吞屯臀\":\n self.trans[char] = u\"tun\"\n for char in u\"拖托脱鸵陀驮驼椭妥拓唾\":\n self.trans[char] = u\"tuo\"\n for char in u\"挖哇蛙洼娃瓦袜\":\n self.trans[char] = u\"wa\"\n for char in u\"歪外\":\n self.trans[char] = u\"wai\"\n for char in u\"豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞\":\n self.trans[char] = u\"wan\"\n for char in u\"汪王亡枉网往旺望忘妄\":\n self.trans[char] = u\"wang\"\n for char in u\"威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫\":\n self.trans[char] = u\"wei\"\n for char in u\"瘟温蚊文闻纹吻稳紊问\":\n self.trans[char] = u\"wen\"\n for char in u\"嗡翁瓮\":\n self.trans[char] = u\"weng\"\n for char in u\"挝蜗涡窝我斡卧握沃\":\n self.trans[char] = u\"wo\"\n for char in u\"巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误\":\n self.trans[char] = u\"wu\"\n for char in u\"昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细\":\n self.trans[char] = u\"xi\"\n for char in u\"瞎虾匣霞辖暇峡侠狭下厦夏吓\":\n self.trans[char] = u\"xia\"\n for char in u\"掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线\":\n self.trans[char] = u\"xian\"\n for char in u\"相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象\":\n self.trans[char] = u\"xiang\"\n for char in u\"萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效\":\n self.trans[char] = u\"xiao\"\n for char in u\"楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑\":\n self.trans[char] = u\"xie\"\n for char in u\"薪芯锌欣辛新忻心信衅\":\n self.trans[char] = u\"xin\"\n for char in u\"星腥猩惺兴刑型形邢行醒幸杏性姓\":\n self.trans[char] = u\"xing\"\n for char in u\"兄凶胸匈汹雄熊\":\n self.trans[char] = u\"xiong\"\n for char in u\"休修羞朽嗅锈秀袖绣\":\n self.trans[char] = u\"xiu\"\n for char in u\"墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续\":\n self.trans[char] = u\"xu\"\n for char in u\"轩喧宣悬旋玄选癣眩绚\":\n self.trans[char] = u\"xuan\"\n for char in u\"靴薛学穴雪血\":\n self.trans[char] = u\"xue\"\n for char in u\"勋熏循旬询寻驯巡殉汛训讯逊迅\":\n self.trans[char] = u\"xun\"\n for char in u\"压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶\":\n self.trans[char] = u\"ya\"\n for char in u\"焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验\":\n self.trans[char] = u\"yan\"\n for char in u\"殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾\":\n self.trans[char] = u\"yang\"\n for char in u\"邀腰妖瑶摇尧遥窑谣姚咬舀药要耀\":\n self.trans[char] = u\"yao\"\n for char in u\"椰噎耶爷野冶也页掖业叶曳腋夜液\":\n self.trans[char] = u\"ye\"\n for char in u\"一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎\":\n self.trans[char] = u\"yi\"\n for char in u\"茵荫因殷音阴姻吟银淫寅饮尹引隐印\":\n self.trans[char] = u\"yin\"\n for char in u\"英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映\":\n self.trans[char] = u\"ying\"\n self.trans['哟'] = 'yo'\n for char in u\"拥佣臃痈庸雍踊蛹咏泳涌永恿勇用\":\n self.trans[char] = u\"yong\"\n for char in u\"幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂\":\n 
self.trans[char] = u\"you\"\n for char in u\"淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭\":\n self.trans[char] = u\"yu\"\n for char in u\"鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院\":\n self.trans[char] = u\"yuan\"\n for char in u\"曰约越跃钥岳粤月悦阅\":\n self.trans[char] = u\"yue\"\n for char in u\"耘云郧匀陨允运蕴酝晕韵孕\":\n self.trans[char] = u\"yun\"\n for char in u\"匝砸杂\":\n self.trans[char] = u\"za\"\n for char in u\"栽哉灾宰载再在\":\n self.trans[char] = u\"zai\"\n for char in u\"咱攒暂赞\":\n self.trans[char] = u\"zan\"\n for char in u\"赃脏葬\":\n self.trans[char] = u\"zang\"\n for char in u\"遭糟凿藻枣早澡蚤躁噪造皂灶燥\":\n self.trans[char] = u\"zao\"\n for char in u\"责择则泽\":\n self.trans[char] = u\"ze\"\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in u\"增憎曾赠\":\n self.trans[char] = u\"zeng\"\n for char in u\"扎喳渣札轧铡闸眨栅榨咋乍炸诈\":\n self.trans[char] = u\"zha\"\n for char in u\"摘斋宅窄债寨\":\n self.trans[char] = u\"zhai\"\n for char in u\"瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽\":\n self.trans[char] = u\"zhan\"\n for char in u\"樟章彰漳张掌涨杖丈帐账仗胀瘴障\":\n self.trans[char] = u\"zhang\"\n for char in u\"招昭找沼赵照罩兆肇召\":\n self.trans[char] = u\"zhao\"\n for char in u\"遮折哲蛰辙者锗蔗这浙\":\n self.trans[char] = u\"zhe\"\n for char in u\"珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳\":\n self.trans[char] = u\"zhen\"\n for char in u\"蒸挣睁征狰争怔整拯正政帧症郑证\":\n self.trans[char] = u\"zheng\"\n for char in u\"芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒\":\n self.trans[char] = u\"zhi\"\n for char in u\"中盅忠钟衷终种肿重仲众\":\n self.trans[char] = u\"zhong\"\n for char in u\"舟周州洲诌粥轴肘帚咒皱宙昼骤\":\n self.trans[char] = u\"zhou\"\n for char in u\"珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻\":\n self.trans[char] = u\"zhu\"\n for char in u\"抓爪\":\n self.trans[char] = u\"zhua\"\n self.trans['拽'] = 'zhuai'\n for char in u\"专砖转撰赚篆\":\n self.trans[char] = u\"zhuan\"\n for char in u\"桩庄装妆撞壮状\":\n self.trans[char] = u\"zhuang\"\n for char in u\"椎锥追赘坠缀\":\n self.trans[char] = u\"zhui\"\n for char in u\"谆准\":\n self.trans[char] = u\"zhun\"\n for char in u\"捉拙卓桌琢茁酌啄着灼浊\":\n self.trans[char] = u\"zhuo\"\n for char in u\"兹咨资姿滋淄孜紫仔籽滓子自渍字\":\n self.trans[char] = u\"zi\"\n for char in u\"鬃棕踪宗综总纵\":\n self.trans[char] = u\"zong\"\n for char in u\"邹走奏揍\":\n self.trans[char] = u\"zou\"\n for char in u\"租足卒族祖诅阻组\":\n self.trans[char] = u\"zu\"\n for char in u\"钻纂\":\n self.trans[char] = u\"zuan\"\n for char in u\"嘴醉最罪\":\n self.trans[char] = u\"zui\"\n for char in u\"尊遵\":\n self.trans[char] = u\"zun\"\n for char in u\"昨左佐柞做作坐座\":\n self.trans[char] = u\"zuo\"\n # from: https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans[u\"ଂ\"] = \"anusvara\"\n self.trans[u\"ઇ\"] = \"i\"\n self.trans[u\"എ\"] = \"e\"\n self.trans[u\"ગ\"] = \"ga\"\n self.trans[u\"ਜ\"] = \"ja\"\n self.trans[u\"ഞ\"] = \"nya\"\n self.trans[u\"ଢ\"] = \"ddha\"\n self.trans[u\"ધ\"] = \"dha\"\n self.trans[u\"ਬ\"] = \"ba\"\n self.trans[u\"മ\"] = \"ma\"\n self.trans[u\"ଲ\"] = \"la\"\n self.trans[u\"ષ\"] = \"ssa\"\n self.trans[u\"਼\"] = \"nukta\"\n self.trans[u\"ാ\"] = \"aa\"\n self.trans[u\"ୂ\"] = \"uu\"\n self.trans[u\"ે\"] = \"e\"\n self.trans[u\"ੌ\"] = \"au\"\n self.trans[u\"ൎ\"] = \"reph\"\n self.trans[u\"ੜ\"] = \"rra\"\n self.trans[u\"՞\"] = \"?\"\n self.trans[u\"ୢ\"] = \"l\"\n self.trans[u\"૧\"] = \"1\"\n self.trans[u\"੬\"] = \"6\"\n self.trans[u\"൮\"] = \"8\"\n self.trans[u\"୲\"] = \"quarter\"\n self.trans[u\"ൾ\"] = \"ll\"\n self.trans[u\"ਇ\"] = \"i\"\n self.trans[u\"ഉ\"] = \"u\"\n self.trans[u\"ઌ\"] = \"l\"\n self.trans[u\"ਗ\"] = \"ga\"\n self.trans[u\"ങ\"] = \"nga\"\n self.trans[u\"ଝ\"] = \"jha\"\n self.trans[u\"જ\"] = \"ja\"\n self.trans[u\"؟\"] = \"?\"\n self.trans[u\"ਧ\"] 
= \"dha\"\n self.trans[u\"ഩ\"] = \"nnna\"\n self.trans[u\"ଭ\"] = \"bha\"\n self.trans[u\"બ\"] = \"ba\"\n self.trans[u\"ഹ\"] = \"ha\"\n self.trans[u\"ଽ\"] = \"avagraha\"\n self.trans[u\"઼\"] = \"nukta\"\n self.trans[u\"ੇ\"] = \"ee\"\n self.trans[u\"୍\"] = \"virama\"\n self.trans[u\"ૌ\"] = \"au\"\n self.trans[u\"੧\"] = \"1\"\n self.trans[u\"൩\"] = \"3\"\n self.trans[u\"୭\"] = \"7\"\n self.trans[u\"૬\"] = \"6\"\n self.trans[u\"൹\"] = \"mark\"\n self.trans[u\"ਖ਼\"] = \"khha\"\n self.trans[u\"ਂ\"] = \"bindi\"\n self.trans[u\"ഈ\"] = \"ii\"\n self.trans[u\"ઍ\"] = \"e\"\n self.trans[u\"ଌ\"] = \"l\"\n self.trans[u\"ഘ\"] = \"gha\"\n self.trans[u\"ઝ\"] = \"jha\"\n self.trans[u\"ଡ଼\"] = \"rra\"\n self.trans[u\"ਢ\"] = \"ddha\"\n self.trans[u\"ന\"] = \"na\"\n self.trans[u\"ભ\"] = \"bha\"\n self.trans[u\"ବ\"] = \"ba\"\n self.trans[u\"ਲ\"] = \"la\"\n self.trans[u\"സ\"] = \"sa\"\n self.trans[u\"ઽ\"] = \"avagraha\"\n self.trans[u\"଼\"] = \"nukta\"\n self.trans[u\"ੂ\"] = \"uu\"\n self.trans[u\"ൈ\"] = \"ai\"\n self.trans[u\"્\"] = \"virama\"\n self.trans[u\"ୌ\"] = \"au\"\n self.trans[u\"൨\"] = \"2\"\n self.trans[u\"૭\"] = \"7\"\n self.trans[u\"୬\"] = \"6\"\n self.trans[u\"ੲ\"] = \"iri\"\n self.trans[u\"ഃ\"] = \"visarga\"\n self.trans[u\"ં\"] = \"anusvara\"\n self.trans[u\"ଇ\"] = \"i\"\n self.trans[u\"ഓ\"] = \"oo\"\n self.trans[u\"ଗ\"] = \"ga\"\n self.trans[u\"ਝ\"] = \"jha\"\n self.trans[u\"?\"] = \"?\"\n self.trans[u\"ണ\"] = \"nna\"\n self.trans[u\"ઢ\"] = \"ddha\"\n self.trans[u\"ଧ\"] = \"dha\"\n self.trans[u\"ਭ\"] = \"bha\"\n self.trans[u\"ള\"] = \"lla\"\n self.trans[u\"લ\"] = \"la\"\n self.trans[u\"ଷ\"] = \"ssa\"\n self.trans[u\"ൃ\"] = \"r\"\n self.trans[u\"ૂ\"] = \"uu\"\n self.trans[u\"େ\"] = \"e\"\n self.trans[u\"੍\"] = \"virama\"\n self.trans[u\"ୗ\"] = \"mark\"\n self.trans[u\"ൣ\"] = \"ll\"\n self.trans[u\"ૢ\"] = \"l\"\n self.trans[u\"୧\"] = \"1\"\n self.trans[u\"੭\"] = \"7\"\n self.trans[u\"൳\"] = \"1/4\"\n self.trans[u\"୷\"] = \"sixteenths\"\n self.trans[u\"ଆ\"] = \"aa\"\n self.trans[u\"ઋ\"] = \"r\"\n self.trans[u\"ഊ\"] = \"uu\"\n self.trans[u\"ਐ\"] = \"ai\"\n self.trans[u\"ଖ\"] = \"kha\"\n self.trans[u\"છ\"] = \"cha\"\n self.trans[u\"ച\"] = \"ca\"\n self.trans[u\"ਠ\"] = \"ttha\"\n self.trans[u\"ଦ\"] = \"da\"\n self.trans[u\"ફ\"] = \"pha\"\n self.trans[u\"പ\"] = \"pa\"\n self.trans[u\"ਰ\"] = \"ra\"\n self.trans[u\"ଶ\"] = \"sha\"\n self.trans[u\"ഺ\"] = \"ttta\"\n self.trans[u\"ੀ\"] = \"ii\"\n self.trans[u\"ો\"] = \"o\"\n self.trans[u\"ൊ\"] = \"o\"\n self.trans[u\"ୖ\"] = \"mark\"\n self.trans[u\"୦\"] = \"0\"\n self.trans[u\"૫\"] = \"5\"\n self.trans[u\"൪\"] = \"4\"\n self.trans[u\"ੰ\"] = \"tippi\"\n self.trans[u\"୶\"] = \"eighth\"\n self.trans[u\"ൺ\"] = \"nn\"\n self.trans[u\"ଁ\"] = \"candrabindu\"\n self.trans[u\"അ\"] = \"a\"\n self.trans[u\"ઐ\"] = \"ai\"\n self.trans[u\"ക\"] = \"ka\"\n self.trans[u\"ਸ਼\"] = \"sha\"\n self.trans[u\"ਛ\"] = \"cha\"\n self.trans[u\"ଡ\"] = \"dda\"\n self.trans[u\"ઠ\"] = \"ttha\"\n self.trans[u\"ഥ\"] = \"tha\"\n self.trans[u\"ਫ\"] = \"pha\"\n self.trans[u\"ર\"] = \"ra\"\n self.trans[u\"വ\"] = \"va\"\n self.trans[u\"ୁ\"] = \"u\"\n self.trans[u\"ી\"] = \"ii\"\n self.trans[u\"ੋ\"] = \"oo\"\n self.trans[u\"ૐ\"] = \"om\"\n self.trans[u\"ୡ\"] = \"ll\"\n self.trans[u\"ૠ\"] = \"rr\"\n self.trans[u\"੫\"] = \"5\"\n self.trans[u\"ୱ\"] = \"wa\"\n self.trans[u\"૰\"] = \"sign\"\n self.trans[u\"൵\"] = \"quarters\"\n self.trans[u\"ਫ਼\"] = \"fa\"\n self.trans[u\"ઁ\"] = \"candrabindu\"\n self.trans[u\"ਆ\"] = \"aa\"\n self.trans[u\"ઑ\"] = \"o\"\n self.trans[u\"ଐ\"] = \"ai\"\n 
self.trans[u\"ഔ\"] = \"au\"\n self.trans[u\"ਖ\"] = \"kha\"\n self.trans[u\"ડ\"] = \"dda\"\n self.trans[u\"ଠ\"] = \"ttha\"\n self.trans[u\"ത\"] = \"ta\"\n self.trans[u\"ਦ\"] = \"da\"\n self.trans[u\"ର\"] = \"ra\"\n self.trans[u\"ഴ\"] = \"llla\"\n self.trans[u\"ુ\"] = \"u\"\n self.trans[u\"ୀ\"] = \"ii\"\n self.trans[u\"ൄ\"] = \"rr\"\n self.trans[u\"ૡ\"] = \"ll\"\n self.trans[u\"ୠ\"] = \"rr\"\n self.trans[u\"੦\"] = \"0\"\n self.trans[u\"૱\"] = \"sign\"\n self.trans[u\"୰\"] = \"isshar\"\n self.trans[u\"൴\"] = \"1/2\"\n self.trans[u\"ਁ\"] = \"bindi\"\n self.trans[u\"આ\"] = \"aa\"\n self.trans[u\"ଋ\"] = \"r\"\n self.trans[u\"ഏ\"] = \"ee\"\n self.trans[u\"ખ\"] = \"kha\"\n self.trans[u\"ଛ\"] = \"cha\"\n self.trans[u\"ട\"] = \"tta\"\n self.trans[u\"ਡ\"] = \"dda\"\n self.trans[u\"દ\"] = \"da\"\n self.trans[u\"ଫ\"] = \"pha\"\n self.trans[u\"യ\"] = \"ya\"\n self.trans[u\"શ\"] = \"sha\"\n self.trans[u\"ി\"] = \"i\"\n self.trans[u\"ੁ\"] = \"u\"\n self.trans[u\"ୋ\"] = \"o\"\n self.trans[u\"ੑ\"] = \"udaat\"\n self.trans[u\"૦\"] = \"0\"\n self.trans[u\"୫\"] = \"5\"\n self.trans[u\"൯\"] = \"9\"\n self.trans[u\"ੱ\"] = \"addak\"\n self.trans[u\"ൿ\"] = \"k\"\n self.trans[u\"ആ\"] = \"aa\"\n self.trans[u\"ଊ\"] = \"uu\"\n self.trans[u\"એ\"] = \"e\"\n self.trans[u\"ਔ\"] = \"au\"\n self.trans[u\"ഖ\"] = \"kha\"\n self.trans[u\"ଚ\"] = \"ca\"\n self.trans[u\"ટ\"] = \"tta\"\n self.trans[u\"ਤ\"] = \"ta\"\n self.trans[u\"ദ\"] = \"da\"\n self.trans[u\"ପ\"] = \"pa\"\n self.trans[u\"ય\"] = \"ya\"\n self.trans[u\"ശ\"] = \"sha\"\n self.trans[u\"િ\"] = \"i\"\n self.trans[u\"െ\"] = \"e\"\n self.trans[u\"൦\"] = \"0\"\n self.trans[u\"୪\"] = \"4\"\n self.trans[u\"૯\"] = \"9\"\n self.trans[u\"ੴ\"] = \"onkar\"\n self.trans[u\"ଅ\"] = \"a\"\n self.trans[u\"ਏ\"] = \"ee\"\n self.trans[u\"କ\"] = \"ka\"\n self.trans[u\"ઔ\"] = \"au\"\n self.trans[u\"ਟ\"] = \"tta\"\n self.trans[u\"ഡ\"] = \"dda\"\n self.trans[u\"ଥ\"] = \"tha\"\n self.trans[u\"ત\"] = \"ta\"\n self.trans[u\"ਯ\"] = \"ya\"\n self.trans[u\"റ\"] = \"rra\"\n self.trans[u\"ଵ\"] = \"va\"\n self.trans[u\"ਿ\"] = \"i\"\n self.trans[u\"ു\"] = \"u\"\n self.trans[u\"ૄ\"] = \"rr\"\n self.trans[u\"ൡ\"] = \"ll\"\n self.trans[u\"੯\"] = \"9\"\n self.trans[u\"൱\"] = \"100\"\n self.trans[u\"୵\"] = \"sixteenth\"\n self.trans[u\"અ\"] = \"a\"\n self.trans[u\"ਊ\"] = \"uu\"\n self.trans[u\"ഐ\"] = \"ai\"\n self.trans[u\"ક\"] = \"ka\"\n self.trans[u\"ଔ\"] = \"au\"\n self.trans[u\"ਚ\"] = \"ca\"\n self.trans[u\"ഠ\"] = \"ttha\"\n self.trans[u\"થ\"] = \"tha\"\n self.trans[u\"ତ\"] = \"ta\"\n self.trans[u\"ਪ\"] = \"pa\"\n self.trans[u\"ര\"] = \"ra\"\n self.trans[u\"વ\"] = \"va\"\n self.trans[u\"ീ\"] = \"ii\"\n self.trans[u\"ૅ\"] = \"e\"\n self.trans[u\"ୄ\"] = \"rr\"\n self.trans[u\"ൠ\"] = \"rr\"\n self.trans[u\"ਜ਼\"] = \"za\"\n self.trans[u\"੪\"] = \"4\"\n self.trans[u\"൰\"] = \"10\"\n self.trans[u\"୴\"] = \"quarters\"\n self.trans[u\"ਅ\"] = \"a\"\n self.trans[u\"ഋ\"] = \"r\"\n self.trans[u\"ઊ\"] = \"uu\"\n self.trans[u\"ଏ\"] = \"e\"\n self.trans[u\"ਕ\"] = \"ka\"\n self.trans[u\"ഛ\"] = \"cha\"\n self.trans[u\"ચ\"] = \"ca\"\n self.trans[u\"ଟ\"] = \"tta\"\n self.trans[u\"ਥ\"] = \"tha\"\n self.trans[u\"ഫ\"] = \"pha\"\n self.trans[u\"પ\"] = \"pa\"\n self.trans[u\"ଯ\"] = \"ya\"\n self.trans[u\"ਵ\"] = \"va\"\n self.trans[u\"ି\"] = \"i\"\n self.trans[u\"ോ\"] = \"oo\"\n self.trans[u\"ୟ\"] = \"yya\"\n self.trans[u\"൫\"] = \"5\"\n self.trans[u\"૪\"] = \"4\"\n self.trans[u\"୯\"] = \"9\"\n self.trans[u\"ੵ\"] = \"yakash\"\n self.trans[u\"ൻ\"] = \"n\"\n self.trans[u\"ઃ\"] = \"visarga\"\n self.trans[u\"ം\"] = \"anusvara\"\n 
self.trans[u\"ਈ\"] = \"ii\"\n self.trans[u\"ઓ\"] = \"o\"\n self.trans[u\"ഒ\"] = \"o\"\n self.trans[u\"ਘ\"] = \"gha\"\n self.trans[u\"ଞ\"] = \"nya\"\n self.trans[u\"ણ\"] = \"nna\"\n self.trans[u\"ഢ\"] = \"ddha\"\n self.trans[u\"ਲ਼\"] = \"lla\"\n self.trans[u\"ਨ\"] = \"na\"\n self.trans[u\"ମ\"] = \"ma\"\n self.trans[u\"ળ\"] = \"lla\"\n self.trans[u\"ല\"] = \"la\"\n self.trans[u\"ਸ\"] = \"sa\"\n self.trans[u\"¿\"] = \"?\"\n self.trans[u\"ା\"] = \"aa\"\n self.trans[u\"ૃ\"] = \"r\"\n self.trans[u\"ൂ\"] = \"uu\"\n self.trans[u\"ੈ\"] = \"ai\"\n self.trans[u\"ૣ\"] = \"ll\"\n self.trans[u\"ൢ\"] = \"l\"\n self.trans[u\"੨\"] = \"2\"\n self.trans[u\"୮\"] = \"8\"\n self.trans[u\"൲\"] = \"1000\"\n self.trans[u\"ਃ\"] = \"visarga\"\n self.trans[u\"ଉ\"] = \"u\"\n self.trans[u\"ઈ\"] = \"ii\"\n self.trans[u\"ਓ\"] = \"oo\"\n self.trans[u\"ଙ\"] = \"nga\"\n self.trans[u\"ઘ\"] = \"gha\"\n self.trans[u\"ഝ\"] = \"jha\"\n self.trans[u\"ਣ\"] = \"nna\"\n self.trans[u\"ન\"] = \"na\"\n self.trans[u\"ഭ\"] = \"bha\"\n self.trans[u\"ଜ\"] = \"ja\"\n self.trans[u\"ହ\"] = \"ha\"\n self.trans[u\"સ\"] = \"sa\"\n self.trans[u\"ഽ\"] = \"avagraha\"\n self.trans[u\"ૈ\"] = \"ai\"\n self.trans[u\"്\"] = \"virama\"\n self.trans[u\"୩\"] = \"3\"\n self.trans[u\"૨\"] = \"2\"\n self.trans[u\"൭\"] = \"7\"\n self.trans[u\"ੳ\"] = \"ura\"\n self.trans[u\"ൽ\"] = \"l\"\n self.trans[u\"ઉ\"] = \"u\"\n self.trans[u\"ଈ\"] = \"ii\"\n self.trans[u\"ഌ\"] = \"l\"\n self.trans[u\"ઙ\"] = \"nga\"\n self.trans[u\"ଘ\"] = \"gha\"\n self.trans[u\"ജ\"] = \"ja\"\n self.trans[u\"ਞ\"] = \"nya\"\n self.trans[u\"ନ\"] = \"na\"\n self.trans[u\"ബ\"] = \"ba\"\n self.trans[u\"ਮ\"] = \"ma\"\n self.trans[u\"હ\"] = \"ha\"\n self.trans[u\"ସ\"] = \"sa\"\n self.trans[u\"ਾ\"] = \"aa\"\n self.trans[u\"ૉ\"] = \"o\"\n self.trans[u\"ୈ\"] = \"ai\"\n self.trans[u\"ൌ\"] = \"au\"\n self.trans[u\"૩\"] = \"3\"\n self.trans[u\"୨\"] = \"2\"\n self.trans[u\"൬\"] = \"6\"\n self.trans[u\"੮\"] = \"8\"\n self.trans[u\"ർ\"] = \"rr\"\n self.trans[u\"ଃ\"] = \"visarga\"\n self.trans[u\"ഇ\"] = \"i\"\n self.trans[u\"ਉ\"] = \"u\"\n self.trans[u\"ଓ\"] = \"o\"\n self.trans[u\"ഗ\"] = \"ga\"\n self.trans[u\"ਙ\"] = \"nga\"\n self.trans[u\"ઞ\"] = \"nya\"\n self.trans[u\"ଣ\"] = \"nna\"\n self.trans[u\"ധ\"] = \"dha\"\n self.trans[u\"મ\"] = \"ma\"\n self.trans[u\"ଳ\"] = \"lla\"\n self.trans[u\"ഷ\"] = \"ssa\"\n self.trans[u\"ਹ\"] = \"ha\"\n self.trans[u\"ਗ਼\"] = \"ghha\"\n self.trans[u\"ા\"] = \"aa\"\n self.trans[u\"ୃ\"] = \"r\"\n self.trans[u\"േ\"] = \"ee\"\n self.trans[u\"ൗ\"] = \"mark\"\n self.trans[u\"ଢ଼\"] = \"rha\"\n self.trans[u\"ୣ\"] = \"ll\"\n self.trans[u\"൧\"] = \"1\"\n self.trans[u\"੩\"] = \"3\"\n self.trans[u\"૮\"] = \"8\"\n self.trans[u\"୳\"] = \"half\"\n for char in self.trans:\n value = self.trans[char]\n if value == \"?\":\n continue\n while value.encode(encoding, 'replace').decode(encoding) == \"?\" and value in self.trans:\n assert value != self.trans[value], \"%r == self.trans[%r]!\" % (value, value)\n value = self.trans[value]\n self.trans[char] = value", "def test_response_no_charset_with_iso_8859_1_content():\n content = \"Accented: Österreich abcdefghijklmnopqrstuzwxyz\".encode(\"iso-8859-1\")\n headers = {\"Content-Type\": \"text/plain\"}\n response = httpx.Response(\n 200, content=content, headers=headers, default_encoding=autodetect\n )\n assert response.text == \"Accented: Österreich abcdefghijklmnopqrstuzwxyz\"\n assert response.charset_encoding is None", "def _get_unicode_value(value: Union[Text, bytes]) -> Text:\n decoded_value = stats_util.maybe_get_utf8(value)\n # Check if we 
have a valid utf-8 string. If not, assign a placeholder.\n if decoded_value is None:\n _NON_UTF8_VALUES_COUNTER.inc()\n decoded_value = constants.NON_UTF8_PLACEHOLDER\n return decoded_value", "def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text" ]
[ "0.65442806", "0.6308765", "0.62095857", "0.6024676", "0.59617436", "0.5852157", "0.58336884", "0.5832144", "0.5830336", "0.5777034", "0.5749223", "0.5742238", "0.5740002", "0.57341146", "0.57124454", "0.56925076", "0.5677184", "0.5604121", "0.5572304", "0.5555498", "0.5553839", "0.55488086", "0.55020696", "0.5498202", "0.5495886", "0.54845124", "0.5481835", "0.5480774", "0.54369485", "0.5433927" ]
0.64852524
1
Convert page to str
def __str__(self): return str(self.page)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def page_to_string(page, alph):\n s = ''\n links = Measurements.get_all_links(page)\n for l in links:\n s += alph[l]\n return s", "def process_page(page):\n content = utils.any2unicode(page, 'utf8').strip()\n content = re.sub(r\"[^a-zA-Z]\", \" \", content)\n \n return content", "def url_to_text(self, url):\r\n return url.toString()", "def textify(read_pdf,spage,epage):\n\n page_text = \"\"\n for page in range(spage, epage):\n page_content = read_pdf.getPage(page)\n page_text += page_content.extractText()\n\n full_text = page_text #.encode('utf-8')\n return full_text", "def __str__(self) -> str:\n return soupify(self.html).get_text()", "def __str__(self) -> str:\n return soupify(self.html).get_text()", "def __str__(self):\n return self.page.get_title()", "def gzipPage(page):\n #if not hasattr(page,\"info\"):\n # return(\"\")\n data = object()\n # Check if content encoding is gzip\n if page.info().get('Content-Encoding') == 'gzip':\n buf = StringIO(page.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\n else :\n data = page.read()\n return(data)", "def pageText(self, url):\n try:\n request = urllib2.Request(url)\n request.add_header(\"User-Agent\", \"siruta_postcodes.py 1.0\")\n response = urllib2.urlopen(request)\n text = response.read()\n response.close()\n # When you load to many users, urllib2 can give this error.\n except urllib2.HTTPError, urllib2.URLError:\n self.loge(u\"Server or connection error. Pausing for 10 seconds... \" + time.strftime(\"%d %b %Y %H:%M:%S (UTC)\", time.gmtime()) )\n response.close()\n time.sleep(10)\n return pageText(url)\n return text", "def to_str(self) -> str:", "def nodeToString(cls, node):\n return lxml.etree.tostring(node, method='html').decode()", "def get_page(page):\n\timport urllib2\n\tsource = urllib2.urlopen(page)\n\treturn source.read()", "def getpage(self, page: str, limit: int = 5, lang: str = \"en\") -> str:\n\n tags = BeautifulSoup(\n requests.get(self.url.format(lang, page)).text, \"lxml\"\n ).select(self.selector)\n res = \"\"\n for i in range(min(limit, len(tags))):\n res += tags[i].text + \"\\n\\n\"\n return res", "def get_page_title(page):\n\n html = BeautifulSoup(page, \"html.parser\")\n return html.title.string", "def get_pi_as_string():\n\n request = requests.get(\"http://www.eveandersson.com/pi/digits/10000\")\n doc = BeautifulSoup(request.text, \"html.parser\").select_one(\"pre\").text.strip()\n pi_string = doc.replace(\" \", \"\").replace(\".\", \"\").replace(\"\\n\", \"\")\n return pi_string", "def __str__(self):\n if self.hocr is None:\n return ''\n body = self.hocr.find(\".//%sbody\"%(self.xmlns))\n if body:\n return self._get_element_text(body).encode('utf-8') # XML gives unicode\n else:\n return ''", "def str(x):\n return str(x)[:200]", "def alchemy_page_text(url):\n # import AlchemyAPI\n try:\n # Create an AlchemyAPI object.\n alchemyObj = AlchemyAPI.AlchemyAPI()\n\n # Load the API key from disk.\n alchemyObj.loadAPIKey(\"api_key.txt\")\n\n # Extract page text from a web URL (ignoring navigation links, ads, etc.).\n result = alchemyObj.URLGetText(url)\n\n # Result returned in xml format with node 'text' containing the text\n text = xml_to_text(result)\n # remove new lines, tabs, and whitespace\n text = clean_text(text)\n\n return text\n\n except TypeError:\n return \"There is a TypeError in alchemy_page_text\"", "def load_page(url: str) -> str:\n try:\n response = urlopen(url)\n\n if response.status == 200:\n body_text = str(response.read())\n return body_text\n return \"\"\n except URLError:\n return \"\"", "def 
get_string(self):\n return json.dumps(self.document, indent=4)", "def raw_text(self):\n\t\t\n\t\t #eliminating more headers\n\t\traw_text = re.sub(r\".*OPERATIONS O[PF].*\",r\"\",self.doc)\n\t\traw_text = re.sub(r\"Page \\d+\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*B[lL]OCK.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*WEST GULF.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\".*NAVAL FORCES ON.*\",r\"\",raw_text)\n\t\traw_text = re.sub(r\"\\s\",r\" \", raw_text) #eliminating tabs etc. \t \t \n\t\treturn raw_text", "def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, no_punc=True) # with symbols\n # return title + content # without symbols", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def process_pdf(path):\r\n str = \"\"\r\n try:\r\n pages = layout_scanner.get_pages(path) \r\n i = 0\r\n l = len(pages)\r\n while i < l: \r\n str += pages[i]\r\n i += 1\r\n except Exception, e:\r\n return g_error_template % e, \"\" \r\n \r\n return \"\", str", "def _tostr(t):\n\treturn t.__unicode__()", "def __str__(self):\n buf = StringIO()\n self.write_to(buf)\n return buf.getvalue()", "def safeToString():", "def _html_text(self, html):\n ee = None\n try: return html.html_text()\n except Exception, e: ee = e; pass\n try: return html.xml_text()\n except Exception, e: print \"HtmlDocument/text\", ee, e; pass\n try: return str(html)\n except Exception, e: print \"HtmlDocument/text\", e; return \"&nbsp;\"", "def __init__(self, page):\r\n try:\r\n self.page = page.encode(\"utf8\")\r\n except UnicodeDecodeError:\r\n self.page = page.decode('iso-8859-1').encode('utf8')", "def _get_page_html(url: str, data: dict=None, headers: dict=None) -> str:\n response = requests.post(url, data=data, headers=headers) # TODO use a connection pool (single threading it like this is monumentally inefficient)\n content = response.content\n decoded = content.decode('utf-8') #TODO this is an assumption. we should probably get the charset header and use that\n return decoded" ]
[ "0.72416747", "0.6311964", "0.61297363", "0.61254025", "0.6117378", "0.6117378", "0.5759532", "0.5716281", "0.5664423", "0.5635445", "0.5631912", "0.5611601", "0.55995196", "0.5591691", "0.5526723", "0.551222", "0.5453172", "0.5440903", "0.5410642", "0.54025006", "0.5399925", "0.5399182", "0.5393333", "0.5363641", "0.5351531", "0.534183", "0.5328885", "0.52841896", "0.5279185", "0.5270547" ]
0.68699765
1
Returns the type of applying the binary operator with the current type and the type of the right operand, or returns None if the operation is not valid
def binop_type(cls, op, right_type): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_operation(\n statement: ast.BinOp,\n) -> Optional[Union[int, float, str, bytes]]:\n if isinstance(statement.left, ast.BinOp):\n left = evaluate_operation(statement.left)\n else:\n left = evaluate_node(statement.left)\n\n if isinstance(statement.right, ast.BinOp):\n right = evaluate_operation(statement.right)\n else:\n right = evaluate_node(statement.right)\n\n op = _AST_OPS_TO_OPERATORS.get(type(statement.op))\n\n evaluation = None\n if op is not None:\n with suppress(Exception):\n evaluation = op(left, right)\n\n return evaluation", "def get_op_type(self):\n return self.op_type", "def offending_op(self):\r\n return type(self.r.owner.op)", "def unaryop_type(cls, op):\n return None", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return result\n self.raise_error()", "def _calculate(self, node):\n if isinstance(node, ast.Num): # <number>\n return node.n\n elif isinstance(node, ast.BinOp): # <left> <operator> <right>\n return self._operators[type(node.op)](\n self._calculate(node.left),\n self._calculate(node.right)\n )\n elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1\n return self._operators[type(node.op)](self._calculate(node.operand))\n else:\n raise TypeError(node)", "def _calculate_operation_math(self, rule, left, right):\n\n # Attempt to keep integer data type for the result, when possible.\n if isinstance(left, IntegerRule) and isinstance(right, IntegerRule):\n result = self.evaluate_binop_math(rule.operation, left.value, right.value)\n if isinstance(result, list):\n return ListRule([IntegerRule(r) for r in result])\n return IntegerRule(result)\n\n # Otherwise the result is float.\n if isinstance(left, NumberRule) and isinstance(right, NumberRule):\n result = self.evaluate_binop_math(rule.operation, left.value, right.value)\n if isinstance(result, list):\n return ListRule([FloatRule(r) for r in result])\n return FloatRule(result)\n\n # This point should never be reached.\n raise Exception()", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n left, right = _recycle_left_right(left, right)\n return op_func(left, right)", "def citem_to_specific_type(self):\n\n if self.op >= cot_empty and self.op <= cot_last:\n return self.cexpr\n elif self.op >= cit_empty and self.op < cit_end:\n return self.cinsn\n\n raise RuntimeError('unknown op type %s' % (repr(self.op), ))", "def visit_or(self, left_result: T, right_result: T) -> T:", "def op(\n self,\n opstring: str,\n precedence: int = 0,\n is_comparison: bool = False,\n return_type: Optional[\n Union[Type[TypeEngine[Any]], TypeEngine[Any]]\n ] = None,\n python_impl: Optional[Callable[..., Any]] = None,\n ) -> Callable[[Any], Operators]:\n operator = custom_op(\n opstring,\n precedence,\n is_comparison,\n return_type,\n python_impl=python_impl,\n )\n\n def against(other: Any) -> Operators:\n return operator(self, other) # type: ignore\n\n return against", "def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n 
# pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def binary(op, l, r):\n if op == \"+\": return l + r\n if op == \"*\": return l * r\n if op == \"-\": return l - r\n if op == \"=\": return l == r\n if op == \"<>\": return l != r\n if op == \"!=\": return l != r\n if op == \"or\": return l or r\n if op == \"<\": return l < r\n if op == \">\": return l > r\n if op == \"/\": return l / r\n if op == \"and\": return bool(l and r)\n if op == \"in\": return l in r\n if op == \"==\": return l == r\n if op == \"<=\": return l <= r\n if op == \">=\": return l >= r\n raise Exception(\"binary op not implemented\")", "def reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def get_const(op):\n return isinstance(op, (types.Int, types.Bytes))", "def operator(self) -> Optional[LogicalOperator]:\n return self.__operator", "def getRealOperator(self):\n if self._real_operator is not None:\n # use memoized\n return self._real_operator\n \n if isinstance(self.operator, Slot):\n self._real_operator = self.operator.getRealOperator()\n else:\n self._real_operator = self.operator\n\n return self._real_operator", "def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")", "def get_operation_type(self, operation_name):\n # type: (Optional[str]) -> Optional[str]\n operations_map = 
self.operations_map\n if not operation_name and len(operations_map) == 1:\n return next(iter(operations_map.values()))\n return operations_map.get(operation_name)", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def result_type(self):\n\n anc = self.find_ancestor(ASTDeclarationNode) or self.find_ancestor(ASTAssignmentNode)\n if anc:\n return anc.type()\n return get_expression_type(self)", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def try_fold_arithmetic_binop(\n op: ast.ops.Operator, left: irast.Set, right: irast.Set, *,\n ctx: context.ContextLevel) -> typing.Optional[irast.Set]:\n schema = ctx.schema\n\n real_t = schema.get('std::anyreal')\n float_t = schema.get('std::anyfloat')\n int_t = schema.get('std::anyint')\n\n left_type = irutils.infer_type(left, schema)\n right_type = irutils.infer_type(right, schema)\n\n if not left_type.issubclass(real_t) or not right_type.issubclass(real_t):\n return\n\n result_type = left_type\n if right_type.issubclass(float_t):\n result_type = right_type\n\n left = left.expr\n right = right.expr\n\n if op == ast.ops.ADD:\n value = left.value + right.value\n elif op == ast.ops.SUB:\n value = left.value - right.value\n elif op == ast.ops.MUL:\n value = left.value * right.value\n elif op == ast.ops.DIV:\n if left_type.issubclass(int_t) and right_type.issubclass(int_t):\n value = left.value // right.value\n else:\n value = left.value / right.value\n elif op == ast.ops.POW:\n value = left.value ** right.value\n elif op == ast.ops.MOD:\n value = left.value % right.value\n else:\n value = None\n\n if value is not None:\n return setgen.ensure_set(\n irast.Constant(value=value, type=result_type), ctx=ctx)", "def _arithmetize1(self, operand: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n # Data length might be changed after evaluation\n # operand = recycle_value(operand, self.data.shape[0])\n return op_func(operand)", "def detect_reduction_type(wcr_str):\n if wcr_str == '' or wcr_str is None:\n return None\n\n # Get lambda function from string\n wcr = eval(wcr_str)\n wcr_ast = ast.parse(wcr_str).body[0].value.body\n\n # Run function through symbolic math engine\n a = sympy.Symbol('a')\n b = sympy.Symbol('b')\n try:\n result = wcr(a, b)\n except (TypeError, AttributeError,\n NameError): # e.g., \"Cannot determine truth value of relational\"\n result = None\n\n # Check resulting value\n if result == sympy.Max(a, b) or (isinstance(wcr_ast, ast.Call)\n and isinstance(wcr_ast.func, ast.Name)\n and wcr_ast.func.id == 'max'):\n return dtypes.ReductionType.Max\n elif result 
== sympy.Min(a, b) or (isinstance(wcr_ast, ast.Call)\n and isinstance(wcr_ast.func, ast.Name)\n and wcr_ast.func.id == 'min'):\n return dtypes.ReductionType.Min\n elif result == a + b:\n return dtypes.ReductionType.Sum\n elif result == a * b:\n return dtypes.ReductionType.Product\n elif result == a & b:\n return dtypes.ReductionType.Bitwise_And\n elif result == a | b:\n return dtypes.ReductionType.Bitwise_Or\n elif result == a ^ b:\n return dtypes.ReductionType.Bitwise_Xor\n elif isinstance(wcr_ast, ast.BoolOp) and isinstance(wcr_ast.op, ast.And):\n return dtypes.ReductionType.Logical_And\n elif isinstance(wcr_ast, ast.BoolOp) and isinstance(wcr_ast.op, ast.Or):\n return dtypes.ReductionType.Logical_Or\n elif (isinstance(wcr_ast, ast.Compare)\n and isinstance(wcr_ast.ops[0], ast.NotEq)):\n return dtypes.ReductionType.Logical_Xor\n\n return dtypes.ReductionType.Custom", "def _max_weight_operator(ops: Iterable[PauliTerm]) -> Union[None, PauliTerm]:\n mapping = dict() # type: Dict[int, str]\n for op in ops:\n for idx, op_str in op:\n if idx in mapping:\n if mapping[idx] != op_str:\n return None\n else:\n mapping[idx] = op_str\n op = functools.reduce(mul, (PauliTerm(op, q) for q, op in mapping.items()), sI())\n return op", "def calc_rvalue_type(self, *args):\n return _ida_hexrays.cfunc_parentee_t_calc_rvalue_type(self, *args)", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)" ]
[ "0.6980014", "0.6376216", "0.63047373", "0.6289566", "0.6168144", "0.5986405", "0.594299", "0.5919433", "0.59048015", "0.58846456", "0.58320713", "0.5771884", "0.5764865", "0.5709627", "0.57065237", "0.56597155", "0.5610864", "0.5607209", "0.55886", "0.5584397", "0.5579483", "0.5578599", "0.5546708", "0.55402005", "0.5527811", "0.55063325", "0.5472618", "0.5467775", "0.54571176", "0.54255515" ]
0.7616685
0
Returns the type of applying the unary operator to the current type
def unaryop_type(cls, op): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator", "def is_unary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol()\n # ret-type operator [++ --](int)\n # globally\n # ret-type operator symbol( arg )\n # ret-type operator [++ --](X&, int)\n symbols = ['!', '&', '~', '*', '+', '++', '-', '--']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 0 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] and \\\n isinstance(oper.arguments[0].type, cpptypes.int_t):\n return True\n else:\n return False\n else:\n if 1 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] \\\n and 2 == len(oper.arguments) \\\n and isinstance(oper.arguments[1].type, cpptypes.int_t):\n # may be I need to add additional check whether first argument is\n # reference or not?\n return True\n else:\n return False", "def visit_UnaryOp(self, node):\n self.generic_visit(node)\n if isinstance(node.operand, ast.Num):\n # Don't transform negations of numeric literals. Just treat them\n # as literals.\n return node\n return to_call(self.op_to_function(node.op), [node.operand])", "def visit_UnaryOperator(self, node: UnaryOperator) -> Constant:\n\n operator = node.operator.type\n if operator == TokenType.PLUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(+expression.constant))\n elif operator == TokenType.MINUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(-expression.constant))", "def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)", "def _UnaryOp(self, t):\n self.write(\"(\")\n self.write(self.unop[t.op.__class__.__name__])\n self.dispatch(t.operand)\n self.write(\")\")", "def visit_UnaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.right)\n if token.type == MINUS:\n return -1 * self.visit(node.right)\n self.raise_error()", "def unary_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n # TODO: extend to all types\n exprs = ctx.expressions_by_type(int)\n for expr in exprs:\n for unary_operator in self.unary_operators:\n yield AnnotatedExpression(\n ast.UnaryOp(op=unary_operator(), operand=expr.expr),\n TypeAnnotation(int),\n )", "def binop_type(cls, op, right_type):\n return None", "def get_op_type(self):\n return self.op_type", "def visit_UnaryOpNode(self, node: UnaryOpNode, symbol_table: SymbolTable) -> Number:\n number = self.visit(node.node, symbol_table)\n\n if node.op_tok.token_type == TokenType.MINUS:\n return number * Number(-1)\n elif node.op_tok.token_type == TokenType.PLUS:\n return number\n elif node.op_tok.value == 'not':\n return number.notted_by()", "def _is_unary_op(op):\n if op.type == TokenType.BitwiseNot:\n 
return True\n return False", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def _UnaryOperatorVariable(operatorClass=None):\n\n class unOp(operatorClass):\n def _calcValue_(self):\n return self.op(self.var[0].value)\n\n @property\n def unit(self):\n assert(hasattr(self, \"_unit\") == True)\n if self._unit is None:\n try:\n var = self._varProxy\n return self._extractUnit(self.op(var[0]))\n except:\n return self._extractUnit(self._calcValue())\n else:\n return self._unit\n\n return unOp", "def offending_op(self):\r\n return type(self.r.owner.op)", "def calc_rvalue_type(self, *args):\n return _ida_hexrays.cfunc_parentee_t_calc_rvalue_type(self, *args)", "def unary_op(self):\n return plist([op(x) for x in self], root=self.__root__)", "def visit_UnaryOp(self, node):\n if node and not config.mutated:\n return self.visit_node(node)\n elif node and config.mutated and config.recovering:\n return self.recover_node(node)\n return node", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def operator_numeric_type(method):\n def wrapper(self, other):\n if not isinstance(other, _NUMERIC_TYPES):\n raise TypeError(\n 'unsupported operand types: \\'{0}\\' and \\'{1}\\''.format(\n self.__class__.__name__, other.__class__.__name__))\n return method(self, other)\n return wrapper", "def visit_any_type(self, left: AnyType) -> T:", "def gen_unop(self, expr: expressions.UnaryOperator):\n if expr.op in [\"x++\", \"x--\", \"--x\", \"++x\"]:\n # Increment and decrement in pre and post form\n # Determine increment or decrement:\n op = expr.op[1]\n pre = expr.op[0] == \"x\"\n value = self.gen_inplace_mutation(expr, op, pre)\n elif expr.op == \"*\":\n value = self.gen_expr(expr.a, rvalue=True)\n assert expr.lvalue\n elif expr.op == \"&\":\n assert expr.a.lvalue\n value = self.gen_expr(expr.a, rvalue=False)\n elif expr.op in [\"-\", \"~\"]:\n a = self.gen_expr(expr.a, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n value = self.emit(ir.Unop(expr.op, a, \"unop\", ir_typ))\n elif expr.op in [\"!\"]:\n value = self.gen_condition_to_integer(expr)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def visit_unary(spec):", "def convert_unary_op(g, op, block):\n\n # op_map stores mapping relationship between paddlepaddle and relay\n op_map = {\"isinf_v2\": _op.isinf, \"isfinite_v2\": _op.isfinite, \"isnan_v2\": _op.isnan}\n if op.type in op_map:\n unary_func = op_map[op.type]\n else:\n # while paddle operator's name is same with relay\n unary_func = get_relay_op(op.type)\n out = unary_func(g.get_node(op.input(\"X\")[0]))\n g.add_node(op.output(\"Out\")[0], out)", "def _arithmetize1(self, operand: Any, op: str) -> Any:\n op_func = getattr(operator, op)\n # Data length might be changed after evaluation\n # operand = recycle_value(operand, self.data.shape[0])\n return op_func(operand)", "def cg_inline_unary(self, fn):\n if fn == 'neg':\n op = '-'\n elif fn == 'not':\n op = '!'\n else:\n raise ValueError(f\"Unknown unary operator: {fn}\")\n self.asm(unindent(f\"\"\"\n @SP\n AM=M-1 // SP--\n D={op}M // D = MEM[SP]\n {self._cg_push_D}\n \"\"\"))", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)", "def veval_ast_unary_op(astc : 'AstContext', local_field : 'values.Field', graph : 'graphs.Graph', context : 'functions.VEvalContext' = None):\n assert(isinstance(astc.nast, gast.gast.UnaryOp))\n lineprop = utils.LineProperty(astc.lineno, astc.filename)\n\n unaryop = nodes.UnaryOpType.Unknown\n if isinstance(astc.nast.op, gast.UAdd):\n unaryop = nodes.UnaryOpType.UAdd\n if isinstance(astc.nast.op, gast.USub):\n unaryop = nodes.UnaryOpType.USub\n if isinstance(astc.nast.op, gast.Not):\n unaryop = nodes.UnaryOpType.Not\n\n operand = veval_ast(astc.c(astc.nast.operand), local_field, graph, context)\n operand_value = utils.try_get_value(operand, 'unary', lineprop)\n\n node = 
nodes.NodeUnaryOp(operand_value, unaryop)\n\n ret_value = veval_unary.veval(unaryop, operand_value)\n\n node.set_outputs([ret_value])\n graph.add_node(node)\n\n return values.Object(ret_value)", "def my_operator(self):\n return self._my_operator", "def citem_to_specific_type(self):\n\n if self.op >= cot_empty and self.op <= cot_last:\n return self.cexpr\n elif self.op >= cit_empty and self.op < cit_end:\n return self.cinsn\n\n raise RuntimeError('unknown op type %s' % (repr(self.op), ))" ]
[ "0.7026326", "0.62878203", "0.62745595", "0.6230687", "0.61832154", "0.6153777", "0.6153615", "0.6035553", "0.60099876", "0.5954016", "0.59390163", "0.5836451", "0.58051866", "0.5675768", "0.56312513", "0.55480313", "0.55199814", "0.5483337", "0.54803765", "0.5456091", "0.53950423", "0.5373799", "0.5337757", "0.53274447", "0.52787775", "0.5271115", "0.52608216", "0.5257524", "0.52458584", "0.52304286" ]
0.73739296
0
Ensure that settings are restored after test_settings_before.
def test_settings_restored(self) -> None:
    from django.conf import settings

    assert TestLiveServer._test_settings_before_run is True  # type: ignore[attr-defined]
    assert (
        f"{settings.__class__.__module__}.{settings.__class__.__name__}"
        == "django.conf.Settings"
    )
    assert settings.ALLOWED_HOSTS == ["testserver"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def teardown_function():\n\n # Force module reload as the default test settings have been restored\n importlib.reload(defaults)", "def teardown_method(self, method):\n restore_settings()", "def teardown(self):\n # dump persistent storage to file\n dump_persistent_settings(self.settings_path, self.id_dict)", "def _teardown(self) -> None:\n try:\n if (\n self._config\n and \"restore_state\" in self._config[\"core\"]\n and self._config[\"core\"][\"restore_state\"]\n ):\n self._save_state()\n except Exception as e:\n logger.warning(\"Unexpected error while saving state: %s\", str(e))", "def tearDown(self):\n # set the config module level variables back to None\n config.config._conf_parser = None\n config.config._user_config_file = None", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def reset_settings():\n settings = Settings()\n settings.reset()\n settings.save()", "def reload_settings(self):\n importlib.reload(sys.modules['micromasters.settings'])\n # Restore settings to original settings after test\n self.addCleanup(importlib.reload, sys.modules['micromasters.settings'])\n return vars(sys.modules['micromasters.settings'])", "def test_reset_settings(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(4)\n self.feature_test.reset_settings()\n\n generated = Feature(\"testing\")\n self.assertEqual(generated.percentage, 0)\n self.assertFalse(3 in generated.whitelist)\n self.assertFalse(4 in generated.blacklist)", "def test_settings_doesnt_break(self):\r\n self.settingsDeploy()", "def tearDown(self):\n test_utils.delete_test_config()", "def tearDown(self):\n test_env_teardown()", "def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)", "def afterSetUp(self):\n self.load_config = {}\n self.load_config['monitor_interval'] = 1\n self.load_config['limit_number_request'] = 100\n self.load_config['limit_memory_used'] = 500", "def tearDown(self):\n\n # Remove the config\n del self.config\n\n # Nothing to Teardown\n return super().tearDown()", "def tearDown(self):\n reset()", "def testSaveSettings(self):\n \n self.waitForElement(\"link=Settings\")\n self.selenium.click(\"link=Settings\")\n self.selenium.wait_for_page_to_load(self.WAITTIME)\n self.selenium.click(\"name=zmanage_editProperties:method\")\n self.selenium.wait_for_page_to_load(self.WAITTIME)", "def teardown(self):\n for mr in self.mrs:\n mr.restore_pretest(pretest=mr.pretest_info)", "def setUp(self):\n super(MaintenanceModeMiddlewareTestCase, self).setUp()\n self._set_model_to(False)", "def test_restore_backup():", "def set_unhandled_settings(self):\n # Set any custom settings\n # which where not setted (ex. 
on some widget's state changed)\n\n # Save all settings\n settings.save()", "def setUp(self):\n self.state_client.flushdb()", "def teardown_state():\n yield\n # noinspection PyGlobalUndefined\n global state\n state = StateConfig()", "def setUp(self):\n self.Reinitialize()", "def cleanup(self):\n self._restore_als_disable()\n self._set_brightness_percent(self._original_brightness)\n\n # Check results to make sure backlight levels were preserved across\n # transition events.\n num_failed = 0\n for test_name in self._results:\n old_brightness = self._results[test_name]['old']\n new_brightness = self._results[test_name]['new']\n\n if old_brightness == new_brightness:\n logging.info('Transition event [ PASSED ]: %s', test_name)\n else:\n logging.info('Transition event [ FAILED ]: %s', test_name)\n logging.info(' Brightness changed: %d -> %d',\n old_brightness, new_brightness)\n num_failed += 1\n\n if num_failed > 0:\n raise error.TestFail(('Failed to preserve backlight over %d '\n 'transition event(s).') % num_failed)", "def test_settings(mock_os_environ, update_on_init):\n kwargs = {'prefix': 'TEST_STUFF'}\n if update_on_init is None:\n pass\n else:\n kwargs['update_on_init'] = update_on_init\n settings_map = settings_parser.Settings(**kwargs)\n assert isinstance(settings_map, Mapping)\n if update_on_init is False:\n expected = {}\n else:\n expected = {'testgroup': {'testvar': 7, 'test_var': 6}, 'testgroup_test_var': 9}\n assert dict(settings_map) == expected", "def tearDown(self):\n\n self.testInit.clearDatabase()\n\n self.testInit.delWorkDir()\n\n EmulatorSetup.deleteConfig(self.configFile)\n\n return", "def test_temporary_changes():\n s = settings_parser.Settings()\n s.update({'a': 1})\n with s.temporary_changes():\n # Change the settings within the context\n s.update({'a': 2, 'b': 2})\n s.settings_files.append('test')\n assert s['a'] == 2\n assert len(s.settings_files) == 1\n # Check that outside of the context the settings are back to their old state.\n assert s['a'] == 1\n assert len(s.settings_files) == 0", "def tearDown(self) -> None:\n os.remove(TestConfigFile.TEST_CONFIG)" ]
[ "0.73878825", "0.6784818", "0.6733559", "0.6715742", "0.6488583", "0.6481755", "0.6481755", "0.64424235", "0.64274263", "0.6407588", "0.6295553", "0.6175864", "0.61559063", "0.61030084", "0.609804", "0.6097769", "0.60595536", "0.60365754", "0.6018529", "0.60083866", "0.59859383", "0.5969681", "0.5947899", "0.5942874", "0.59168094", "0.58953744", "0.5889699", "0.5872239", "0.58711016", "0.5865094" ]
0.775121
0
LiveServer always serves statics with ``django.contrib.staticfiles`` handler.
def test_serve_static_with_staticfiles_app(self, django_testdir, settings) -> None:
    django_testdir.create_test_module(
        """
        from urllib.request import urlopen

        from django.utils.encoding import force_str

        class TestLiveServer:
            def test_a(self, live_server, settings):
                assert ('django.contrib.staticfiles'
                        in settings.INSTALLED_APPS)
                response_data = urlopen(
                    live_server + '/static/a_file.txt').read()
                assert force_str(response_data) == 'bla\\n'
        """
    )
    result = django_testdir.runpytest_subprocess("--tb=short", "-v")
    result.stdout.fnmatch_lines(["*test_a*PASSED*"])
    assert result.ret == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serve_static_files(request, path, insecure=False, **kwargs):\n\n if not settings.DEBUG and not insecure:\n raise Http404\n normalized_path = posixpath.normpath(unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path:\n if path.endswith('/') or path == '':\n raise Http404(\"Directory indexes are not allowed here.\")\n raise Http404(\"'%s' could not be found\" % path)\n document_root, path = os.path.split(absolute_path)\n return static.serve(request, path, document_root=document_root, **kwargs)", "def server_static (filename):\n return static_file(filename, root=\"./static\")", "def test_serve_static_dj17_without_staticfiles_app(self, live_server, settings) -> None:\n with pytest.raises(HTTPError):\n urlopen(live_server + \"/static/a_file.txt\").read()", "def serve_static(request, path, document_root):\n # Clean up given path to only allow serving files below document_root.\n path = posixpath.normpath(urllib.unquote(path))\n path = path.lstrip('/')\n newpath = ''\n for part in path.split('/'):\n if not part:\n # Strip empty path components.\n continue\n drive, part = os.path.splitdrive(part)\n head, part = os.path.split(part)\n if part in (os.curdir, os.pardir):\n # Strip '.' and '..' in path.\n continue\n newpath = os.path.join(newpath, part).replace('\\\\', '/')\n if newpath and path != newpath:\n return HttpResponseRedirect(newpath)\n fullpath = os.path.join(document_root, newpath)\n if os.path.isdir(fullpath):\n #if show_indexes:\n # return directory_index(newpath, fullpath)\n raise Http404, \"Directory indexes are not allowed here.\"\n if not os.path.exists(fullpath):\n raise Http404, '\"%s\" does not exist' % fullpath\n # Respect the If-Modified-Since header.\n statobj = os.stat(fullpath)\n if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),\n statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):\n return HttpResponseNotModified()\n mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'\n # Treat the file as a django template\n template = Template(open(fullpath, 'rb').read())\n context = RequestContext(request)\n # Render the template giving the current request\n contents = template.render(context)\n response = HttpResponse(contents, mimetype=mimetype)\n response[\"Last-Modified\"] = http_date(statobj[stat.ST_MTIME])\n response[\"Content-Length\"] = len(contents)\n return response", "def collect_static_files():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('python rnacentral/manage.py collectstatic --noinput')", "def include_static_files(app):\n file_path = sphinx_prolog.get_static_path(STATIC_FILE)\n if file_path not in app.config.html_static_path:\n app.config.html_static_path.append(file_path)", "def server_static(self, filepath):\n root = os.path.join(self.webbase, 'assets')\n return static_file(filepath, root=root)", "def static_text_files():\n return send_from_directory(\"static/\", request.path[1:])", "def server_static(filepath):\n return bottle.static_file(filepath, root=STATIC_ROOT)", "def server_static(filename):\n return static_file(filename, root='static/stats')", "def static(request):\n return {\n 'STATIC_URL': getattr(settings, 'STATIC_URL', settings.MEDIA_URL)\n }", "def server_static(filepath):\n root_folder = os.path.abspath(os.path.dirname(__file__))\n return bottle.static_file(filepath, root=os.path.join(root_folder, 'static'))", "def files_serve(path):\n return flask.send_from_directory(\"static/js\", 
path)", "def collectstatic():\n puts(yellow(\"Collect statics\"))\n django_manage('collectstatic', '-l', '--noinput')", "def send_from_static(filename, **kwargs):\n return send_from_directory(app.static_folder, filename, **kwargs)", "def test_get_serving_url(self):\n self.assertEquals(dirs.get_serving_url(), settings.STATIC_URL)\n \n with self.settings(MEDIABRUTE_USE_STATIC=False):\n self.assertEquals(dirs.get_serving_url(), settings.MEDIA_URL)", "def static_files(filename):\n static_path = os.path.join(frontend.root_path, 'templates', current_app.config['FRONTEND_THEME'], 'static')\n return send_from_directory(static_path, filename)", "def collectstatic():\n local(\"docker-compose exec web python3 manage.py {}\".format(\n 'collectstatic --noinput'))", "def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()", "def static(website, request, **etc):\n return website.static.respond(request)", "def collectstatic():\n sudo(env.activate)\n sudo('cd %s' % env.whole_path_symlinked + '/aurora; python manage.py collectstatic;')", "def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'", "def collectstatic(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('bin/django collectstatic --noinput')", "def pserve():\n import pyramid.scripts.pserve\n import pyramid_fanstatic\n import os\n\n dirname = os.path.dirname(__file__)\n dirname = os.path.join(dirname, 'resources')\n pyramid.scripts.pserve.add_file_callback(\n pyramid_fanstatic.file_callback(dirname))\n pyramid.scripts.pserve.main()", "def static(path):\n return static_file(path, root='media')", "def server_static_img(filename):\n return static_file(filename, root='static/img')", "def ssl_media(request):\n\n if request.is_secure():\n ssl_media_url = settings.STATIC_URL.replace('http://', 'https://')\n else:\n ssl_media_url = settings.STATIC_URL\n \n return {'STATIC_URL': ssl_media_url}", "def static(path):\n return bottle.static_file(path, root='static/')", "def static(path):\n return bottle.static_file(path, root='static/')", "def static(path):\n return bottle.static_file(path, root='static/')" ]
[ "0.692166", "0.6793395", "0.672456", "0.66780925", "0.6508464", "0.6489936", "0.646022", "0.6422593", "0.6398329", "0.6394773", "0.6379119", "0.6352794", "0.63344926", "0.6232494", "0.6215064", "0.6189717", "0.61667037", "0.6146262", "0.61179745", "0.6043764", "0.60087985", "0.5971607", "0.5960405", "0.5958896", "0.591151", "0.5852133", "0.58468443", "0.58356225", "0.58356225", "0.58356225" ]
0.70656496
0
Because ``django.contrib.staticfiles`` is not installed, LiveServer cannot serve statics with django >= 1.7.
def test_serve_static_dj17_without_staticfiles_app(self, live_server, settings) -> None:
    with pytest.raises(HTTPError):
        urlopen(live_server + "/static/a_file.txt").read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_serve_static_with_staticfiles_app(self, django_testdir, settings) -> None:\n django_testdir.create_test_module(\n \"\"\"\n from urllib.request import urlopen\n\n from django.utils.encoding import force_str\n\n class TestLiveServer:\n def test_a(self, live_server, settings):\n assert ('django.contrib.staticfiles'\n in settings.INSTALLED_APPS)\n response_data = urlopen(\n live_server + '/static/a_file.txt').read()\n assert force_str(response_data) == 'bla\\\\n'\n \"\"\"\n )\n result = django_testdir.runpytest_subprocess(\"--tb=short\", \"-v\")\n result.stdout.fnmatch_lines([\"*test_a*PASSED*\"])\n assert result.ret == 0", "def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()", "def collect_static_files():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('python rnacentral/manage.py collectstatic --noinput')", "def serve_static(request, path, document_root):\n # Clean up given path to only allow serving files below document_root.\n path = posixpath.normpath(urllib.unquote(path))\n path = path.lstrip('/')\n newpath = ''\n for part in path.split('/'):\n if not part:\n # Strip empty path components.\n continue\n drive, part = os.path.splitdrive(part)\n head, part = os.path.split(part)\n if part in (os.curdir, os.pardir):\n # Strip '.' and '..' in path.\n continue\n newpath = os.path.join(newpath, part).replace('\\\\', '/')\n if newpath and path != newpath:\n return HttpResponseRedirect(newpath)\n fullpath = os.path.join(document_root, newpath)\n if os.path.isdir(fullpath):\n #if show_indexes:\n # return directory_index(newpath, fullpath)\n raise Http404, \"Directory indexes are not allowed here.\"\n if not os.path.exists(fullpath):\n raise Http404, '\"%s\" does not exist' % fullpath\n # Respect the If-Modified-Since header.\n statobj = os.stat(fullpath)\n if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),\n statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):\n return HttpResponseNotModified()\n mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'\n # Treat the file as a django template\n template = Template(open(fullpath, 'rb').read())\n context = RequestContext(request)\n # Render the template giving the current request\n contents = template.render(context)\n response = HttpResponse(contents, mimetype=mimetype)\n response[\"Last-Modified\"] = http_date(statobj[stat.ST_MTIME])\n response[\"Content-Length\"] = len(contents)\n return response", "def serve_static_files(request, path, insecure=False, **kwargs):\n\n if not settings.DEBUG and not insecure:\n raise Http404\n normalized_path = posixpath.normpath(unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path:\n if path.endswith('/') or path == '':\n raise Http404(\"Directory indexes are not allowed here.\")\n raise Http404(\"'%s' could not be found\" % path)\n document_root, path = os.path.split(absolute_path)\n return static.serve(request, path, document_root=document_root, **kwargs)", "def static(request):\n return {\n 'STATIC_URL': getattr(settings, 'STATIC_URL', settings.MEDIA_URL)\n }", "def ssl_media(request):\n\n if request.is_secure():\n ssl_media_url = settings.STATIC_URL.replace('http://', 'https://')\n else:\n ssl_media_url = settings.STATIC_URL\n \n return {'STATIC_URL': ssl_media_url}", "def 
include_static_files(app):\n file_path = sphinx_prolog.get_static_path(STATIC_FILE)\n if file_path not in app.config.html_static_path:\n app.config.html_static_path.append(file_path)", "def collectstatic():\n sudo(env.activate)\n sudo('cd %s' % env.whole_path_symlinked + '/aurora; python manage.py collectstatic;')", "def collectstatic():\n puts(yellow(\"Collect statics\"))\n django_manage('collectstatic', '-l', '--noinput')", "def collectstatic():\n local(\"docker-compose exec web python3 manage.py {}\".format(\n 'collectstatic --noinput'))", "def test_get_serving_url(self):\n self.assertEquals(dirs.get_serving_url(), settings.STATIC_URL)\n \n with self.settings(MEDIABRUTE_USE_STATIC=False):\n self.assertEquals(dirs.get_serving_url(), settings.MEDIA_URL)", "def collectstatic(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('bin/django collectstatic --noinput')", "def server_static (filename):\n return static_file(filename, root=\"./static\")", "def glr_path_static():\n return os.path.join(base_path, \"static\")", "def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'", "def ext_static(context, extension, path):\n return static('ext/%s/%s' % (extension.id, path))", "def server_static(self, filepath):\n root = os.path.join(self.webbase, 'assets')\n return static_file(filepath, root=root)", "def __get_server_static__(app_path,static_dir):\n import os\n # from . import config_loader\n\n # root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n _path = (static_dir).replace(\"/\", os.path.sep)\n return os.sep.join([app_path, _path])", "def add_static(ext):\n ext = ext.lower()\n\n compiler = StaticCompiler(ext)\n file_list = compiler.get_staticfiles_list()\n\n return render_to_string(\n \"mub/context_%s.html\" % ext,\n {\n \"items\": file_list,\n \"STATIC_URL\": settings.STATIC_URL,\n \"IS_MINIFIED\": compiler.is_minified\n }\n )", "def static_text_files():\n return send_from_directory(\"static/\", request.path[1:])", "def update_static_files(self):\n\n params = self.chose_param_value(\"--static\")\n self._check_path_availability([\"get_static_dir\", \"get_static_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_static_dir(),\n self.analizer.get_static_dir_to(),\n params\n )\n return self.write_debug_message(\"Static files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about static files\")", "def server_static(filename):\n return static_file(filename, root='static/stats')", "def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))", "def DjangoStaticResource(path, rel_url='static'):\r\n rel_url = rel_url.strip('/')\r\n StaticFilesResource = MediaResource(path)\r\n StaticFilesResource.namespace = rel_url\r\n return StaticFilesResource", "def collect_static():\n\n check_promt = (\n not env.prompt or\n console.confirm(\n \"Collect static files and copy them to collect_static?\",\n default=True,\n )\n )\n\n if check_promt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py collectstatic\"\n \" --noinput\"\n )", "def static(website, request, **etc):\n return website.static.respond(request)", "def static_files(filename):\n static_path = os.path.join(frontend.root_path, 'templates', current_app.config['FRONTEND_THEME'], 'static')\n 
return send_from_directory(static_path, filename)", "def static(request):\n return {\n 'JSERRORLOGGING_STATIC_URL': STATIC_URL\n }", "def static(path):\n return static_file(path, root='media')" ]
[ "0.6716268", "0.6231927", "0.6190265", "0.61497104", "0.592605", "0.59110135", "0.58870727", "0.5886149", "0.5850551", "0.5821276", "0.5779303", "0.5771501", "0.57279146", "0.57038695", "0.56011933", "0.5590249", "0.55280924", "0.5513304", "0.5501698", "0.5489761", "0.54814017", "0.5472841", "0.5466179", "0.5433787", "0.5417024", "0.53979486", "0.53653044", "0.536349", "0.53555876", "0.53399044" ]
0.6403566
1
TextResponse will not be processed by RuleExtractor; it needs to be converted to HtmlResponse.
def process_response(request, response, spider):
    headers = ['text/html; charset=UTF-8', 'text/html; charset=utf-8',
               'text/html;charset=UTF-8', 'text/html;charset=utf-8',
               'text/html;charset=ISO-8859-1', 'application/xhtml+xml; charset=utf-8']
    # log.msg("In Middleware " + repr(response.headers['Content-Type']), level=log.INFO)
    body = response.body_as_unicode().encode('utf8') if hasattr(response, 'body_as_unicode') else response.body
    if response.status != 200 and hasattr(spider, 'suspect_requests'):
        spider.suspect_requests.append("%s %s \n" % (response.status, response.url))
    if isinstance(response, TextResponse):
        return HtmlResponse(url=response.url, body=body, request=response.request,
                            status=response.status, headers=headers)
    else:
        return response.replace(body=body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_html_output(self):\n pass", "def get_html(self):\r\n pass", "def _format_response(self, response):\n texts = []\n for result in response.results: \n texts.append(result.alternatives[0].transcript)\n return texts", "def process_response(self, request, response):\n\n if settings.DEBUG:\n return response\n\n if 'text/html' in response['Content-Type'] and settings.COMPRESS_HTML:\n response.content = strip_spaces_between_tags(\n response.content.strip())\n response.content = RE_NEWLINE.sub(\" \", response.content)\n response.content = RE_MULTISPACE.sub(\" \", response.content)\n response.content = RE_SPACETAG1.sub(\">\", response.content)\n response.content = RE_SPACETAG2.sub(\"<\", response.content)\n return response", "def render_text(self, context, result):\n\t\tcontext.response.text = result\n\t\treturn True", "def _get_text(self, remove_newlines=True):\n if not self.text:\n url = self.url\n try:\n self.log.debug(\"Try to get content from page {}\".format(url))\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n self.log.warn(\"Unable to get page content of the url: {url}. \"\n \"The reason: {exc!r}\".format(url=url, exc=e))\n raise ParsingError(e.strerror)\n\n ud = UnicodeDammit(r.content, is_html=True)\n\n enc = ud.original_encoding.lower()\n declared_enc = ud.declared_html_encoding\n if declared_enc:\n declared_enc = declared_enc.lower()\n # possible misregocnition of an encoding\n if (declared_enc and enc != declared_enc):\n detect_dict = chardet.detect(r.content)\n det_conf = detect_dict[\"confidence\"]\n det_enc = detect_dict[\"encoding\"].lower()\n if enc == det_enc and det_conf < THRESHOLD_OF_CHARDETECT:\n enc = declared_enc\n # if page contains any characters that differ from the main\n # encoding we will ignore them\n content = r.content.decode(enc, \"ignore\").encode(enc)\n htmlparser = etree.HTMLParser(encoding=enc)\n root = etree.HTML(content, parser=htmlparser)\n etree.strip_elements(root, html.etree.Comment, \"script\", \"style\")\n text = html.tostring(root, method=\"text\", encoding=\"unicode\")\n\n if remove_newlines:\n self.log.debug(str(type(text)))\n text = re.sub('\\s+', ' ', text)\n self.text = text\n\n return self.text", "def convert_html():\n return", "def process_response(self, response):\n return response", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_minify(response):\n if response.content_type == u'text/html; charset=utf-8':\n response.set_data(\n html_minify(response.get_data(as_text=True))\n )\n\n return response\n return response", "def parse(self, response):", "def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)", "def body(self, response):\t\n\t\tx = response.xpath(\"//div[@class='story-content row-fluid']/p/text()\").extract()\n\n\t\tfor i in range(0,len(x)):\n\t\t\tx[i] = x[i].strip(\"\\r\\n\\t\")\n\t\treturn x", "def text_body(response: tornado.httpclient.HTTPResponse) -> str:\n return response.body.decode(encoding(response))", "def parse_response(self):\n pass", "def rawHTMLrendered(self):", "def HTMLResponse():\n\n\thtml = \t\"<html><head><title>MA MAURO ESISTE?</title><style type='text/css'>body{width:30%}</style></head><body><pre>\"\n\thtml += \" _ __<br>\"\n\thtml += \" (_) / /<br>\"\n\thtml += \" ______ __ ____ ____ / 
/____<br>\"\n\thtml += \" / ___/ / _ \\\\/ _ \\\\/ / _ \\\\<br>\"\n\thtml += \" / / / / /_) / /_) / / ____/<br>\"\n\thtml += \"/__/ /__/ .___/ .___/__/ \\\\_____/<br>\"\n\thtml += \" / / / /<br>\"\n\thtml += \" /__/ /__/<br>\"\n\thtml += \"<b>PYTHON > ALL VERSION</b><br><br>\"\n\thtml += \"<marquee style='white-space:pre;'><br>\"\n\thtml += \" .. o .<br>\"\n\thtml += \" o.o o . o<br>\"\n\thtml += \" oo...<br>\"\n\thtml += \" __[]__<br>\"\n\thtml += \" phwr--> _\\\\:D/_/o_o_o_|__ <span style=\\\"font-family: 'Comic Sans MS'; font-size: 8pt;\\\">u wot m8</span><br>\"\n\thtml += \" \\\\\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"\\\"/<br>\"\n\thtml += \" \\\\ . .. .. . /<br>\"\n\thtml += \"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^<br>\"\n\thtml += \"</marquee><br><strike>reverse engineering a protocol impossible to reverse engineer since always</strike><br>we are actually reverse engineering bancho successfully. for the third time.</pre></body></html>\"\n\treturn html", "def _scrape_response(self, headers, response):\n # identify the responding server\n server_type = None\n server_string = headers.get(\"server\", \"\")\n\n if server_string and \"jetty\" in server_string.lower():\n server_type = \"jetty\"\n\n if server_string and \"coyote\" in server_string.lower():\n server_type = \"tomcat\"\n\n reason = None\n full_html = \"\"\n dom_tree = None\n\n # In Python3, response can be made of bytes\n if IS_PY3 and hasattr(response, \"decode\"):\n response = response.decode()\n if response.startswith(\"<?xml\"):\n # Try a strict XML parse\n try:\n soup = ElementTree.fromstring(response)\n\n reason_node = soup.find('lst[@name=\"error\"]/str[@name=\"msg\"]')\n tb_node = soup.find('lst[@name=\"error\"]/str[@name=\"trace\"]')\n if reason_node is not None:\n full_html = reason = reason_node.text.strip()\n if tb_node is not None:\n full_html = tb_node.text.strip()\n if reason is None:\n reason = full_html\n\n # Since we had a precise match, we'll return the results now:\n if reason and full_html:\n return reason, full_html\n except ElementTree.ParseError:\n # XML parsing error, so we'll let the more liberal code handle it.\n pass\n\n if server_type == \"tomcat\":\n # Tomcat doesn't produce a valid XML response or consistent HTML:\n m = re.search(r\"<(h1)[^>]*>\\s*(.+?)\\s*</\\1>\", response, re.IGNORECASE)\n if m:\n reason = m.group(2)\n else:\n full_html = \"%s\" % response\n else:\n # Let's assume others do produce a valid XML response\n try:\n dom_tree = ElementTree.fromstring(response)\n reason_node = None\n\n # html page might be different for every server\n if server_type == \"jetty\":\n reason_node = dom_tree.find(\"body/pre\")\n else:\n reason_node = dom_tree.find(\"head/title\")\n\n if reason_node is not None:\n reason = reason_node.text\n\n if reason is None:\n full_html = ElementTree.tostring(dom_tree)\n except SyntaxError as err:\n LOG.warning( # NOQA: G200\n \"Unable to extract error message from invalid XML: %s\",\n err,\n extra={\"data\": {\"response\": response}},\n )\n full_html = \"%s\" % response\n\n full_html = force_unicode(full_html)\n full_html = full_html.replace(\"\\n\", \"\")\n full_html = full_html.replace(\"\\r\", \"\")\n full_html = full_html.replace(\"<br/>\", \"\")\n full_html = full_html.replace(\"<br />\", \"\")\n full_html = full_html.strip()\n return reason, full_html", "def get_html(self, *args, **kwargs):\n return Text(self.get_data(*args, **kwargs), escape=False)", "def parse(self, response):\n return super().parse(response)", "def parse(self, 
response):\n return super().parse(response)", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def get_response(text: str):\n # Step 01: Initialize the response.\n response = dict()\n results = dict()\n\n vectorized_text = dict()\n vectorized_text['test'] = (PredictionService.__vc.transform([text])) # see options in the above cell\n\n print ('DONE - [EMBEDDING] Apply Chosen Embeddings to the Tweets')\n # Step 02: Predict the label/class of the received text.\n predicted_sentiment = PredictionService.__model.predict(vectorized_text['test']).tolist()\n\n # Step 03: Parse the prediction result.\n if (predicted_sentiment[0] == 1):\n results[\"label\"] = \"Relevant\"\n else:\n results[\"label\"] = \"Not Relevant\"\n\n # Step 04: Prepare the response.\n response[\"status\"] = 200\n response[\"results\"] = results\n\n # Step 05: Return the response.\n return response", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def _target(self, response: Response):\n soup = BeautifulSoup(response.text, self._parser_library)\n urls = self._url_parser(response.url, soup)\n self._file_parser(response.url, soup, urls, self._logger)", "async def respondHTML(self, html):\n self.HTMLResponse = html", "def get_html(self):\r\n self.do_targeted_feedback(self.tree)\r\n html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)\r\n return html" ]
[ "0.61386234", "0.59616053", "0.5953273", "0.5924247", "0.5882797", "0.5802751", "0.57943356", "0.5772422", "0.5726808", "0.5726808", "0.5669942", "0.56435025", "0.5613142", "0.55854046", "0.5585026", "0.5578091", "0.55673695", "0.55666703", "0.5552076", "0.553761", "0.5524938", "0.5524938", "0.5492435", "0.5492435", "0.5489749", "0.54619163", "0.54619163", "0.54506", "0.5410309", "0.5410181" ]
0.65883505
0
Tests a given component dataframe for convergence, returning True for converged components
def test_component(self, component_dataframe, ignore_weight=False):
    # define our acceptable bounds
    skew_range = [-0.6, 0.6]
    kurt_range = [-1.5, 0.75]  # accept shorter tails for bang-on data
    weight_low = 0.008

    # perform weight test first if not ignored
    if not ignore_weight:
        if component_dataframe.weight.mean() < weight_low:
            return False
        if skew(component_dataframe.weight) < skew_range[0]:
            return False
        if skew(component_dataframe.weight) > skew_range[1]:
            return False
        if kurtosis(component_dataframe.weight) < kurt_range[0]:
            return False
        if kurtosis(component_dataframe.weight) > kurt_range[1]:
            return False

    # now for the component parameter locations
    for param in ['loc'+str(i) for i in range(self._parameter_count)]:
        if skew(component_dataframe[param]) < skew_range[0]:
            return False
        if skew(component_dataframe[param]) > skew_range[1]:
            return False
        if kurtosis(component_dataframe[param]) < kurt_range[0]:
            return False
        if kurtosis(component_dataframe[param]) > kurt_range[1]:
            return False

    # all tests passed
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def converged(self) -> bool:", "def converged(self) -> bool:", "def converged(self) -> bool:", "def has_convergence_delta(self) -> bool:\n return False", "def converged(self) -> bool:\n if self._species is not None and self._species.n_atoms == 1:\n return True # Optimisation 0 DOF is always converged\n\n if self._abs_delta_e < self.etol / 10:\n logger.warning(\n f\"Energy change is overachieved. \"\n f'{self.etol.to(\"kcal\")/10:.3E} kcal mol-1. '\n f\"Signaling convergence\"\n )\n return True\n\n return self._abs_delta_e < self.etol and self._g_norm < self.gtol", "def test_gradient_convergence(self):\n pass", "def _is_converged(self):\n if self._last_operating_point is None:\n return False\n\n # Tolerance for comparing operating points. If all states changes\n # within this tolerance in the Euclidean norm then we've converged.\n TOLERANCE = 1e-4\n for ii in range(self._horizon):\n last_x = self._last_operating_point[0][ii]\n current_x = self._current_operating_point[0][ii]\n\n if np.linalg.norm(last_x - current_x) > TOLERANCE:\n return False\n\n return True", "def _CheckConvergence(self):\n self.is_converged = True\n self.are_converged[0] = (abs(self.delta_e) < self.conv_delta_e)\n self.are_converged[1] = (self.grad_rms < self.conv_grad_rms)\n self.are_converged[2] = (self.grad_max < self.conv_grad_max)\n self.are_converged[3] = (self.disp_rms < self.conv_disp_rms)\n self.are_converged[4] = (self.disp_max < self.conv_disp_max)\n for i in range(5):\n if self.must_converge[i] and not self.are_converged[i]:\n self.is_converged = False", "def converged(self) -> bool:\n return self._converged", "def check_matrix_converge(self):\n # If all of the running window changes are to the same value, then we stop learning\n test = list(map(lambda x: int(\n x.split(\"->\")[0]) == int(float(x.split(\"->\")[1])), self.running_update_window))\n is_converged = np.count_nonzero(test) == self.running_update_window_len\n return is_converged", "def _check_convergence(current_position,\n next_position,\n current_objective,\n next_objective,\n next_gradient,\n grad_tolerance,\n f_relative_tolerance,\n x_tolerance):\n grad_converged = _check_within_tolerance(next_gradient, grad_tolerance)\n x_converged = _check_within_tolerance(next_position - current_position,\n x_tolerance)\n f_converged = _check_within_tolerance(\n next_objective - current_objective,\n f_relative_tolerance * current_objective)\n return grad_converged | x_converged | f_converged", "def checkConvergence(self, iteration):\n threshold = abs(0.05*self.overBestVal)\n stdev = np.std(np.array([particle.bestXYZ[2] for particle in self.swarm]))\n if self.overBestVal==self.prevBestVal:\n self.bestStreak+=1\n else:\n self.bestStreak=0\n if stdev<=threshold:\n exitFlag = 0 #set this convergence pattern as exit flag 0\n print('Converged: All points converged to same position; std was less than threshold')\n elif self.bestStreak>=50:\n exitFlag = 1 #set this convergence patter as exit flag 1\n print('Converged: Best value did not increase %d times in a row' %50)\n elif iteration>=800:\n exitFlag = 2 #sets no convergence as exit flag 2\n print('Did not converge, exceeded iteration threshold')\n else:\n exitFlag = None\n return [stdev <= threshold or self.bestStreak>=50 or iteration>=800, exitFlag]", "def perfect_collinearity_test(X, min_rows=\"infer\", max_rows=None):\n # Sets the minimum number of rows to start with.\n if min_rows == \"infer\":\n rows_to_use = 2*X.shape[1]\n if rows_to_use > X.shape[0]:\n rows_to_use = X.shape[0]\n else:\n rows_to_use = 
min_rows\n \n # Sets the maximum number of rows to use.\n if max_rows is None:\n max_rows = X.shape[0]\n \n columns_in_dataframe = X.columns\n \n # Template for printing even columns\n template = \"{0:%s}{1:13}{2:16}\" % len(max(X.columns, key=lambda x: len(x)))\n \n # Series to save results\n results = pd.Series()\n \n # Runs a regression of every x against all other X variables.\n # Starts with a small dataset and if R^2 == 1, doubles the size\n # of the dataset until greater than max_rows.\n for temp_y_variable in columns_in_dataframe:\n rows_to_use_base = rows_to_use\n while True:\n X_master = X[:rows_to_use_base]\n temp_X_variables = [col for col in columns_in_dataframe if col != temp_y_variable]\n y_temp = X_master[temp_y_variable]\n X_temp = X_master[temp_X_variables]\n lin_model = LinearRegression()\n lin_model.fit(X_temp, y_temp)\n R_2 = lin_model.score(X_temp, y_temp)\n if R_2 != 1 and R_2 >= 0 or rows_to_use_base >= max_rows:\n if R_2 == 1:\n print(\"\")\n print(temp_y_variable + \": PERFECT COLLINEARITY ********\")\n temp_series = pd.Series(lin_model.coef_, index=temp_X_variables)\n print(list(temp_series[temp_series.round(9) != 0].index))\n print(\"\")\n else:\n print(template.format(temp_y_variable, \" VIF = \" + str(round((1.0/(1.0-R_2)),1)), \"R^2 = \" + str(round(R_2,4))))\n results[temp_y_variable] = R_2\n break\n rows_to_use_base += rows_to_use_base\n if rows_to_use_base > X.shape[0]:\n rows_to_use_base = X.shape[0]\n return results", "def converged(self) -> bool:\n assert self._coords is not None\n\n if self._converged_translation:\n logger.info(\n \"Converged purely based on translation of the \"\n \"dimer midpoint\"\n )\n return True\n\n rms_g0 = np.sqrt(np.mean(np.square(self._coords.g0)))\n return self.iteration > 0 and rms_g0 < self.gtol", "def converged(self, loss_hist):\n\n # Improvement in loss function over iteration.\n d_loss = np.diff(loss_hist[-self.patience:])\n\n # Objective went up\n if d_loss[-1] > 0:\n self.step_size /= self.step_decrement\n return False\n\n # Objective converged\n elif np.all(np.abs(d_loss) < self.tol):\n return True\n\n # Objective went down, but not yet converged.\n else:\n return False", "def convergence_dataframe(self):\n round_is = range(self.run.n_rounds)\n substeps = self.run.params.subbuild_uptos\n coords = np.array(list(itertools.product(round_is, substeps)))\n steps = self.run.params.spt * coords[:, 0] + coords[:, 1]\n\n conv_vals = np.asarray(\n [[c.converged for c in cs] for cs in self.load_convergences()]\n ).reshape(-1)\n\n df = pd.DataFrame(dict(\n round_i=coords[:, 0], steps=steps, converged=conv_vals\n )).set_index('steps')\n\n return df", "def _converged_workers(self, tol):\n x = self.ivp.t\n x_lower = self.model.workers.lower\n x_upper = self.model.workers.upper\n\n if self.model.assortativity == 'positive':\n if abs(x - x_lower) / x <= tol: # use relative values!\n converged = True\n else:\n converged = False\n else:\n if abs(x - x_upper) / x <= tol: # use relative values!\n converged = True\n else:\n converged = False\n\n return converged", "def fn(conn, libraries, params, predecessors):\n pd = libraries[\"pandas\"]\n\n preferred_distributions = params.get('preferred_distributions', False)\n\n # iterate through component list\n for pred in predecessors:\n if 'dist' in pred:\n weibull_table_name = pred\n elif 'domain' in pred:\n dc_table_name = pred\n else:\n rc_table_name = pred\n\n # load data from distributions and checks\n df_dist = pd.read_sql(sql=\"\"\"SELECT dd.*, rc.Range_Check, dc.Domain_Check FROM {} dd 
JOIN {} rc ON rc.distribution_id = dd.distribution_id\n JOIN {} dc ON dc.distribution_id = dd.distribution_id\"\"\".format(weibull_table_name, rc_table_name, dc_table_name), \n con=conn)\n\n # add preferred column, initially all false\n df_dist['Preferred'] = 0\n \n # save given preferred distributions if supplied\n if preferred_distributions: # empty list is false as well\n\n df_dist.loc[df_dist.distribution_id.isin(preferred_distributions), 'Preferred'] = 1\n\n return df_dist\n\n ## otherwise perform automated classification\n # save original df for saving at the end\n df_dist_to_return = df_dist.copy()\n\n ## first replace weibulls with exponentials if beta test doesn't pass\n # can do this for all wucs at once\n removed_distributions_weibull_test = 0\n for ips in list(df_dist.interval_parameter_set_id.unique()):\n df_dist_one_ips = df_dist.loc[(df_dist.interval_parameter_set_id == ips), :].copy()\n if df_dist_one_ips.loc[(df_dist_one_ips.dist_name == 'weibull'), 'beta_eq_one_pval'].iloc[0] > 0.05:\n removed_distributions_weibull_test += 1\n df_dist.drop(df_dist_one_ips.loc[(df_dist_one_ips.dist_name == 'weibull'), :].index[0], axis=0, inplace=True)\n else:\n df_dist.drop(df_dist_one_ips.loc[(df_dist_one_ips.dist_name == 'exponential'), :].index[0], axis=0, inplace=True)\n print('removed {} weibull distributions for failing the beta=1 check (will use exponential)'.format(removed_distributions_weibull_test))\n\n # add columns to df to help SE comparisons\n # using 2 as approximation for 2-sided 95% confidence intervals\n # (assuming normality of estimate, which is iffy)\n df_dist['eta_se_upper_ci'] = df_dist.apply(lambda row: row.eta + 2*row.eta_se, axis=1)\n df_dist['eta_se_lower_ci'] = df_dist.apply(lambda row: row.eta - 2*row.eta_se, axis=1)\n\n def exclude_based_on_time_frame(df):\n # returns indices to exclude based on ruled out by time frame / time period\n\n # use 5 years if 5 years is different than both 10 years and all years\n # use 10 years if above check doesn't pass and if 10 years is different than all years\n\n def compare_10_and_s04(df_s04, df_10yr):\n\n if df_s04.eta == df_10yr.eta:\n diff_10_s04 = False\n elif df_s04.eta > df_10yr.eta: \n if df_s04.eta_se_lower_ci < df_10yr.eta_se_upper_ci:\n diff_10_s04 = False\n else:\n diff_10_s04 = True\n elif df_s04.eta < df_10yr.eta:\n if df_s04.eta_se_upper_ci > df_10yr.eta_se_lower_ci:\n diff_10_s04 = False\n else:\n diff_10_s04 = True\n \n if diff_10_s04:\n use = 'Removed_Last_10_Years'\n else: \n use = 'Since 04'\n return use\n\n # if a WUC doesn't have any removals in the last 5 or 10 years there won't be a Weibull\n # we have to catch these instances and handle them separately\n\n # retrieve one-row dfs as series\n df_s04 = df.loc[df.Time_Frame == 'Since 04', :].iloc[0]\n if len(df.loc[df.Time_Frame == 'Removed_Last_10_Years', :]) > 0:\n df_10yr = df.loc[df.Time_Frame == 'Removed_Last_10_Years', :].iloc[0]\n else:\n use = 'Since 04'\n print('WUC {} using {} because no other time frames'.format(df.Work_Unit_Code.iloc[0], use))\n assert df[df.Time_Frame != use].empty\n return df[df.Time_Frame != use].index\n if len(df.loc[df.Time_Frame == 'Removed_Last_5_Years', :]) > 0:\n df_5yr = df.loc[df.Time_Frame == 'Removed_Last_5_Years', :].iloc[0]\n else:\n use = compare_10_and_s04(df_s04, df_10yr)\n print('WUC {} using {}'.format(df.Work_Unit_Code.iloc[0], use))\n # return indices to exclude\n return df[df.Time_Frame != use].index\n\n\n if df_10yr.eta > df_5yr.eta: # e.g 2000 and 1500\n if df_10yr.eta_se_lower_ci < 
df_5yr.eta_se_upper_ci: # e.g. 1800 & 1700\n diff_5_10 = False\n else:\n diff_5_10 = True\n elif df_10yr.eta < df_5yr.eta:\n if df_10yr.eta_se_upper_ci > df_5yr.eta_se_lower_ci:\n diff_5_10 = False\n else:\n diff_5_10 = True\n else:\n diff_5_10 = False\n if diff_5_10:\n # compare 5 and s04\n if df_s04.eta > df_5yr.eta: \n if df_s04.eta_se_lower_ci < df_5yr.eta_se_upper_ci:\n diff_5_s04 = False\n else:\n diff_5_s04 = True\n elif df_s04.eta < df_5yr.eta:\n if df_s04.eta_se_upper_ci > df_5yr.eta_se_lower_ci:\n diff_5_s04 = False\n else:\n diff_5_s04 = True\n else:\n diff_5_s04 = False\n\n if diff_5_10 and diff_5_s04:\n use = 'Removed_Last_5_Years'\n else:\n # disqualify 5\n # compare 10 and s04\n use = compare_10_and_s04(df_s04, df_10yr)\n \n \n print('WUC {} using {}'.format(df.Work_Unit_Code.iloc[0], use))\n \n # return indices to exclude\n return df[df.Time_Frame != use].index\n\n def exclude_based_on_mds(df):\n # if J and H are different, use both. \n # otherwise, use MDS \n # (assume checks were made already to make sure there are both MDS)\n \n # returns indices to exclude based on ruled out by MDS\n\n # retrieve one-row dfs as series\n df_mds = df.loc[df.MDS != 'Any MDS']\n df_a = df_mds.iloc[0, :]\n df_b = df_mds.iloc[1, :]\n\n if df_b.eta > df_a.eta: # e.g 2000 and 1500\n if df_b.eta_se_lower_ci < df_a.eta_se_upper_ci: # e.g. 1800 & 1700\n diff_a_b = False\n else:\n diff_a_b = True\n elif df_b.eta < df_a.eta:\n if df_b.eta_se_upper_ci > df_a.eta_se_lower_ci:\n diff_a_b = False\n else:\n diff_a_b = True\n else:\n diff_a_b = False\n \n if diff_a_b:\n print('WUC {} splitting by MDS'.format(df.Work_Unit_Code.iloc[0]))\n # return indices to exclude\n return df[df.MDS == 'Any MDS'].index\n else: \n print('WUC {} not splitting by MDS'.format(df.Work_Unit_Code.iloc[0]))\n # return indices to exclude\n return df[df.MDS != 'Any MDS'].index\n\n # now loop through distributions and check Time Range and MDS\n for w in list(df_dist.Work_Unit_Code.unique()):\n \n df_single_wuc = df_dist.loc[df_dist.Work_Unit_Code == w, :].copy()\n\n # remove the unused rows from All MDS and any specific MDS\n indices_to_exclude = exclude_based_on_time_frame(df_single_wuc)\n df_single_wuc.drop(indices_to_exclude, axis=0, inplace=True)\n\n # now compare MDS, but only if there are distributions to compare \n # (more than 2 distributions, which would be one-speicific WUC + Any MDS)\n if df_single_wuc[df_single_wuc.MDS != 'Any MDS'].shape[0] > 1:\n # compare MDS\n indices_to_exclude = exclude_based_on_mds(df_single_wuc)\n df_single_wuc.drop(indices_to_exclude, axis=0, inplace=True)\n else:\n # drop the specific-MDS distribution\n print('WUC {} has single MDS - use Any MDS'.format(df_single_wuc.iloc[0].Work_Unit_Code))\n df_single_wuc.drop(df_single_wuc[df_single_wuc.MDS != 'Any MDS'].index, axis=0, inplace=True)\n\n # distributions that remain are preferred\n df_single_wuc.Preferred = 1\n df_dist_to_return.update(df_single_wuc)\n\n return df_dist_to_return", "def test_dataframe(self):\r\n dataframe_tested = df.toEEFeatureCollection(\"y\", \"x\")\r\n self.assertIsInstance(dataframe_tested, ee.featurecollection.FeatureCollection)", "def perfect_collinearity_test_simple(X, min_rows=\"infer\", max_rows=None):\n # Sets the minimum number of rows to start with.\n if min_rows == \"infer\":\n rows_to_use = 2*X.shape[1]\n if rows_to_use > X.shape[0]:\n rows_to_use = X.shape[0]\n else:\n rows_to_use = min_rows\n \n # Sets the maximum number of rows to use.\n if max_rows is None:\n max_rows = X.shape[0]\n \n columns_in_dataframe = 
X.columns\n\n \n # Series to save results\n results = pd.Series()\n \n # Runs a regression of every x against all other X variables.\n # Starts with a small dataset and if R^2 == 1, doubles the size\n # of the dataset until greater than max_rows.\n for temp_y_variable in columns_in_dataframe:\n rows_to_use_base = rows_to_use\n while True:\n X_master = X[:rows_to_use_base]\n temp_X_variables = [col for col in columns_in_dataframe if col != temp_y_variable]\n y_temp = X_master[temp_y_variable]\n X_temp = X_master[temp_X_variables]\n lin_model = LinearRegression()\n lin_model.fit(X_temp, y_temp)\n R_2 = lin_model.score(X_temp, y_temp)\n if R_2 != 1 and R_2 >= 0 or rows_to_use_base >= max_rows:\n results[temp_y_variable] = R_2\n break\n rows_to_use_base += rows_to_use_base\n if rows_to_use_base > X.shape[0]:\n rows_to_use_base = X.shape[0]\n return results", "def __eq__(self, candidate):\n return np.linalg.norm(self.components()\n -\n candidate.components()) < 1.e-7", "def get_stat_dif(column, target_column, data, alpha):\n cols = data.loc[:, column].value_counts().index[:]\n combinations_all = list(combinations(cols, 2))\n for comb in combinations_all:\n a = data.loc[data.loc[:, column] == comb[0], target_column]\n b = data.loc[data.loc[:, column] == comb[1], target_column]\n result = ttest_ind(a, b).pvalue\n\n if result <= alpha/len(combinations_all):\n print('Найдены статистически значимые различия для колонки', column)\n break", "def chcek_exist(df, Point):\n\n exist = (df.OperatingPoint == Point)\n exist = exist.sum()\n if exist == 0:\n return False\n else:\n return True", "def test_gradient(gradient, thetas, activations_neural, classification_matrix, lambda_value=1, step=1E-4, tolerance=1E-4):\n \n dimensional_error(thetas[-1].shape, gradient[-1].shape)\n\n last_thetas = thetas[-1]\n \n last_thetas_plus_step = thetas[-1] + step\n last_thetas_minus_step = thetas[-1] - step\n\n num_grad_total = pd.DataFrame()\n\n for i in range( gradient[-1].shape[0] ):\n\n\n last_thetas_plus = pd.concat( [last_thetas[0:i], last_thetas_plus_step[i:i+1] , last_thetas[i+1:]] , axis=0 )\n\n last_thetas_minus = pd.concat( [last_thetas[0:i], last_thetas_minus_step[i:i+1], last_thetas[i+1:]] , axis=0 )\n\n last_activation_plus = activation_values(activations_neural[-2], last_thetas_plus ).to_numpy()\n last_activation_minus = activation_values(activations_neural[-2], last_thetas_minus).to_numpy()\n\n cost_plus = cost_function_sigmoid([last_activation_plus] , classification_matrix, [last_thetas_plus] , lambda_value)\n cost_minus = cost_function_sigmoid([last_activation_minus], classification_matrix, [last_thetas_minus], lambda_value)\n\n num_grad = (cost_plus - cost_minus)/(2*step) # it's a column DataFrame\n num_grad_total = pd.concat([num_grad_total, num_grad], axis=1)\n\n num_grad_total = num_grad_total.T\n\n dimensional_error(num_grad_total.shape, gradient[-1].shape)\n\n num_grad_total.index = gradient[-1].index\n num_grad_total.columns = gradient[-1].columns\n\n _ = ( np.abs( gradient[-1].to_numpy() - num_grad_total.to_numpy() ) <= tolerance )\n\n return _, num_grad_total", "def voxelConsistency(cleaned_dataframe, column_number, expected_size):\n consistency_boolean = True\n for row in cleaned_dataframe.index:\n if cleaned_dataframe[column_number][row] == expected_size:\n continue\n elif cleaned_dataframe[column_number][row] != expected_size:\n print(\"Subject scan \" + cleaned_dataframe[0][row] + \" does not have voxel size of \" +str(expected_size))\n consistency_boolean = False\n return 
consistency_boolean", "def test_for_convergence(self, error_tol):\n list_of_best_indvs = []\n for island in self._islands:\n best_indv = island.best_individual()\n list_of_best_indvs.append(best_indv)\n list_of_best_indvs.sort(key=lambda x: x.fitness)\n\n best_indv = list_of_best_indvs[0]\n converged = best_indv.fitness <= error_tol\n\n self._best_indv = best_indv\n self._converged = converged\n return converged", "def ensemble_compare(\n self,\n ensemble: Type[Ensemble],\n observables: List[Observables] = [\"nnsd\", \"nnnsd\", \"rigidity\", \"levelvar\"],\n metrics: List[Metric] = [\"msqd\"],\n spacings: Tuple[float, float] = (0.5, 2.5),\n kde_gridsize: int = 5000,\n L_rigidity: ndarray = np.arange(2, 50, 0.5),\n L_levelvar: ndarray = np.arange(0.2, 20, 0.2),\n show_progress: bool = False,\n ) -> pd.DataFrame:\n\n def compare(expected: ndarray, curve: ndarray, name: str, metric: Metric) -> np.float64:\n comp = Compare(curves=[curve], labels=[name], base_curve=expected, base_label=\"exp\")\n res = None\n if metric == \"mad\":\n res = comp.mean_abs_difference()\n elif metric == \"msqd\":\n res = comp.mean_sq_difference()\n elif metric == \"corr\":\n res = comp.correlate()\n else:\n raise ValueError(\"Invalid metric. Must be one of ['mad', 'msqd', 'corr'].\")\n return np.float64(res[\"exp\"][name])\n\n df = pd.DataFrame(index=metrics, columns=observables)\n if \"nnsd\" in observables:\n nnsd = self.__get_kde_values(spacings_range=spacings, kde_gridsize=kde_gridsize)\n nnsd_exp = ensemble.nnsd(spacings_range=spacings, n_points=kde_gridsize)\n for metric in metrics:\n df[\"nnsd\"][metric] = compare(nnsd_exp, nnsd, \"nnsd\", metric)\n\n if \"nnnsd\" in observables:\n nnnsd = self.__get_kde_values(\n spacings_range=spacings, nnnsd=True, kde_gridsize=kde_gridsize\n )\n nnnsd_exp = ensemble.nnnsd(spacings_range=spacings, n_points=kde_gridsize)\n for metric in metrics:\n df[\"nnnsd\"][metric] = compare(nnnsd_exp, nnnsd, \"nnnsd\", metric)\n\n if \"rigidity\" in observables:\n rigidity = self.spectral_rigidity(L=L_rigidity, show_progress=show_progress)[\n \"delta\"\n ].to_numpy()\n rigidity_exp = ensemble.spectral_rigidity(L=L_rigidity)\n for metric in metrics:\n df[\"rigidity\"][metric] = compare(rigidity_exp, rigidity, \"rigidity\", metric)\n\n if \"levelvar\" in observables:\n levelvar = self.level_variance(L=L_levelvar, show_progress=show_progress)[\n \"sigma\"\n ].to_numpy()\n levelvar_exp = ensemble.level_variance(L=L_levelvar)\n for metric in metrics:\n df[\"levelvar\"][metric] = compare(levelvar_exp, levelvar, \"levelvar\", metric)\n return df", "def test(self, data_set):\r\n\r\n correct = 0.0\r\n total = 0.0\r\n\r\n for input, target in data_set:\r\n #actual output from neural net\r\n output = self.predict(input)\r\n total += 1.0 #number of total output vectors\r\n\r\n if allclose(output, target, self.converge) == True:\r\n correct += 1.0\r\n\r\n return correct/total", "def _fp_evaluate(sequence, iteration, tolerance):\n return np.abs(sequence[iteration] - sequence[iteration - 1]) < tolerance", "def converged(self):\n if len(self.rundir) >= 2:\n if io.ionic_steps(self.rundir[-1]) <= 3:\n return True\n if self.settings[\"nrg_convergence\"] != None:\n if io.job_complete(self.rundir[-1]) and io.job_complete(self.rundir[-2]):\n o1 = io.Oszicar(os.path.join(self.rundir[-1],\"OSZICAR\"))\n o2 = io.Oszicar(os.path.join(self.rundir[-2],\"OSZICAR\"))\n if abs( o1.E[-1] - o2.E[-1]) < self.settings[\"nrg_convergence\"]:\n return True\n\n return False" ]
[ "0.6538666", "0.6538666", "0.6538666", "0.6366643", "0.6296365", "0.605637", "0.58805627", "0.58160526", "0.57632166", "0.5750119", "0.5745037", "0.57065284", "0.5695569", "0.5663899", "0.56550163", "0.5587026", "0.55850583", "0.5548356", "0.553449", "0.5469594", "0.5452946", "0.5451397", "0.5412875", "0.5394678", "0.5372883", "0.53697795", "0.53621244", "0.5348669", "0.5348308", "0.53371304" ]
0.6763347
0
Take location (code2,code3,country name) return countryName and coords
def locate(location): coord = None country_name = None if location: location = location.lower() for ind, row in country_map.iterrows(): if ( (re.match(r'(.*\W|\W*){}\b'.format(row['code2']), location)) or(re.match(r'(.*\W|\W*){}\b'.format(row['code3']), location)) or(re.match(r'(.*\W|\W*){}\b'.format(row['name']), location))): coord = [row['lat'], row['lang']] country_name = row['name'] break return country_name, coord
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loc_to_coord(codes):\n def adfilter(codes):\n return re.findall(\"\"\"[a-zA-Z]+, [A-Z]{2}\"\"\", \";\".join(codes))\n\n api_key = \"AIzaSyCxQCjOrHFAf7T-W3vtUYqWkgSFkvMjxN4\"\n\n g = geocoders.GoogleV3(api_key = api_key)\n coords = {\"lat\":[], \"long\":[]}\n for code in adfilter(codes):\n if code != \"\":\n try:\n place = g.geocode(code)\n if place != None:\n coords[\"lat\"].append(place.latitude)\n coords[\"long\"].append(place.longitude)\n except (exc.GeocoderTimedOut, exc.GeocoderQueryError):\n pass\n return coords", "def test_get_country_by_geo_location(self):\n pass", "def country(alpha_2_code: str) -> None:", "def get_country_details(self,country):\n try:\n country_obj = pycountry.countries.get(name=country)\n if country_obj is None:\n c = pycountry.countries.search_fuzzy(country)\n country_obj = c[0]\n continent_code = pc.country_alpha2_to_continent_code(country_obj.alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj.alpha_3, continent\n except:\n if 'Congo' in country:\n country = 'Congo'\n elif country == 'Diamond Princess' or country == 'Laos' or country == 'MS Zaandam' or country == 'Holy See' or country == 'Timor-Leste':\n return country, country\n elif country == 'Korea, South' or country == 'South Korea':\n country = 'Korea, Republic of'\n elif country == 'Taiwan*':\n country = 'Taiwan'\n elif country == 'Burma':\n country = 'Myanmar'\n elif country == 'West Bank and Gaza':\n country = 'Gaza'\n else:\n return country, country\n country_obj = pycountry.countries.search_fuzzy(country)\n continent_code = pc.country_alpha2_to_continent_code(country_obj[0].alpha_2)\n continent = pc.convert_continent_code_to_continent_name(continent_code)\n return country_obj[0].alpha_3, continent", "def get_country_from_coordinates(coordinates):\n geolocator = Nominatim(user_agent=\"random_one\")\n location = geolocator.reverse(coordinates, language='en')\n country = location.address.split(',')[-1].strip()\n return country", "def coordinates(latitude, longitude):\r\n location = geolocator.reverse(latitude + \", \" + longitude)\r\n data = location.raw\r\n data = data['address']\r\n state_code = data['state']\r\n return state_code", "def location_details_gen(self, args):\n lat, longt, city, code, timezone = faker.location_on_land()\n\n if args == 'lattlong':\n return lat, longt\n \n elif args == 'city':\n return city\n \n elif args == 'timezone':\n return timezone\n \n elif args == 'code':\n return code", "def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'", "def get_ip_geo_location(ip):\n location = []\n country_code = \"\"\n try:\n response = requests.get(\"https://api.data.caida.org/ipmeta/v1/iplookup/\" + ip).json()\n if len(response) > 0:\n prefered_source = [r for r in response if r[\"source\"] == 2]\n if not prefered_source:\n logging.warning(\"no preferred location data found for {} failed: {}\".format(ip, response))\n location = prefered_source[0].get(\"lat_long\", [])\n country_code = 
prefered_source[0].get(\"country_code\", \"\")\n except Exception as e:\n logging.error(\"retrieving location for {} failed: {}\".format(ip, e))\n finally:\n return location, country_code", "def get_country_info(country):\n return GoogleV3().geocode(country)", "def geocode(df, col):\r\n pass", "def fix_location(r):\n \n # all is fine: just change zipcode datatype to str\n if not np.isnan(r['zip']) and not np.isnan(r['lat']):\n return [str(int(r['zip'])), r['lng'], r['lat']]\n \n # try to locate within zipcode polygons\n if not np.isnan(r['lat']):\n query = \"\"\"\n SELECT t.geoid as zip, {} as lng, {} as lat\n FROM us_zcta5 t JOIN usps_zcta5 z ON t.geoid = z.zip\n WHERE ST_Contains(t.shape, ST_GeomFromText('POINT({} {})', 2))\n \"\"\"\n res = pd.read_sql(query.format(r['lng'], r['lat'], r['lng'], r['lat']), con = con)\n if len(res) == 1:\n return res.values[0].tolist()\n\n # use zipcode center as location proxy: geocoding is prefered in this case, but might be quite expensive\n if not np.isnan(r['zip']):\n res = zipcodes[zipcodes['zip'] == str(int(r['zip']))]\n if len(res) == 1:\n return res.values[0].tolist()[:3]\n\n return [None, None, None]", "def country_codes(country):\n countryObject = None\n try:\n countryObject = pycountry.countries.search_fuzzy(country)\n return countryObject[0].alpha_2\n except LookupError:\n pass\n try:\n splittedCountry = country.split(',')[0]\n countryObject = pycountry.countries.search_fuzzy(splittedCountry)\n return countryObject[0].alpha_2\n except LookupError:\n return 'No Code'", "def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2", "def get_location(body, returnthis):\n m = re.search(r\"\\bsteemitworldmap\\b\\s([-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?))\\s\\blat\\b\\s([-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?))\", body)\n if m:\n try:\n latitude = m.group(1)\n longitude = m.group(5)\n geolocator = Nominatim(user_agent=\"travelfeed/0.1\")\n rawlocation = geolocator.reverse(latitude+\", \"+longitude, language=\"en\", timeout=10).raw\n address = rawlocation['address']\n state = address.get('state', None)\n if state == None: #Not every location has a state/region/... 
set!\n state = address.get('region', None)\n if state == None:\n state = address.get('state_district', None)\n if state == None:\n state = address.get('county', None)\n if state == None:\n state = \"\"\n country_code = str(address[\"country_code\"]).upper()\n country_object = pycountry.countries.get(alpha_2=country_code)\n try:\n country = country_object.common_name #Some countries such as Taiwan or Bolivia have a common name that is used instead of the official name\n except:\n country = country_object.name\n continent_code = pycountry_convert.country_alpha2_to_continent_code(country_code)\n if continent_code == \"AF\":\n continent = \"Africa\"\n elif continent_code == \"NA\":\n continent = \"North America\"\n elif continent_code == \"OC\":\n continent = \"Oceania\"\n elif continent_code == \"AN\":\n continent = \"Antarctica\"\n elif continent_code == \"AS\":\n continent = \"Asia\"\n elif continent_code == \"EU\":\n continent = \"Europe\"\n elif continent_code == \"SA\":\n continent = \"South America\"\n if returnthis == None:\n location = state+\", \"+country+\", \"+continent\n return location\n if returnthis == \"continentcode\":\n return continent_code\n except Exception as error:\n logger.warning(\"Could not determine location: \"+repr(error))\n return None\n else:\n return None", "def geocodeLocations(locations):\n maxResults = 1\n location_query = ''\n for location in locations:\n location_query += \"&location=%s\" % encodeUrl(location)\n url = \"http://open.mapquestapi.com/geocoding/v1/batch?maxResults=%d%s\" % (maxResults, location_query)\n print url\n results = json.loads(urllib2.urlopen(url).read())\n print results\n return\n for location_result in results['results']:\n #print location_result\n if location_result['providedLocation']['location'] == location:\n latlng = location_result['locations'][0]['displayLatLng']\n return latlng\n else:\n print location_result", "def country(name):\n return location_db().find(name=name)[\"country\"]", "def getLocationString(self):\n street = ' '.join(self.context.getAddress().strip().split())\n # Remove Postfach from street, otherwise Google geocoder API will\n # return wrong results\n street = street.replace('Postfach', '').replace('\\r','').strip()\n zip_code = self.context.getZip()\n city = self.context.getCity()\n country = self.context.getCountry()\n\n # We need at least something other than country to be defined,\n # otherwise we can't do a meaningful geocode lookup\n if not (street or zip_code or city):\n return ''\n\n # Concatenate only the fields with a value into the location string\n location = country\n for field in [city, zip_code, street]:\n if field.strip():\n location = \"%s, %s\" % (field.strip(), location)\n\n return location", "def get_formatted_location(city, country):\n\tformatted_location = city + \", \" + country\n\treturn formatted_location.title()", "def get_geocode(self, address):\n\n try:\n raw_data = self.__get_raw_data(address)\n except (URLError, ValueError):\n return 503, None\n else:\n code, coords = self.__parse_raw_data(raw_data)\n return code, coords", "def test_getLocationFromPostcode1(self):\n \n pstPrc=PostcodeProcessor()\n coords=pstPrc.getLocationFromPostcode(self.postcode1)\n self.assertEqual(coords.latitude,self.longLat1.latitude)\n self.assertEqual(coords.longitude,self.longLat1.longitude)", "def get_location(coordinates):\n location_info = gmaps.reverse_geocode(latlng=coordinates)\n location_list = list()\n for location in location_info:\n if \"locality\" in location[\"types\"]:\n return 
location[\"formatted_address\"]\n # location_list.append(location[\"formatted_address\"])\n # return location_list", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def get_location_codes(scanner, input):\n matches = scanner.search_places(input)\n codes = []\n for i in matches[\"Places\"]:\n codes.append(i[\"PlaceId\"])\n return codes", "def locToCoor(locList): # geocoding locations, output coordinates\n print('start assign goor to location')\n coorFromText = []\n for loc in locList:\n print(loc)\n coors = roadToCoor(loc)[1]\n print(coors)\n coorFromText.append((loc, coors))\n\n # g = geocoder.mapquest(locations, method='batch')\n\n return 
coorFromText", "def get_locations_by_country(df, country):\n locations = list(df[df.country == country].location.values)\n return locations", "def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None", "def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos", "def city_country(city, country):\n return(city + ', ' + country)" ]
[ "0.6872996", "0.6658761", "0.6427587", "0.64248663", "0.6413235", "0.6389267", "0.6352416", "0.6344214", "0.6342886", "0.6201723", "0.61685705", "0.61586225", "0.614145", "0.61208564", "0.6072315", "0.606234", "0.6036042", "0.6028122", "0.6024029", "0.60227036", "0.6012603", "0.5998221", "0.59589297", "0.59563106", "0.5952208", "0.5943059", "0.59264153", "0.5925012", "0.59105694", "0.59087795" ]
0.7792582
0
Read the steering file to gather user inputs from the GUI of pyRiverBed. Parameters are declared as global variables.
def read_steering(): print('+> Trying to read steering file...', end='') try: d = np.loadtxt('steering.txt', delimiter=',', skiprows=1) print(' [done]') except IOError: print('\nNo steering file found') print('Please provide steering file first\n') job_done() sys.exit(1) global MODE, NBENDS, LAMBDA, THETA0, JS, JF, WIDTH, DEPTH, SLOPE, DS, \ NUM, INTERVAL, LAG, LAGSTR, SAVEXYZ, SAVEBOUND, SAVEMESH, FLIPSTRM, \ FLIPTRANS, MIGRATION, UB0, C0, CF0, FR0, DT, E0, LPRINT, TSTEPS, \ GPRINT, FPS, ZERO, JPG_DIRS, FNAME, SMOLEV, STCORR MODE = int(d[0]) NBENDS = int(d[1]) LAMBDA = float(d[2]) THETA0 = float(d[3])*np.pi/180 JS = float(d[4]) JF = float(d[5]) WIDTH = float(d[6]) DEPTH = float(d[7]) SLOPE = float(d[8]) DS = float(d[9]) NUM = int(d[10]) INTERVAL = WIDTH/2/NUM LAG = int(d[11]) LAGSTR = d[12] SAVEXYZ = int(d[13]) SAVEBOUND = int(d[14]) SAVEMESH = int(d[15]) FLIPSTRM = int(d[16]) FLIPTRANS = int(d[17]) MIGRATION = int(d[18]) UB0 = float(d[19]) C0 = float(d[20]) CF0 = float(d[21]) FR0 = float(d[22]) DT = int(d[23]) E0 = float(d[24]) LPRINT = int(d[25]) TSTEPS = int(d[26]) if MIGRATION == 0: TSTEPS = 0 GPRINT = int(d[27]) FPS = int(d[28]) SMOLEV = int(d[29]) STCORR = d[30] ZERO = 1e-8 JPG_DIRS = ['./jpg1/', './jpg2/'] with open('steering.txt', 'r') as f: lines = f.readlines() FNAME = lines[0].rstrip() if MODE == 1: FNAME = 'kinoshita' params = WIDTH, DEPTH, SLOPE, NUM, LAG, FNAME, \ MIGRATION, DT, TSTEPS, GPRINT, JPG_DIRS return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_input(self):\n try:\n path = tkFileDialog.askdirectory()\n if not path: # User the cancelled dialog box so bug out\n return False\n # Search the user-provided path for all the input files.\n foundall, missing = self.files.locate_input(path)\n # If any are missing show the user and error message\n # with the missing files listed.\n if not foundall:\n # Give indentation and numbered bullets to the missing names\n missing = [' {}) {}'.format(i, name)\n for i, name in enumerate(sorted(missing), 1)]\n missing.insert(0, 'The following files were not found:')\n missing.append('Nothing was modified')\n tkMessageBox.showerror('Error', '\\n'.join(missing))\n return False\n # Files were successfully located.\n # Read contents of gain/voltage files.\n # This must come first, as it also sets the row and column numbers.\n with self.files.open_file('GAIN_LARGE') as file:\n largegains = file.readlines()\n for x in NORTH_LARGE, SOUTH_LARGE:\n self.detectors[x].set_voltages(largegains)\n with self.files.open_file('GAIN_SMALL') as file:\n smallgains = file.readlines()\n for x in NORTH_SMALL, SOUTH_SMALL:\n self.detectors[x].set_voltages(smallgains)\n # Set QT information\n qtdirname = os.path.dirname(self.files['QT1'])\n self.qt = qt.System(qtdirname)\n for det in self.detectors.itervalues():\n det.set_qt(self.qt)\n # Now that the detector information is complete,\n # let's keep a copy of the initial detector state.\n # This is required for when we make a ROOT\n # tree with initial and final information.\n self.initial = copy.deepcopy(self.detectors)\n except IOError as err:\n tkMessageBox.askyesno('I/O error', str(err))\n self.image_window.canvas.focus_set()", "def read_input_file(self):\n\n # Check if input file exists in current directory, if not kill process\n if not os.path.isfile('./visualise.inpt'):\n print('Cannot find input file \"visualise.inpt\" in current directory')\n sys.exit()\n\n # Read input file and analysis options and parameters\n print('Reading input file')\n with open('visualise.inpt','r') as f:\n f.readline()\n self.prefix = f.readline().split()[0]\n f.readline()\n f.readline()\n self.frame = int(f.readline().split()[0])\n f.readline()\n f.readline()\n self.vis_particles = int(f.readline().split()[0])\n self.vis_vortype = int(f.readline().split()[0])\n self.vis_cellcolour = int(f.readline().split()[0])\n self.vis_save = int(f.readline().split()[0])", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = 
l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def readSteering(self):\r\n\r\n #Read the file steering.csv with the angles and speed\r\n lines = [line.strip() for line in open(self.steeringFile)]\r\n self.steeringLines = []\r\n\r\n\tfor line in lines:\r\n info = line.split(',')\r\n\t if info[0] == 'seq':\r\n continue\r\n\t self.steeringLines.append(info)\r\n\r\n\tself.get_angles_at_timestamps(self.centers, self.steeringLines, use_average = False)\r\n\tprint len(self.angles_at_timestamps)", "def gui_reader():\n SMW_IP = entryCol.entry0.get()\n FSW_IP = entryCol.entry1.get()\n\n ### Set 5GNR Parameters\n NR5G = VST().jav_Open(SMW_IP,FSW_IP)\n NR5G.Freq = float(entryCol.entry2.get())\n NR5G.SWM_Out = float(entryCol.entry3.get())\n NR5G.NR_Dir = entryCol.entry4_enum.get()\n NR5G.NR_Deploy = entryCol.entry5_enum.get()\n NR5G.NR_ChBW = int(entryCol.entry6_enum.get())\n NR5G.NR_SubSp = int(entryCol.entry7_enum.get())\n NR5G.NR_RB = int(entryCol.entry8.get())\n NR5G.NR_RBO = int(entryCol.entry9.get())\n NR5G.NR_Mod = entryCol.entry10_enum.get()\n NR5G.NR_CC = int(entryCol.entry11.get())\n NR5G.NR_TF = 'OFF'\n return NR5G", "def open_file():\r\n\r\n file_name, file_type = QFileDialog.getOpenFileName(MainWindow, '选择文件', default_path, 'txt(*.txt)')\r\n if file_name == '':\r\n return\r\n temp_input = input.read_file(file_name)\r\n try:\r\n if temp_input.measurement_strategy == '0':\r\n ui.measurement_strategy.setCurrentIndex(0)\r\n ui.total_length.setText(temp_input.len_total)\r\n ui.length_step.setText(temp_input.len_step)\r\n elif temp_input.measurement_strategy == '1':\r\n ui.measurement_strategy.setCurrentIndex(1)\r\n ui.num_of_mea.setText(temp_input.num_of_mea)\r\n ui.frequency.setText(temp_input.frequency)\r\n ui.time_step.setText(temp_input.time_step)\r\n ui.na_average_facotr.setValue(int(temp_input.na_average_factor))\r\n ui.multi_measure.setValue(int(temp_input.multi_measure))\r\n ui.save_directory.setText(temp_input.directory)\r\n input_parameters.directory = temp_input.directory\r\n if temp_input.access_sensor_times == '0':\r\n ui.typein_t.setChecked(True)\r\n input_parameters.access_sensor_times = 0\r\n ui.temperature.setText(temp_input.temperature)\r\n ui.humidity.setText(temp_input.humidity)\r\n elif 
temp_input.access_sensor_times == '1':\r\n ui.measure_t_once.setChecked(True)\r\n input_parameters.access_sensor_times = 1\r\n elif temp_input.access_sensor_times == '2':\r\n ui.measure_t_repeatedly.setChecked(True)\r\n input_parameters.access_sensor_times = 2\r\n if temp_input.na_state is not None:\r\n ui.NA_state.setText(temp_input.na_state)\r\n input_parameters.motor_comp = temp_input.motor_comp\r\n input_parameters.sensor_comp = temp_input.sensor_comp\r\n input_parameters.NA_identifier = temp_input.NA_identifier\r\n except Exception:\r\n missing_parameters('文件格式错误,请补充相应数据')", "def createInput(dirPath,gSettings):\n \n with open(os.path.join('../in','input.txt')) as f:\n inpFile = f.readlines()\n \n\n # Model settings\n model = gSettings[\"Model\"]\n inpFile[13] = \"insgrav: {:1d}\\n\".format(int(model[\"NS gravity\"][\"Flag\"]))\n inpFile[14] = \"isun: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Sun\"]))\n inpFile[15] = \"imoon: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Moon\"]))\n\n if model[\"Drag\"][\"Flag\"] == False:\n inpFile[16] = \"idrag: 0\\n\"\n else:\n dm = model[\"Drag\"][\"Model\"].lower()\n if dm == \"wertz\":\n idrag = 1\n elif dm == \"us76\":\n idrag = 2\n elif dm == \"j77\":\n idrag = 3\n elif dm == \"msis00\":\n idrag = 4\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Model\"] + '\" invalid.')\n inpFile[16] = \"idrag: {:1d}\\n\".format(idrag)\n if model[\"Drag\"][\"Solar flux\"].lower() == \"constant\":\n inpFile[17] = \"iF107: 0\\n\"\n elif model[\"Drag\"][\"Solar flux\"].lower() == \"variable\":\n inpFile[17] = \"iF107: 1\\n\"\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Solar flux\"] + '\" invalid.')\n\n if model[\"SRP\"][\"Flag\"] == False:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n else:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n if model[\"SRP\"][\"Eclipses\"]:\n inpFile[18] = \"iSRP: 2\\n\"\n \n if model[\"Lunisolar\"][\"Ephemerides\"] == \"DE431\":\n inpFile[19] = \"iephem: 1\\n\"\n elif model[\"Lunisolar\"][\"Ephemerides\"] == \"Meeus\":\n inpFile[19] = \"iephem: 2\\n\"\n else:\n raise ValueError('Value \"' + model[\"Lunisolar\"][\"Ephemerides\"] + '\" invalid.')\n \n inpFile[20] = \"gdeg: {:3d}\\n\".format(model[\"NS gravity\"][\"Degree\"])\n if model[\"NS gravity\"][\"Order\"] <= model[\"NS gravity\"][\"Degree\"]:\n inpFile[21] = \"gord: {:3d}\\n\".format(model[\"NS gravity\"][\"Order\"])\n else:\n raise ValueError(\"Order {0:d} of the gravity field is greater than degree {1:d}\".format(model[\"NS gravity\"][\"Order\"],model[\"NS gravity\"][\"Degree\"]))\n \n\n\n # Integration settings\n integ = gSettings[\"Integration\"]\n inpFile[29] = \"tol: {:22.15E}\\n\".format(integ[\"Tolerance\"])\n inpFile[30] = \"tspan: {:22.15E}\\n\".format(integ[\"Duration\"] * 365.25)\n inpFile[31] = \"tstep: {:22.15E}\\n\".format(integ[\"Step\"])\n inpFile[39] = \"eqs: {:2d}\\n\".format(integ[\"Equations\"])\n\n\n\n # Output settings\n inpFile[44] = \"verb: 0\\n\"\n inpFile[45] = \"out: \" + os.path.abspath(os.path.join(dirPath, ' '))\n\n\n with open(os.path.join(dirPath,'input.txt'),'w') as f:\n f.writelines(inpFile)", "def _read_input_file(self):\n pass", "def readTheFile(path, gui):\n global names\n global SAMPLES_PARTS\n file = open(path, 'r')\n names = file.readline().split()\n names = names[5:]\n SAMPLES_PARTS = [0, 0]\n for name in names:\n if \"Acute\" in name:\n SAMPLES_PARTS[0] += 1\n elif \"Chall\" in name:\n SAMPLES_PARTS[1] += 1\n SAMPLES_PARTS[1] += SAMPLES_PARTS[0]\n line = 
file.readline()\n data = []\n counter = 1\n gui.write_to_output(\"\\n\")\n while line != '':\n if counter % 1000 == 0:\n gui.write_to_output(\"Done reading \" + str(counter) + \" lines\\n\", overwrite=True)\n counter += 1\n columns = line.split()\n reads = np.array([float(x) for x in columns[5:]])\n name = columns[0]\n chrm = columns[1]\n if chrm == \"chrM\":\n line = file.readline()\n continue\n start = int(columns[2])\n end = int(columns[3])\n if abs(end - start) > 500:\n line = file.readline()\n continue\n strand = columns[4]\n data.append(Gene(name, reads, np.array([start, end]).astype(np.int), strand, chrm))\n line = file.readline()\n gui.write_to_output(\"Done reading \" + str(counter) + \" lines...Now sorting...\\n\", overwrite=True)\n return list(sorted(data, key=lambda x: x.getName()))", "def ReadBasicInfo():\r\n\r\n EquilibriumStep, ProductionStep,HEPCP,HEPCE,Multiple=10000000,10000000,100,100,2\r\n InputPath,OutputPath,AtomParameterPath,TaskSuffix,MaterialInputFormat='..','..','..','','mol'\r\n GasType,GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,\\\r\n TorqueSetting,MuSiCSetting,Nodes=[],[],[],[],[],[],[],[],['1:ppn=1']\r\n CutOff,GridSpacingP,GridSpacingE=12.8,2.0,2.0\r\n MakeGCMC,UsePmap,UseEmap,UsePost,MakePmap,MakeEmap,MakeTorque,KeyOne,KeyTwo,\\\r\n PDBCharges = False,False,False,False,False,False,False,False,False,False\r\n\r\n with open('GlueParameters', 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList = Line.strip().split()\r\n if len(WordList)>1 or KeyOne==True or KeyTwo==True:\r\n if WordList[0]=='#':\r\n continue\r\n\r\n # Controlled part\r\n elif WordList[0] == 'MakeGCMC:' and WordList[1] == 'open':\r\n MakeGCMC = True\r\n elif WordList[0] == 'UsePmap:' and WordList[1] == 'yes':\r\n UsePmap = True\r\n elif WordList[0] == 'UseEmap:' and WordList[1] == 'yes':\r\n UseEmap = True\r\n elif WordList[0] == 'UsePost:' and WordList[1] == 'yes':\r\n UsePost = True\r\n elif WordList[0] == 'MakePmap:' and WordList[1] == 'open':\r\n MakePmap = True\r\n elif WordList[0] == 'MakeEmap:' and WordList[1] == 'open':\r\n MakeEmap = True\r\n elif WordList[0] == 'MakeTorque:' and WordList[1] == 'open':\r\n MakeTorque = True\r\n elif WordList[0] == 'UseChargesFromPDBFile:' and WordList[1] == 'yes':\r\n PDBCharges = True\r\n\r\n # Basic part\r\n elif WordList[0]=='InputPath:':\r\n InputPath=WordList[1]\r\n elif WordList[0]=='MaterialInputFormat:':\r\n MaterialInputFormat=WordList[1]\r\n elif WordList[0]=='OutputPath:':\r\n OutputPath=WordList[1]\r\n elif WordList[0]=='AtomParameterPath:':\r\n AtomParameterPath=WordList[1]\r\n elif WordList[0] == 'GasType:':\r\n GasType = list(WordList[1:])\r\n elif WordList[0] == 'GasAtomTypeNum:':\r\n\r\n for i in WordList[1:]:\r\n GasAtomTypeNum.append(int(i))\r\n\r\n elif WordList[0] == 'GasAtomType:':\r\n GasAtomType = list(WordList[1:])\r\n elif WordList[0] == 'Multiple:':\r\n Multiple = int(WordList[1])\r\n elif WordList[0] == 'CutOff:':\r\n CutOff = float(WordList[1])\r\n\r\n # GCMC part\r\n\r\n elif WordList[0] == 'GasPartialPressure:':\r\n\r\n for j in WordList[1:]:\r\n GasPartialPressure.append(str(j))\r\n\r\n elif WordList[0] == 'TemperatureList(K):':\r\n\r\n for l in WordList[1:]:\r\n TemperatureList.append(float(l))\r\n\r\n elif WordList[0] == 'PressureList(kPa):':\r\n\r\n for k in WordList[1:]:\r\n PressureList.append(float(k))\r\n\r\n elif WordList[0] == 'EquilibriumStep:':\r\n EquilibriumStep = int(WordList[1])\r\n elif WordList[0] == 'ProductionStep:':\r\n ProductionStep = 
int(WordList[1])\r\n\r\n # Pmap part\r\n elif WordList[0] == 'GridSpacingP(Ang):':\r\n GridSpacingP = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffP(kJ/mol):':\r\n HEPCP = int(WordList[1])\r\n\r\n # Emap part\r\n elif WordList[0] == 'GridSpacingE(Ang):':\r\n GridSpacingE = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffE(kJ/mol):':\r\n HEPCE = int(WordList[1])\r\n\r\n # Torque part\r\n elif WordList[0] == 'Nodes:':\r\n Nodes = WordList[1:]\r\n elif WordList[0] == 'TaskSuffix:':\r\n TaskSuffix = WordList[1]\r\n elif WordList[0] == 'TorqueSetting:':\r\n KeyOne = True\r\n elif WordList[0] == 'MuSiCSetting:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif WordList[0] == 'END':\r\n KeyTwo = False\r\n elif KeyOne == True:\r\n TorqueSetting.append(Line)\r\n elif KeyTwo == True:\r\n MuSiCSetting.append(Line)\r\n\r\n return (InputPath,OutputPath,AtomParameterPath,MakeTorque,GasType,\r\n GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,CutOff,MakeGCMC,UsePmap,\r\n UseEmap,UsePost,MakePmap,MakeEmap,EquilibriumStep,ProductionStep,GridSpacingP,HEPCP,GridSpacingE,HEPCE,\r\n Multiple,TorqueSetting,MuSiCSetting,Nodes,TaskSuffix,PDBCharges,MaterialInputFormat)", "def loadParameters (self, filePath):\n #productive #onButton\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n config = ConfigParser.RawConfigParser()\n config.read(filePath)\n\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\n gradient = config.getboolean('BooleanSection', 'gradient')\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\n autoStopTip = config.getboolean('BooleanSection', 'autoStopTip')\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\n maxLength = config.getboolean('BooleanSection', 'maxLength')\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\n\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\n exponent = config.getint('IntegerSection', 'exponent')\n distanceMax = config.getint('IntegerSection', 'distanceMax')\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\n \n widget.autoCorrectTip.checked = autoCorrectTip\n widget.invertedContrast.checked = invertedContrast\n widget.gradient.checked = gradient \n widget.filterControlPoints.checked = filterControlPoints\n widget.drawFiducialPoints.checked = drawFiducialPoints\n widget.autoStopTip.checked = autoStopTip\n widget.extendNeedle.checked = extendNeedle\n widget.maxLength.checked = maxLength\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\n\n widget.realNeedleLength.value = realNeedleLength\n widget.sigmaValue.value = sigmaValue\n widget.gradientPonderation.value = gradientPonderation\n widget.exponent.value = exponent\n widget.distanceMax.value = distanceMax\n widget.nbRotatingIterations.value = 
nbRotatingIterations\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\n widget.radiusNeedleParameter.value = radiusNeedleParameter\n widget.algoVersParameter.value = algoVersParameter\n print \"algoVers: \",algoVersParameter\n print \"Parameters successfully loaded!\"", "def read_input_file():\n \n global input\n \n config = ConfigParser.RawConfigParser()\n config.read(os.path.join(os.getcwd(), 'INPUT.cfg'))\n\n input = {}\n input['datapath'] = config.get('Address_info', 'datapath')\n input['inter_address'] = config.get('Address_info', 'interactive_address')\n input['target_folder'] = config.get('Address_info', 'target_folder')\n input['save_folder'] = config.get('Address_info', 'save_folder')\n \n if not os.path.isabs(input['datapath']):\n input['datapath'] = os.path.join(os.getcwd(), input['datapath'])\n \n if not os.path.isabs(input['inter_address']):\n input['inter_address'] = os.path.join(os.getcwd(), input['inter_address'])\n \n if not os.path.isabs(input['target_folder']):\n input['target_folder'] = os.path.join(os.getcwd(), input['target_folder'])\n \n if not os.path.isabs(input['save_folder']):\n input['save_folder'] = os.path.join(os.getcwd(), input['save_folder'])\n \n \n input['min_date'] = str(eval(config.get('Event_Request', 'min_datetime')))\n input['max_date'] = str(eval(config.get('Event_Request', 'max_datetime')))\n input['min_mag'] = config.getfloat('Event_Request', 'min_magnitude')\n input['max_mag'] = config.getfloat('Event_Request', 'max_magnitude')\n input['min_depth'] = config.getfloat('Event_Request', 'min_depth')\n input['max_depth'] = config.getfloat('Event_Request', 'max_depth')\n input['evlonmin'] = config.getfloat('Event_Request', 'evlonmin')\n input['evlonmax'] = config.getfloat('Event_Request', 'evlonmax')\n input['evlatmin'] = config.getfloat('Event_Request', 'evlatmin')\n input['evlatmax'] = config.getfloat('Event_Request', 'evlatmax')\n input['preset'] = config.getfloat('Event_Request', 'preset')\n input['offset'] = config.getfloat('Event_Request', 'offset')\n input['max_result'] = config.getint('Event_Request', 'max_results')\n \n input['get_events'] = config.get('Request', 'get_events')\n input['input_period'] = config.get('Parallel', 'input_period')\n input['IRIS'] = config.get('Request', 'IRIS')\n input['ArcLink'] = config.get('Request', 'ArcLink')\n input['time_iris'] = config.get('Request', 'time_iris')\n input['time_arc'] = config.get('Request', 'time_arc')\n \n input['nodes'] = config.get('Parallel', 'nodes')\n\n input['waveform'] = config.get('Request', 'waveform')\n input['response'] = config.get('Request', 'response')\n input['SAC'] = config.get('Request', 'SAC')\n \n input['net'] = config.get('specifications_request', 'network')\n input['sta'] = config.get('specifications_request', 'station')\n \n if config.get('specifications_request', 'location') == \"''\":\n input['loc'] = ''\n elif config.get('specifications_request', 'location') == '\"\"':\n input['loc'] = ''\n else:\n input['loc'] = config.get('specifications_request', 'location')\n \n input['cha'] = config.get('specifications_request', 'channel')\n\n if config.get('specifications_request', 'lat') == 'None':\n input['lat_cba'] = None\n else:\n input['lat_cba'] = config.get('specifications_request', 'lat')\n \n if config.get('specifications_request', 'lon') == 'None':\n input['lon_cba'] = None\n else:\n input['lon_cba'] = config.get('specifications_request', 'lon')\n \n if config.get('specifications_request', 
'minradius') == 'None':\n input['mr_cba'] = None\n else:\n input['mr_cba'] = config.get('specifications_request', 'minradius')\n \n if config.get('specifications_request', 'maxradius') == 'None':\n input['Mr_cba'] = None\n else:\n input['Mr_cba'] = config.get('specifications_request', 'maxradius')\n \n \n if config.get('specifications_request', 'minlat') == 'None':\n input['mlat_rbb'] = None\n else:\n input['mlat_rbb'] = config.get('specifications_request', 'minlat')\n \n if config.get('specifications_request', 'maxlat') == 'None':\n input['Mlat_rbb'] = None\n else:\n input['Mlat_rbb'] = config.get('specifications_request', 'maxlat')\n \n if config.get('specifications_request', 'minlon') == 'None':\n input['mlon_rbb'] = None\n else:\n input['mlon_rbb'] = config.get('specifications_request', 'minlon')\n \n if config.get('specifications_request', 'maxlon') == 'None':\n input['Mlon_rbb'] = None\n else:\n input['Mlon_rbb'] = config.get('specifications_request', 'maxlon')\n\n \n input['test'] = config.get('test', 'test')\n input['test_num'] = config.getint('test', 'test_num')\n \n input['update_interactive'] = config.get('update', 'update_interactive')\n input['iris_update'] = config.get('update', 'iris_update')\n input['arc_update'] = config.get('update', 'arc_update')\n\n input['QC_IRIS'] = config.get('QC', 'QC_IRIS')\n input['QC_ARC'] = config.get('QC', 'QC_ARC')\n \n input['email'] = config.get('email', 'email')\n input['email_address'] = config.get('email', 'email_address')\n \n input['report'] = config.get('report', 'report')\n \n input['corr_unit'] = config.get('instrument_correction', 'corr_unit')\n input['pre_filt'] = config.get('instrument_correction', 'pre_filter')\n \n input['plt_event'] = config.get('ObsPyPT', 'plot_event')\n input['plt_sta'] = config.get('ObsPyPT', 'plot_sta')\n input['plt_ray'] = config.get('ObsPyPT', 'plot_ray')\n\n input['llcrnrlon'] = config.getfloat('ObsPyPT', 'llcrnrlon')\n input['urcrnrlon'] = config.getfloat('ObsPyPT', 'urcrnrlon')\n input['llcrnrlat'] = config.getfloat('ObsPyPT', 'llcrnrlat')\n input['urcrnrlat'] = config.getfloat('ObsPyPT', 'urcrnrlat')\n \n input['lon_0'] = config.getfloat('ObsPyPT', 'lon_0')\n input['lat_0'] = config.getfloat('ObsPyPT', 'lat_0')", "def read_graph_ui(self):\n filename = input('enter filename: ')\n try:\n self._graph = read_graph(filename)\n except FileNotFoundError:\n print('invalid filename! 
')", "def t9_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename)\n self.t9_filename = filename\n self.t9_orginfilename = orgin_file", "def t9_loadSMFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"data/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename).split('.')[0]\n self.t9_smfilename = filename\n self.t9_orginfilename = orgin_file", "def main():\r\n activities = [\"EDA\",\"Plots\"]\t\r\n choice = st.sidebar.selectbox(\"Select Activities\",activities)\r\n\r\n if choice == 'EDA':\r\n result = st.file_uploader(\"Upload\", type=\"txt\")\r\n\r\n # filename =st.text_input('Enter a file path:')\r\n try:\r\n if result:\r\n # Process you file here\r\n data = result.getvalue()\r\n # file1 = open(filename,\"r\") \r\n # data=file1.read()\r\n data=data.lower().replace('\\n','')\r\n # file1.close() \r\n st.write(data[:200])\r\n obj=Lyrics()\r\n add_split = st.sidebar.slider(\r\n 'Select a split of values',\r\n 2, 25\r\n )\r\n st.write(\"Select Split from Left Slider .\")\r\n if add_split>3:\r\n # split=st.text_input(\"Enter String split for Prediction :\")\r\n gen=obj.generator(data=data,split=int(add_split))\r\n if gen:\r\n startString=st.text_input(\"Enter Starting String for Prediction :\")\r\n if len(startString)>0:\r\n val=st.sidebar.slider(\r\n \"How many char's want's to Prediction :\",\r\n 100, 1000\r\n )\r\n st.write(\"Select no of char's want's to Prediction from Left Slider .\")\r\n if val>100:\r\n final_op=obj.future_data(startString,val,add_split)\r\n st.write(final_op)\r\n except FileNotFoundError:\r\n st.error('File not found.')\r\n except IndexError:\r\n st.error('Select only one Author. ')\r\n except KeyError:\r\n st.error(\"Enter correct Integer. 
\")", "def loadParameters (self, filePath):\r\n # productive #onButton\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n config = ConfigParser.RawConfigParser()\r\n config.read(filePath)\r\n\r\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\r\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\r\n gradient = config.getboolean('BooleanSection', 'gradient')\r\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\r\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\r\n autoStopTip = config.getboolean('BooleanSection', 'autoStopTip')\r\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\r\n maxLength = config.getboolean('BooleanSection', 'maxLength')\r\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\r\n\r\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\r\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\r\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\r\n exponent = config.getint('IntegerSection', 'exponent')\r\n try:\r\n radiusMax = config.getint('IntegerSection', 'distanceMax') # try deprecated parameter name (old parameter files)\r\n except:\r\n radiusMax = config.getint('IntegerSection', 'radiusMax')\r\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\r\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\r\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\r\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\r\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\r\n\r\n widget.autoCorrectTip.checked = autoCorrectTip\r\n widget.invertedContrast.checked = invertedContrast\r\n widget.gradient.checked = gradient\r\n widget.filterControlPoints.checked = filterControlPoints\r\n widget.drawFiducialPoints.checked = drawFiducialPoints\r\n widget.autoStopTip.checked = autoStopTip\r\n widget.extendNeedle.checked = extendNeedle\r\n widget.maxLength.checked = maxLength\r\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\r\n\r\n widget.realNeedleLength.value = realNeedleLength\r\n widget.sigmaValue.value = sigmaValue\r\n widget.gradientPonderation.value = gradientPonderation\r\n widget.exponent.value = exponent\r\n widget.radiusMax.value = radiusMax\r\n widget.nbRotatingIterations.value = nbRotatingIterations\r\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\r\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\r\n widget.radiusNeedleParameter.value = radiusNeedleParameter\r\n widget.algoVersParameter.value = algoVersParameter\r\n print \"#############\"\r\n print \"algoVers: \", algoVersParameter\r\n print \"Parameters successfully loaded!\"", "def t1_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = self.filenameparser(filename)\n self.t1_filename = filename", "def t12_loadSMFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"data/\" + self.filenameparser(filename)\n orgin_file = self.filenameparser(filename)\n self.t12_smfilename = filename\n self.t12_orginfilename = orgin_file", "def read_inputs(self):\n curdir = os.getcwd()\n os.chdir(self.fst_dir)\n rstat = self.readFST()\n if rstat == 0:\n os.chdir(curdir)\n return 0\n # the names of the next files are 
either set by caller or come from the reading the FAST file\n rstat = self.readNoise()\n rstat = self.readAD()\n rstat = self.readBlade()\n rstat = self.readPtfm()\n os.chdir(curdir)", "def read_file(self):\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n self.filename = askopenfilename(title='Select Hospital Text File') # show an \"Open\" dialog box and return the path to the selected file", "def rd_cmd_stream(self):\n # working directory\n line = raw_input(\"Enter the working directory(press enter to use default: [default: .]\\n>\")\n mydir = line.strip()\n if mydir == \"\":\n mydir = '.'\n os.chdir(mydir)\n print \"CURRENT WORKING DIRECTORY:\"\n print os.getcwd()\n #\n line = raw_input(\"Enter the Template gjf file name (i.e. gau-template-bsse.gjf):\\n(Enter only if use default value)\\n> \")\n mystr = line.strip()\n if mystr != \"\":\n self.config['tfile'] = mystr\n\n line = raw_input(\"Enter the xyz coord file name (i.e. model.xyz):\\n(Enter only if use default value)\\n> \")\n mystr = line.strip()\n if mystr != \"\":\n self.config['xyzfile'] = mystr\n\n line = raw_input(\"Enter the output gjf file name (i.e. gau_*.gjf):\\n(Enter only if use default value)\\n> \")\n mystr = line.strip()\n if mystr != \"\":\n self.config['jobfile'] = mystr\n line = raw_input(\"Enter how many step to dump the xyz struct..(default: 1)\\n> \")\n myincr = line.strip()\n if myincr != \"\":\n self.config['incr'] = int(myincr)\n \n return", "def read(self, run):\n # read the file\n self['run'] = run[0:run.rfind('.xml')]\n f = open(run)\n for line in f:\n \n if line.find('SDSU Exec') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n self['application'] = line[n1:n2]\n\n elif line.find('<detector_status') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n if line[n1:n2] != 'Ultraspec':\n raise Exception, 'Run ' + run + ' is not an Ultraspec file.'\n \n elif line.find('SPEED') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['speed'] = line[n1:n2]\n \n elif line.find('X_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x_bin'] = line[n1:n2]\n \n elif line.find('Y_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y_bin'] = line[n1:n2]\n \n # first window \n \n elif line.find('X1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_start'] = line[n1:n2]\n \n elif line.find('X1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_size'] = line[n1:n2]\n \n elif line.find('Y1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_start'] = line[n1:n2]\n \n elif line.find('Y1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_size'] = line[n1:n2]\n \n # second window\n \n elif line.find('X2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_start'] = line[n1:n2]\n \n elif line.find('X2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_size'] = line[n1:n2]\n \n elif line.find('Y2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_start'] = line[n1:n2]\n \n elif line.find('Y2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_size'] = line[n1:n2]\n \n elif line.find('<target>') >= 0:\n n1 = line.index('target') + 7\n n2 = line.index('<', n1)\n self['target'] = line[n1:n2]\n\n elif line.find('<grating>') >= 0:\n 
n1 = line.index('grating') + 8\n n2 = line.index('<', n1)\n self['grating'] = line[n1:n2]\n\n elif line.find('<slit_width>') >= 0:\n n1 = line.index('slit_width') + 11\n n2 = line.index('<', n1)\n self['slit_width'] = line[n1:n2]\n\n elif line.find('<slit_angle>') >= 0:\n n1 = line.index('slit_angle') + 11\n n2 = line.index('<', n1)\n self['slit_angle'] = line[n1:n2]\n \n elif line.find('<filters>') >= 0:\n n1 = line.index('filters') + 8\n n2 = line.index('<', n1)\n self['filters'] = line[n1:n2]\n\n elif line.find('<ID>') >= 0:\n n1 = line.index('ID') + 3\n n2 = line.index('<', n1)\n self['ID'] = line[n1:n2]\n\n elif line.find('<PI>') >= 0:\n n1 = line.index('PI') + 3\n n2 = line.index('<', n1)\n self['PI'] = line[n1:n2]\n\n elif line.find('<comment>') >= 0:\n n1 = line.index('comment') + 8\n n2 = line.index('<', n1)\n self['comment'] = line[n1:n2]\n \n\n # check that we have found what we expected to find\n if 'application' not in self:\n raise Exception, 'Failed to find application name in ' + run\n\n if self.is_not_power_onoff():\n\n if 'x_bin' not in self:\n raise Exception, 'Failed to find X_BIN in ' + run\n\n if 'y_bin' not in self:\n raise Exception, 'Failed to find Y_BIN in ' + run\n\n if 'x1_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x1_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y1_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y1_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'x2_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x2_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y2_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y2_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'target' not in self:\n self['target'] = 'UNKNOWN'\n\n if 'filters' not in self:\n self['filters'] = '---'\n\n if 'grating' not in self:\n self['grating'] = '---'\n\n if 'slit_width' not in self:\n self['slit_width'] = '---'\n\n if 'slit_angle' not in self:\n self['slit_angle'] = '---'\n\n if 'ID' not in self:\n self['ID'] = 'UNKNOWN'\n\n if 'PI' not in self:\n self['PI'] = 'UNKNOWN'", "def t5_loadFile(self):\n print \"OriginFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"data/\" + self.filenameparser(filename)\n self.t5_filename = filename", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def call_open(self):\n \n appliance_read=[]\n #load_file=open('model_numbers.emm','r') #simple read\n load_file=tkFileDialog.askopenfile()\n \n for line in load_file:\n appliance_read.append(line) #replicates input strings into list\n \n self.enteredNumberAppliance1.set(int(appliance_read[0]))\n self.enteredNumberAppliance2.set(int(appliance_read[1]))\n self.enteredNumberAppliance3.set(int(appliance_read[2]))\n self.enteredNumberAppliance4.set(int(appliance_read[3]))\n self.enteredNumberAppliance5.set(int(appliance_read[4]))\n self.enteredNumberAppliance6.set(int(appliance_read[5]))\n self.enteredNumberAppliance7.set(int(appliance_read[6]))\n self.enteredNumberAppliance8.set(int(appliance_read[7]))\n self.enteredNumberAppliance9.set(int(appliance_read[8]))\n \n self.enteredPowerAppliance1.set(float(appliance_read[9]))\n 
self.enteredPowerAppliance2.set(float(appliance_read[10]))\n self.enteredPowerAppliance3.set(float(appliance_read[11]))\n self.enteredPowerAppliance4.set(float(appliance_read[12]))\n self.enteredPowerAppliance5.set(float(appliance_read[13]))\n self.enteredPowerAppliance6.set(float(appliance_read[14]))\n self.enteredPowerAppliance7.set(float(appliance_read[15]))\n self.enteredPowerAppliance8.set(float(appliance_read[16]))\n self.enteredPowerAppliance9.set(float(appliance_read[17]))\n \n self.dropdown_variable1.set(float(appliance_read[18]))\n self.dropdown_variable2.set(float(appliance_read[19]))\n self.dropdown_variable3.set(float(appliance_read[20]))\n self.dropdown_variable4.set(float(appliance_read[21]))\n self.dropdown_variable5.set(float(appliance_read[22]))\n self.dropdown_variable6.set(float(appliance_read[23]))\n self.dropdown_variable7.set(float(appliance_read[24]))\n self.dropdown_variable8.set(float(appliance_read[25]))\n self.dropdown_variable9.set(float(appliance_read[26]))\n \n print(appliance_read)\n return", "def t8_loadFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = self.filenameparser(filename)\n self.t8_filename = filename", "def read():\n try:\n #Open and parse input files.\n nodeFile = open(sys.argv[1], 'r')\n edgeFile = open(sys.argv[2], 'r')\n\t\n parse_nodes(nodeFile)\n parse_edges(edgeFile)\n nodeFile.close()\n\tedgeFile.close()\n\treturn \n except:\n print 'problem parsing input'\n #Put here some more information - usage...", "def main():\n\n # Check command line arguments\n arguments = sys.argv[1:]\n if len(arguments) != 1:\n print(\"Error! One command line argument is required.\")\n sys.exit()\n\n else:\n print(\"\\nNow opening file...\")\n # Print the path provided and try to open the file for reading\n path = os.getcwd()+ \"/\" + arguments[0]\n print(path) #print path\n names = Names()\n devices = Devices(names)\n network = Network(names, devices)\n monitors = Monitors(names, devices, network)\n scanner = Scanner(path, names)\n parser = Parser(names, devices, network, monitors, scanner)\n Error.reset()\n parser.parse_network()", "def read_input():\n # Use with to make sure the file will be closed after the block executed\n with open('snapshot_input.txt') as f:\n # Split the line at line breaks\n x = f.read().splitlines()\n # Get the data of restructuring, three positive integers N , C , and D\n # Use generator expression for time and space efficiency\n restructuring_info = (i.split() for i in x if len(i.split())==3)\n # Get the data of single machine, four integers D, P, R and G\n machine_info = (i.split() for i in x if len(i.split())!=3)\n # Get the length of restructuring data\n length = sum(1 for i in x if len(i.split())==3)\n\n return restructuring_info, machine_info, length" ]
[ "0.6534614", "0.6480986", "0.6264293", "0.61416334", "0.6109954", "0.5936167", "0.5849614", "0.5842282", "0.5826436", "0.578414", "0.57564574", "0.57386065", "0.5683754", "0.567388", "0.5645699", "0.5645465", "0.5632296", "0.56240624", "0.56231207", "0.55820847", "0.5581234", "0.5557954", "0.55438036", "0.5512566", "0.5494559", "0.5474598", "0.5466062", "0.5449312", "0.5427231", "0.5425859" ]
0.68253434
0
Print a table displaying the parameters read from the steering file. Requires the 'tabulate' library.
def print_para_table(s): if MODE == 1: t = [['Parameter', 'Value', 'Unit'], ['Number of bends', NBENDS, '/'], ['Width', WIDTH, 'm'], ['Depth', DEPTH, 'm'], ['Length', LAMBDA*(NBENDS+1), 'm'], ['Arc wavelength', LAMBDA, 'm'], ['Slope', SLOPE, '/'], ['Streamwise resolution', DS, 'm'], ['Transverse resolution', np.around(INTERVAL, decimals=4), 'm'], ['Streamwise # of pts', s.size + 2*int(LAMBDA/2/DS), '/'], ['Transverse # of pts', NUM*2+1, '/']] elif MODE == 2: if FNAME[0].islower(): f = FNAME[0].upper() + FNAME[1:] else: f = FNAME t = [['Parameter', 'Value', 'Unit'], ['River name', f.rsplit('.', 1)[0], '/'], ['Width', WIDTH, 'm'], ['Depth', DEPTH, 'm'], ['Length', np.round(s[-1], decimals=2), 'm'], ['Slope', SLOPE, '/'], ['Streamwise resolution', np.round(np.mean(np.diff(s)), decimals=2), 'm'], ['Transverse resolution', np.round(INTERVAL, decimals=2), 'm'], ['Streamwise # of pts', s.size, '/'], ['Transverse # of pts', NUM*2+1, '/']] print(tabulate(t, tablefmt='psql', stralign='right', headers='firstrow'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n tab = Texttable()\n tab.add_rows([[\"Parameter\", \"Value\"]])\n tab.add_rows([[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(tab.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def print_table(ledger):\n\n table = PrettyTable() # defines a PrettyTable object\n\n table.field_names = [\n \"hospital\",\n \"patient\",\n \"status\",\n \"nonce\",\n \"prev_hash\",\n \"a\",\n \"b\",\n \"c\",\n \"current_hash\",\n ] # define field names for table\n\n for block in ledger:\n table.add_row(\n [\n block[\"hospital\"],\n block[\"patient\"],\n block[\"status\"],\n block[\"nonce\"],\n block[\"prev_hash\"],\n block[\"a\"],\n block[\"b\"],\n block[\"c\"],\n block[\"current_hash\"],\n ]\n ) # add data to table\n\n print(\"\\n\\n\" + color.BOLD + \"Printing Your Ledger:\" + color.END)\n print(table) # print prettytable of patient info", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def print_table(self, table):\n raise NotImplementedError('print_table method not defined!')", "def print_table(self) -> None:\n if (self.probability_links == None):\n print(\"+--------+\")\n print(f\"| P({self.key:1s}) |\")\n print(\"+--------+\")\n print(f\"| {self.probability_values[0]:0.04f} |\")\n print(\"+--------+\")\n else:\n arg_len = 2 + len(' '.join(self.probability_links.keys()))\n param_len = 2 + \\\n max(6, len(\"P(A|)\" + \",\".join(self.probability_links.keys())))\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n print(\n f\"| {' '.join(self.probability_links.keys())} | P({self.key}|{','.join(self.probability_links.keys())}) |\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n 
for i in range(2**len(self.probability_links.keys())):\n # Gives us a string binary value to make truth table off of\n bool_key = f\"{i:0{len(self.probability_links.keys())}b}\"\n print(\n f\"| {' '.join(['T' if bool_key[j] == '0' else 'F' for j in range(len(self.probability_links.keys()))])} | {f'{self.probability_values[i]:0.04f}':<{param_len-1}s}|\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def console(pro):\n transect = pro['transect' ] \n nm = pro['nm120r' ].flatten()#[pro['m120swr_'][0,:]]\n t = pro['t120r' ].flatten()#[pro['m120swr_'][0,:]]\n sb = pro['sbliner' ].flatten()#[0][:-1]\n NASC = pro['NASC120swr'].flatten()#[pro['m120swr_'] ]\n pc = pro['pc120swr' ].flatten()#[pro['m120swr_'] ]\n \n # Preallocate table object\n table = io.StringIO()\n \n # Outline alignment and format for table lines, header, and data\n line = '+{:-^10}+{:-^11}+{:-^25}+{:-^8}+{:-^13}+{:-^11}+ \\n'\n header = '{:<9} | {:<9} | {:<23} | {:>6} | {:>11} |{:>12} \\n'\n data = '| {:<3d} | {:<9.3f} | {:<15} | {:>6.1f} | {:>11.2f} | {:>9.1f} | \\n'\n \n # Write table lines and header\n table.write(line.format('','','','','',''))\n table.write(header.format('| Transect','N. miles','Time','Seabed','NASC','% samples |')) \n table.write(line.format('','','','','','')) \n \n # Populate table with data\n for nmi, ti, sbi, NASCi, pci in zip(nm, t, sb, NASC, pc):\n table.write(data.format(transect, nmi, ti, sbi, NASCi, pci))\n \n # Close table with a line\n table.write(line[:-2].format('','','','','',''))\n \n # Print table in the console\n table = table.getvalue() \n print(table)", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"", "def print_table(hdrs, flag=False, data=[],fmt='psql'):\n\tres = cur.fetchall()\n\tif flag:\n\t\tres = data\n\tprint(tabulate(res, headers=hdrs, tablefmt=fmt))", "def print_table(headers, rows):\n try:\n if headers:\n print('\\n')\n print(tabulate.tabulate(\n rows, headers=headers,\n tablefmt=\"plain\", numalign=\"left\"\n ))\n print('\\n')\n except Exception as e:\n print(e.message)", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header 
column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))", "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def print_params(pre, args):\n # Print out arguments used to file\n with open(pre + \"parameters.tsv\", \"w+\") as f:\n tsv_writer = csv.writer(f, delimiter='\\t')\n arg_dict = vars(args)\n for key in arg_dict:\n tsv_writer.writerow([key, arg_dict[key]])", "def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", \"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)" ]
[ "0.6731898", "0.6727638", "0.6693523", "0.66596514", "0.66596514", "0.64847577", "0.6466909", "0.63867766", "0.6372735", "0.63454986", "0.6304587", "0.629846", "0.62803787", "0.62803787", "0.62415814", "0.6233019", "0.621662", "0.6205308", "0.61270964", "0.6085865", "0.60810685", "0.60794157", "0.60794157", "0.6074297", "0.60742676", "0.60683787", "0.60679", "0.60522217", "0.60465264", "0.6022886" ]
0.7408522
0
Print a table displaying the mean, median and mode of the centerline grid size before and after resampling. Requires the 'tabulate' library.
def print_resamp_table(mean1, median1, mode1, mean2, median2, mode2): t = [['Streamwise\nresolution', 'Before ' +'After\nresampling --> resampling', '\nUnit'], ['Mean', str(mean1) + ' --> ' + str(mean2), 'm'], ['Median', str(median1) + ' --> ' + str(median2), 'm'], ['Mode', str(mode1) + ' --> ' + str(mode2), 'm']] print(tabulate(t, tablefmt='psql', stralign='center', headers='firstrow'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summarize_as_table(self):\n h = human_readable_size\n h_throughput = human_readable_throughput\n table = [\n ['Total Time (seconds)', '%.3f' % self.total_time,\n self.std_dev_total_time],\n ['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],\n ['Maximum CPU (percent)', '%.1f' % self.max_cpu,\n self.std_dev_max_cpu],\n ['Maximum Sent Throughput', h_throughput(self.max_sent_throughput),\n h_throughput(self.max_sent_throughput)],\n ['Maximum Recv Throughput', h_throughput(self.max_recv_throughput),\n h_throughput(self.max_recv_throughput)],\n ['Average Memory', h(self.average_memory),\n h(self.std_dev_average_memory)],\n ['Average CPU (percent)', '%.1f' % self.average_cpu,\n self.std_dev_average_cpu],\n ['Average Sent Throughput',\n h_throughput(self.average_sent_throughput),\n h_throughput(self.average_sent_throughput)],\n ['Average Recv Throughput',\n h_throughput(self.average_recv_throughput),\n h_throughput(self.average_recv_throughput)],\n ]\n return tabulate(\n table,\n headers=[\n 'Metric over %s run(s)' % (self.total_files),\n 'Mean',\n 'Standard Deviation'\n ],\n tablefmt=\"grid\"\n )", "def basic_summary(data):\n headers = [\"Split\", \"Samples\", \"Height\", \"Width\", \"Channels\", \"Classes\"]\n print(table_format(headers, header = True))\n for split in [\"train\", \"valid\", \"test\"]:\n X, y = data[split]\n n, h, w, c = X.shape\n n_classes = np.unique(y).shape[0]\n row = [split, n, h, w, c, n_classes]\n print(table_format(row))", "def start_table(self):\n self.col_widths = []\n self.result = \"\"", "def table(nb, max):\n\ti = 0\n\twhile i < max:\n\t\tprint(i + 1, \"*\", nb, \"=\", (i + 1) * nb)\n\t\ti += 1", "def print_tabulated_output(array_obj, headers):\n print()\n print(tabulate(array_obj, headers=headers))\n print()", "def print_summary(stim_table):\n print(\n '{:<20}{:>15}{:>15}\\n'.format('Colname', 'No. 
conditions', 'Mean N/cond')\n )\n for colname in stim_table.columns:\n conditions, occurrences = np.unique(\n np.nan_to_num(stim_table[colname]), return_counts = True\n )\n print(\n '{:<20}{:>15}{:>15.1f}'.format(\n colname, len(conditions), np.mean(occurrences)\n )\n )", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return", "def _print_table(stats):\n max_key_len = max([len(key) for key in stats])\n width_right = 15\n width_left = max(width_right, max_key_len)\n divider = '+-' + '-' * width_left + '-+-' + '-' * width_right + '-+'\n\n def get_format_char(value):\n if isinstance(value, int):\n return 'd'\n elif isinstance(value, float):\n return '.4f'\n else:\n return 's'\n\n print(divider)\n for name, value in stats.items():\n left_format = f':>{width_left}s'\n right_format = f':<{width_right}{get_format_char(value)}'\n line_format = f'| {{{left_format}}} | {{{right_format}}} |'\n line = line_format.format(name, value)\n print(line)\n print(divider)", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))", "def print_cap_table(info: CapTableInfo, max_entries: int, accuracy: int):\n\n if not info.token_status.end_block_timestamp:\n print(\n \"{}Token address {} not scanned. 
Please run tokfetch token-scan first.{}\".format(colorama.Fore.RED,\n info.token_status.address,\n colorama.Fore.RESET))\n return\n\n print(\n \"Token address: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX, info.token_status.address, colorama.Fore.RESET))\n print(\"Name: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX, info.token_status.name, colorama.Fore.RESET))\n print(\"Symbol: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX, info.token_status.symbol, colorama.Fore.RESET))\n print(\n \"Total supply: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX, info.token_status.total_supply, colorama.Fore.RESET))\n print(\"Accounted supply: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX, info.total_balance, colorama.Fore.RESET))\n print(\"Holder count: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX, len(info.entries), colorama.Fore.RESET))\n print(\"Cap table database updated at: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX,\n friendly_time(info.token_status.end_block_timestamp),\n colorama.Fore.RESET))\n print(\"Last token transfer at at: {}{}{}\".format(colorama.Fore.LIGHTCYAN_EX,\n friendly_time(info.last_token_transfer_at), colorama.Fore.RESET))\n\n print_entries = info.entries[0:max_entries]\n\n table = []\n\n balance_q = Decimal(10) ** Decimal(-accuracy)\n percent_q = Decimal(\"0.01\")\n\n # Tuplify\n for idx, entry in enumerate(print_entries, start=1):\n table.append((\n idx,\n entry.name,\n entry.address,\n entry.updated_at,\n \"{:,}\".format(entry.balance.quantize(balance_q)),\n str(((entry.percent or 0) * Decimal(100)).quantize(percent_q)),\n ))\n\n from tabulate import tabulate # https://bitbucket.org/astanin/python-tabulate\n output = tabulate(table, headers=[\"#\", \"Name\", \"Address\", \"Last transfer\", \"Balance\", \"%\"], disable_numparse=True)\n print(output)", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def print_stats(self):\n if self.df_avg is None:\n self.collect_stats()\n\n print(\"Simulation Results\")\n print(tabulate(self.df_avg, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"FleetManager stats\")\n print(tabulate(self.manager_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Customer stats\")\n print(tabulate(self.customer_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Transport stats\")\n print(tabulate(self.transport_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Station stats\")\n print(tabulate(self.station_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))", "def _print_stat_rows(title,rows_before,rows_after):\n self.strprint(str(title)+\" : Percent of processed rows = %1.2F\"\\\n %(np.abs(rows_before-rows_after)*100/rows_before))", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_stat(self, returnTable=False):\n summary = PrettyTable([\"Set\", \"Name\", \"Number [-]\", \"Fraction [%]\"])\n summary.align = 'l'\n for name, df in self.subsets.items():\n summary.add_row([name, 'Normal', df[df.abnormal_XR == 0].shape[0], '{:.2%}'.format(df[df.abnormal_XR 
== 0].shape[0] / df.shape[0])])\n summary.add_row([name, 'Abnormal', df[df.abnormal_XR == 1].shape[0], '{:.2%}'.format(df[df.abnormal_XR == 1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Normal known', df[df.semi_label == 1].shape[0], '{:.2%}'.format(df[df.semi_label == 1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Abnormal known', df[df.semi_label == -1].shape[0], '{:.2%}'.format(df[df.semi_label == -1].shape[0] / df.shape[0])])\n summary.add_row([name, 'Unknown', df[df.semi_label == 0].shape[0], '{:.2%}'.format(df[df.semi_label == 0].shape[0] / df.shape[0])])\n if name != 'test' : summary.add_row(['----']*4)\n if returnTable:\n return summary\n else:\n print(summary)", "def print_info(self, mode=COUNT):\n def partition_line(character, num):\n return character * num\n\n self.update_records() # trace records\n self.count_results() # statistical results\n\n #count mode (default) : print statistical results of all kernel\n if mode == self.COUNT:\n table_header = f\"\"\"\n {partition_line('=',73)}\n {_ti_core.arch_name(ti.cfg.arch).upper()} Profiler(count)\n {partition_line('=',73)}\n \"\"\"\n items_header = f\"\"\"\n [ % total count | min avg max ] Kernel name\n \"\"\"\n print(inspect.cleandoc(table_header))\n print(inspect.cleandoc(items_header))\n for key in self._statistical_results:\n result = self._statistical_results[key]\n fraction = result.total_time / self._total_time_ms * 100.0\n #message in one line\n print(\n \"[{:6.2f}% {:7.3f} s {:6d}x |{:9.3f} {:9.3f} {:9.3f} ms] {}\"\n .format(\n fraction,\n result.total_time / 1000.0,\n result.counter,\n result.min_time,\n result.total_time / result.counter, # avg_time\n result.max_time,\n result.name))\n print(f\"{partition_line('-',73)}\")\n #one-line summary\n print(f\"[100.00%] Total kernel execution time: \"\n f\"{self._total_time_ms/1000:7.3f} s \"\n f\"number of records: \"\n f\"{len(self._statistical_results)}\")\n print(f\"{partition_line('=',73)}\")\n\n #trace mode : print records of launched kernel\n if mode == self.TRACE:\n table_header = f\"\"\"\n {partition_line('=',73)}\n {_ti_core.arch_name(ti.cfg.arch).upper()} Profiler(trace)\n {partition_line('=',73)}\n \"\"\"\n items_header = f\"\"\"\n [ % | time ] Kernel name\n \"\"\"\n print(inspect.cleandoc(table_header))\n print(inspect.cleandoc(items_header))\n for record in self._traced_records:\n fraction = record.kernel_time / self._total_time_ms * 100.0\n #message in one line\n print(\"[{:6.2f}% |{:9.3f} ms] {}\".format(\n fraction, record.kernel_time, record.name))\n print(f\"{partition_line('-',73)}\")\n #one-line summary\n print(f\"[100.00%] Total kernel execution time: \"\n f\"{self._total_time_ms/1000:7.3f} s \"\n f\"number of records: {len(self._traced_records)}\")\n print(f\"{partition_line('=',73)}\")", "def display_summary_statistics(tx, column_names=None):\n \n N, D = tx.shape\n \n mean = tx.mean(axis=0)\n median = np.median(tx, axis=0)\n std = tx.std(axis=0)\n max_ = tx.max(axis=0)\n min_ = tx.min(axis=0)\n n_undef = (tx <= -999.0).sum(axis=0)\n pct_undef = (tx <= -999.0).mean(axis=0) * 100\n\n column_names = column_names if column_names is not None else range(D)\n \n print(\" Column | Mean | Median | Std dev | Max | Min | # Undefined | % Undef \")\n for i, (col, m, med, s, mx, mn, nu, pu) in enumerate(zip(column_names, mean, median, std, max_, min_, n_undef, pct_undef)):\n print(f\"{i:2}-{col:27} | {m:8.3f} {med:8.3f} {s:8.3f} {mx:8.3f} \" + \n f\"{mn:8.3f} {nu:10.3f} {pu:7.3f}\")", "def print_results(self, final_table=None):\n\n assert self.info\n\n 
if not final_table:\n final_table = [\"\\n\\n{:-^80}\\n\".format(\"ANALYSIS OF RESULTS\")]\n\n if not self.info.categories[\"integrated\"]:\n final_table.append(\"NO IMAGES INTEGRATED!\")\n else:\n label_lens = [len(v[\"label\"]) for k, v in self.info.stats.items()]\n max_label = int(5 * round(float(np.max(label_lens)) / 5)) + 5\n for k, v in self.info.stats.items():\n if k in (\"lres\", \"res\", \"beamX\", \"beamY\"):\n continue\n line = (\n \"{: <{l}}: max = {:<6.2f} min = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n final_table.append(line)\n\n # TODO: Figure out what to do with summary charts\n # # If more than one integrated image, plot various summary graphs\n # if len(self.info.categories['integrated']) > 1:\n # plot = Plotter(self.params, self.info)\n # if self.params.analysis.summary_graphs:\n # if ( self.params.advanced.processing_backend == 'ha14' and\n # self.params.cctbx_ha14.grid_search.type is not None\n # ):\n # plot.plot_spotfinding_heatmap(write_files=True)\n # plot.plot_res_histogram(write_files=True)\n # med_beamX, med_beamY, pixel_size = plot.plot_beam_xy(write_files=True,\n # return_values=True)\n # else:\n # with warnings.catch_warnings():\n # # To catch any 'mean of empty slice' runtime warnings\n # warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # beamXY_info = plot.calculate_beam_xy()\n # beamX, beamY = beamXY_info[:2]\n # med_beamX = np.median(beamX)\n # med_beamY = np.median(beamY)\n # pixel_size = beamXY_info[-1]\n\n final_table.append(\n \"{: <{l}}: X = {:<4.2f}, Y = {:<4.2f}\"\n \"\".format(\n \"Median Beam Center\",\n self.info.stats[\"beamX\"][\"mean\"],\n self.info.stats[\"beamY\"][\"mean\"],\n l=max_label,\n )\n )\n\n # Special entry for resolution last\n v = self.info.stats[\"res\"]\n final_table.append(\n \"{: <{l}}: low = {:<6.2f} high = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n\n for item in final_table:\n util.main_log(self.info.logfile, item, False)\n self.info.update(final_table=final_table)", "def print_summary_metrics(lst):\n print('*' * 50)\n print(' ' * 16 + 'Summary statistics')\n print('*' * 50)\n print('mean: {} | median: {} | mode: {}'.format(get_mean(lst),\n get_median(lst),\n get_mode(lst)))\n print('range: {} | IQR: {}'.format(get_range(list_nums),\n get_IQR(list_nums)))\n print('\\n')\n print('original list: \\n {}'.format(lst))\n print('sorted list: \\n {}'.format(sorted(lst)))\n print('List without outliers: \\n {}'.format(\n remove_outliers(list_nums)))", "def test_headless_tabulate_format():\n formatter = TabularOutputFormatter(format_name=\"minimal\")\n headers = [\"text\", \"numeric\"]\n data = [[\"a\"], [\"b\"], [\"c\"]]\n expected = \"a\\nb\\nc\"\n assert expected == \"\\n\".join(\n TabularOutputFormatter().format_output(\n iter(data),\n headers,\n format_name=\"minimal\",\n )\n )", "def tabulator(shows):\n padding = 3\n headers = ['Show', 'Next episode', 'Rating', 'Title']\n shows = sorted(shows)\n\n header_lengths = [len(h) for h in headers]\n max_show_title_length = max(len(s.title) for s in shows)\n max_ep_title_length = max(len(s._next.title) for s in shows)\n max_entry_lengths = [max_show_title_length, 6, 6, max_ep_title_length]\n column_widths = [max(h, e) for h, e in zip(header_lengths, max_entry_lengths)]\n\n # print()\n for header, width in zip(headers, column_widths):\n print('{:{}}{}'.format(header, 
width, ' '*padding), end='')\n print()\n\n for width in column_widths:\n print('{:-<{}}{}'.format('', width+1, (padding-1)*' '), end='')\n print()\n\n for show in shows:\n se_string = season_episode_str_from_show(show)\n\n if show._next.ratings['imdb'] is None:\n rating = 'N/A'\n else:\n rating = show._next.ratings['imdb']\n\n for field, w in zip((show.title, se_string, rating, show._next.title), column_widths):\n print('{:<{}}{}'.format(field, w, padding*' '), end='')\n print()", "def create_tables(times, accuracies, batch_sizes):\r\n #Get time data\r\n p_cpu_times = list(times[0].values())\r\n p_gpu_times = list(times[1].values())\r\n c_cpu_times = list(times[2].values())\r\n c_gpu_times = list(times[3].values())\r\n\r\n #Get differences in times\r\n p_diff_times = [a - b for a, b in zip(p_cpu_times, p_gpu_times)]\r\n c_diff_times = [a - b for a, b in zip(c_cpu_times, c_gpu_times)]\r\n cpu_diff_times = [a - b for a, b in zip(p_cpu_times, c_cpu_times)]\r\n gpu_diff_times = [a - b for a, b in zip(p_gpu_times, c_gpu_times)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_times,\r\n p_gpu_times,\r\n p_diff_times,\r\n c_cpu_times,\r\n c_gpu_times,\r\n c_diff_times,\r\n cpu_diff_times,\r\n gpu_diff_times]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Time (s)', 'P GPU Time (s)', 'P Diff (s)', 'C CPU Time (s)', 'C GPU Time (s)', 'C Diff (s)', 'CPU Diff (s)', 'GPU Diff (s)')\r\n row_colors = plt.cm.BuPu(np.linspace(0, 0.5, n_rows))\r\n col_colors = np.array([192/255,192/255,192/255, 1])\r\n col_colors = np.repeat(col_colors.reshape((1, col_colors.shape[0])), len(columns), axis=0)\r\n\r\n #Create table\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_time.png')\r\n\r\n\r\n #Get accuracy table\r\n #Get accuracy data\r\n p_cpu_accuracy = list(accuracies[0].values())\r\n p_gpu_accuracy = list(accuracies[1].values())\r\n c_cpu_accuracy = list(accuracies[2].values())\r\n c_gpu_accuracy = list(accuracies[3].values())\r\n\r\n #Get max of each batch\r\n p_cpu_max = [max(x) for x in p_cpu_accuracy]\r\n p_gpu_max = [max(x) for x in p_gpu_accuracy]\r\n c_cpu_max = [max(x) for x in c_cpu_accuracy]\r\n c_gpu_max = [max(x) for x in c_gpu_accuracy]\r\n\r\n #Get differences in accuracies\r\n p_diff_acc = [a - b for a, b in zip(p_cpu_max, p_gpu_max)]\r\n c_diff_acc = [a - b for a, b in zip(c_cpu_max, c_gpu_max)]\r\n cpu_diff_acc = [a - b for a, b in zip(p_cpu_max, c_cpu_max)]\r\n gpu_diff_acc = [a - b for a, b in zip(p_gpu_max, c_gpu_max)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_max,\r\n p_gpu_max,\r\n p_diff_acc,\r\n c_cpu_max,\r\n c_gpu_max,\r\n c_diff_acc,\r\n cpu_diff_acc,\r\n gpu_diff_acc]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Acc (%)', 'P GPU Acc (%)', 'P Diff (%)', 'C CPU Acc (%)', 'C GPU Acc (%)', 'C Diff (%)', 'CPU Diff (%)', 'GPU Diff (%)')\r\n\r\n #Create table\r\n plt.clf()\r\n 
plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_acc.png')", "def print_table(n):\n \n numbers = list(range(1, n + 1))\n\n #处理第一行\n s = ''\n for i in numbers:\n s = s + '\\t' + str(i)\n print(s)\n\n for i in numbers:\n s = str(i)\n for j in numbers:\n s = s + '\\t' + str(i * j)\n print(s)", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def num_54():\n frmt = \"\"\"\n :{}\n :Generate Data that conform to a uniform distribution.\n :\n :Class values: {}\n :Population size: {}\n :Results:\n : values:\n {}\n : table:\n {}\n : histogram: (class, frequency)\n {}\n :Then use NumPyArrayToTable to get your table.\n \"\"\"\n # import numpy as np\n st = 1\n end = 7\n vals = np.arange(st,end)\n reps = 10\n z = np.repeat(vals,reps)\n np.random.shuffle(z)\n ID = np.arange(len(z))\n tbl = np.array(list(zip(ID, z)), \n dtype = [('ID', 'int'), ('Class', 'int')])\n h = np.histogram(z, np.arange(st, end+1))\n h = np.array(list(zip(h[1], h[0])))\n pad = \" \"\n args =[num_54.__doc__, vals, reps*len(vals),\n indent(str(z.reshape(3,20)), pad),\n indent(str(tbl), pad), indent(str(h), pad)]\n print(dedent(frmt).format(*args))", "def print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if 
len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))" ]
[ "0.6565323", "0.6138334", "0.6114011", "0.6073732", "0.60654634", "0.605878", "0.60337764", "0.60322475", "0.60169584", "0.6001512", "0.59973735", "0.5922056", "0.58799005", "0.58584213", "0.58325213", "0.58321124", "0.5765773", "0.5743075", "0.572621", "0.5705246", "0.5672308", "0.5668988", "0.56682897", "0.5663082", "0.5634044", "0.5620656", "0.56135404", "0.56013244", "0.5585547", "0.5573469" ]
0.7362937
0
Print the Kinoshita Curve equation. Only works for Mode 1.
def print_eqn(): if sys.stdout.encoding.lower().startswith('utf'): if JS != 0 and JF != 0: print('Eqn: \u03B8=' + str(np.around(THETA0, decimals=6)) + '*sin(2\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')\n +' + str(np.around(THETA0**3, decimals=6)) + '*[' + str(np.around(JS, decimals=6)) + '*cos(6\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')-' + str(np.around(JF, decimals=6)) + '*sin(6\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')]') elif JS == 0 and JF != 0: print('Eqn: \u03B8=' + str(np.around(THETA0, decimals=6)) + '*sin(2\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')+' + str(np.around(THETA0**3, decimals=6)) + '*[' + '-' + str(np.around(JF, decimals=6)) + '*sin(6\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')]') elif JS != 0 and JF == 0: print('Eqn: \u03B8=' + str(np.around(THETA0, decimals=6)) + '*sin(2\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')+' + str(np.around(THETA0**3, decimals=6)) + '*[' + str(np.around(JS, decimals=6)) + '*cos(6\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')]') elif JS == 0 and JF == 0: print('Eqn: \u03B8=' + str(np.around(THETA0, decimals=6)) + '*sin(2\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')') else: if JS != 0 and JF != 0: print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) + '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) + ')\n +' + str(np.around(THETA0**3, decimals=6)) + '*[' + str(np.around(JS, decimals=6)) + '*cos(6PI/' + str(np.around(LAMBDA, decimals=6)) + ')-' + str(np.around(JF, decimals=6)) + '*sin(6PI/' + str(np.around(LAMBDA, decimals=6)) + ')]') elif JS == 0 and JF != 0: print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) + '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) + ')+' + str(np.around(THETA0**3, decimals=6)) + '*[' + '-' + str(np.around(JF, decimals=6)) + '*sin(6PI/' + str(np.around(LAMBDA, decimals=6)) + ')]') elif JS != 0 and JF == 0: print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) + '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) + ')+' + str(np.around(THETA0**3, decimals=6)) + '*[' + str(np.around(JS, decimals=6)) + '*cos(6PI/' + str(np.around(LAMBDA, decimals=6)) + ')]') elif JS == 0 and JF == 0: print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) + '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) + ')')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_kinoshita():\n if MODE != 1:\n return [], [], [], [], []\n print('MODE 1: GENERATE KINOSHITA CURVE FROM EQUATION is selected')\n print('Kinoshita Curve parameters are read from steering file:')\n print_eqn()\n s = np.linspace(0, NBENDS*LAMBDA, int(NBENDS*LAMBDA/DS) + 1)\n print_para_table(s)\n print('+> Calculating Kinoshita Curve...', end='')\n s, x, y, cur, theta = compute_kinoshita(s)\n print(' [done]')\n return s, x, y, cur, theta", "def printfunc(self):\n zero1=self.Newton(True)\n print \"Using initial porition %0.2f ,%0.2f\" %(self.x_init,self.y_0)\n print \"extremum calculated witn Newton-Rapson: %0.2f ,%0.2f.\"%(zero1[0],zero1[1])\n zero2=self.Newton(False)\n print \"extremum calculated witn Secant: %0.2f ,%0.2f.\" %(zero2[0],zero2[1])\n xlist=np.arange(self.x_0-10,self.x_0+10,0.01)\n ylist=np.arange(self.y_0-10,self.y_0+10,0.01)\n X,Y=np.meshgrid(xlist,ylist)\n Z=self.sfunc(X,Y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.plot(xlist, ylist, self.sfunc(xlist,ylist), 'g-',label='function $e^{(-(x-%0.2f)^2-(y-%0.2f)^2)}$' %(self.x_0,self.y_0))\n ax.contour(X, Y, Z)# colors = 'k', linestyles = 'solid')\n ax.plot([zero1[0]], [zero1[0]], self.sfunc(zero1[0],zero1[1]),'bo',label='extrema using Newton-Rapson (%0.2f; %0.2f)'%(zero1[0],zero1[1]))\n ax.plot([zero2[0]], [zero2[0]], self.sfunc(zero2[0],zero2[1]),'ro',label='extrema using Seacent (%0.2f; %0.2f)'%(zero2[0],zero2[1]))\n ax.legend()\n plt.show()", "def print_pow():\n a = get_inp_pow()\n n = get_inp_pow('power')\n print(a, \"^\", n, \" = \", pow(a, n), sep='')", "def print_curve(xlist, ylist, precision=3):\r\n print (\"----------------------\")\r\n print (\"Maturities\\tCurve\")\r\n print (\"----------------------\")\r\n for x,y in zip(xlist, ylist):\r\n print (x,\"\\t\\t\", round(y, precision))\r\n print (\"----------------------\")", "def plot_avancement(_,K):\n\t_=_+1\n\tprint(_, \"out of\", K, \"\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\", end='\\r')", "def NACA4digitsSym(self):\n self.ytu = self.NacaEquation(self.xu,self.t)\n self.ytl = -self.NacaEquation(self.xl,self.t)\n # Done for estitic reasons\n self.yu = self.ytu \n self.yl = self.ytl\n self.z = np.concatenate((self.yu, np.flip(self.yl)))\n if self.plot:\n plt.figure(self.name)\n plt.title(self.name)\n plt.plot(self.xu,self.yu)\n plt.plot(self.xl,self.yl)\n plt.axis('equal')", "def show(self):\r\n for j in self.pfn.keys():\r\n print j,\"p\",self.pfn[j],\"a\",self.afn[j] \r\n print self.pfn[\"l_knee_joint\"].amplitude_offset", "def __str__(self):\n return self.get_equation()", "def k_o(self, tl):\n\t return self.KO0*exp(self.HKO/(R*self.TO)*(1. 
- self.TO/tl))", "def plot5(self):\n\n cond = ((self.ds.freq<32.6) & (self.ds.freq>17))\n freq = self.ds.freq[cond]\n power = self.ds.power[cond]\n\n # the modes for KIC 9205705\n m = pd.read_csv('/home/mxs191/Desktop/MathewSchofield/TRG/GetData/Modes/modes_9205705.csv')\n m1 = [17.65, 20.6, 23.7, 26.9, 30.1] # l=1\n\n plt.rc('font', size=18)\n fig, ax = plt.subplots()\n plt.plot(freq, power, zorder=1, alpha=0.4)\n\n # NOTE: annotate mode angular degrees\n plt.scatter(m['f0'].as_matrix(), np.full(len(m), 150000), c='k', zorder=2, s=80)\n plt.scatter(m['f2'].as_matrix(), np.full(len(m), 130000), c='mediumseagreen', zorder=2, s=80, marker='^')\n plt.scatter(m1, np.full(len(m1), 140000), c='grey', zorder=2, s=80, marker='v')\n\n # NOTE: plot envelope\n numax = info['numax'].as_matrix() # mu Hz\n env_width = 0.66 * numax**0.88\n plt.plot(freq, 40004*np.exp( -( (freq-24.7)**2 / (2*7.**2) ) ), c='k', linestyle='--')\n\n # NOTE: annotate envelope\n style = dict(size=16, color='k')\n ax.text(24.1, 49167, r\"$\\nu_{\\rm max}$\", color='k', size=18)\n ax.text(24.1, 20994, r\"$\\Gamma_{\\rm env}$\", color='k', size=18)\n ax.text(23, 162944, r\"$\\Delta \\nu$\", **style)\n plt.annotate(s='', xy=(25.3, 158610), xytext=(21.91, 158610),\n arrowprops=dict(arrowstyle='<->')) # dnu\n plt.annotate(s='', xy=((24.7-env_width/2.), 15861), xytext=((24.7+env_width/2.), 15861),\n arrowprops=dict(arrowstyle='<->')) # env width\n\n ax.set_xlabel(r'Frequency ($\\rm \\mu Hz$)')\n ax.set_ylabel(r'PSD ($\\rm ppm^{2} \\, \\mu Hz^{-1}$)')\n plt.xlim(17, 32.6)\n plt.ylim(17, 195181)\n plt.tight_layout()\n plt.show()\n fig.savefig(os.getcwd() + '/DetTest1_plots/Plot5_ps_' + str(self.ds.epic) + '.pdf')", "def __str__(self):\n return \"Gamma( k=%f, theta=%f )\" % (self.k, self.theta)", "def equationPlot(self):\n clf()\n x = np.arange(0,9.9,0.1)\n plot(x,1/(10-x))\n xlabel('X')\n ylabel('1/(10-x)')\n savefig('equation.png')", "def exercise_one():\n\n interval = (0,1)\n\n control_net = np.matrix([\n [-0.2, 2],\n [-0.3, 6.2],\n [-1.2, 4.8],\n [-2.8, 8.8],\n [-0.7, 14],\n [1.4, 14.7],\n [3.6, 10.2],\n [3.2, 5.1],\n [1.5, 6.2],\n [1.4, 2],\n ])\n\n# First we plot a curve where internal knots have maximum multiplicities\n arguments = { \n 'order':4, \n 'interval':interval,\n 'internal_knots':sample_internal_knots_uniformly_in(interval, 3),\n 'control_net':control_net,\n 'multiplicities':[2,2,2]\n }\n\n curve = draw(**arguments)\n\n\n# After we plot a curve where each internal knot have multiplicity 1.\n plot_curve(curve, control_net, axis=[-4, 4, 0, 16])\n\n arguments = { \n 'order':4, \n 'interval':interval,\n 'internal_knots':sample_internal_knots_uniformly_in(interval, 6),\n 'control_net':control_net,\n }\n\n curve = draw(**arguments)\n\n plot_curve(curve, control_net, axis=[-4, 4, 0, 16])", "def _repr_(self):\n return \"Jacobian of %s\"%self.__curve", "def __str__(self):\n\t\treturn 'f(z) = ' + self.p.coeffString() + ' / ' + self.q.coeffString()", "def do_k(self, arguments):\n self.padawan.PrintKnowledge()", "def __str__(self):\n out = \"phase polynomial = \\n\"\n out += str(self.poly)\n out += \"\\naffine function = \\n\"\n out += \" (\"\n for row in range(self.num_qubits):\n wrote = False\n for col in range(self.num_qubits):\n if self.linear[row][col] != 0:\n if wrote:\n out += \" + x_\" + str(col)\n else:\n out += \"x_\" + str(col)\n wrote = True\n if self.shift[row] != 0:\n out += \" + 1\"\n if row != self.num_qubits - 1:\n out += \",\"\n out += \")\\n\"\n return out", "def value_printer():#todo: add all wanted items\n 
print(\"Max ascent speed = \"+ max_ascent_speed() + \" m/s\")\n print(\"Max ascent acceleration = \" + ascent_acc() + \" m/s^2\")\n print(\"Max ascent acceleration = \" + descent_acc() + \" m/s^2\")\n print(\"Max acceleration = \" + acc() + \" m/s^2\")", "def print_self(self):\n #print(f\"\\nself: \\nN: {self.N} \\nQ: {self.Q} \\npi: {self.pi}\"); \n s = ''\n s += f'N: {self.N}, \\n'\n s += f'Q: {self.Q:.2f}, \\n'\n s += f'U: {self.U:2.3f}\\n'\n s += f'policy: ' + ' '.join(f\"{x:2.3f}\" for x in self.pi)\n print(s)\n self.env.render()", "def nilakantha():\n\n print(\"Nilakantha\\n=========\")\n\n iterations = 10000\n multiplier = 1.0\n start_denominator = 2.0\n pi = 3.0\n\n for i in range(1, iterations + 1):\n pi += ( (4.0 / (start_denominator * (start_denominator + 1.0) * (start_denominator + 2.0)) ) * multiplier)\n start_denominator += 2.0\n multiplier *= -1.0\n\n print_as_text(pi)", "def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string", "def kelvin_modes(m, q, ecc=0, chi=0):\n# if type(m) != int or type(q) != int:\n# raise TypeError(\"Input parameters should be given as integers\")\n approx = (m**2) * 2.*m*q / (2*m*q + 1.)\n return approx", "def print_me(self):\n\n print(\"----- Model:\",self.name,\" -----\")\n print(\"Mass (in M_sun): %.5f\" % (self.glb[imass]/constants.solar_mass))\n print(\"Radius (in R_sun): %.5f\" % (self.glb[iradius]/constants.solar_radius))\n print(\"Reference frequency (in uHz): %.3f\" % self.glb[ifreq_ref])\n print(\"Temperature (in K): %.1f\" % self.glb[itemperature])\n print(\"Luminosity (in L_sun): %.3g\" % (self.glb[iluminosity]/constants.solar_luminosity))\n print(\"Age (in Myrs): %.2f\" % self.glb[iage])\n print(\"Z: %.4f\" % self.glb[iz0])\n print(\"X: %.4f\" % self.glb[ix0])\n for (name, latex_name) in config.user_params:\n print(\"{0:29} {1:.5e}\".format(name,self.glb[user_params_index[name]]))\n print(\"Modes (in muHz):\")\n size = self.modes.shape[0]\n for i in range(size):\n print(\" (n,l,freq,IK) = (%d, %d, %.15f, %.5e)\" % \\\n (self.modes['n'][i], self.modes['l'][i], \\\n self.modes['freq'][i]*self.glb[ifreq_ref],\\\n self.modes['inertia'][i]))", "def showm():\n def show1(i):\n coeff=[]\n for m in range(5):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.m%d' % (i+1,m+1) ,qmax_)\n coeff.append(a)\n for o in range(3):\n a=SAC.queryDouble('carma.Ovro%d.Drive.Point.Constants.o%d' % (i+1,o+1) ,qmax_)\n coeff.append(a)\n return coeff\n print ' ant m1 m2 m3 m4 m5 o1 o2 o3'\n for i in range(6):\n m = show1(i)\n print ' 00%d %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % (i+1,m[0],m[1],m[2],m[3],m[4],m[5],m[6],m[7])", "def DispCurve(c,x,B,a,N,K):\n return B+np.square(a*(x/(1+x+(K*((c-N)/55.5)))));", "def print(self):\r\n self.print_avec_separateur()", "def ukko_str (self):\n return 'k=%d p=%d 
k..p=\"%s\"' % (self.k, self.p, self)", "def __str__(self):\n if self.U is None:\n q0 = np.ones(self.N + self.M)\n else :\n q0 = np.ones(self.N + self.M + self.U)\n \n return \"Evaluated point q0: \" + str(q0) + \"\\n F operator evaluated at q0: \" + str(self.Fone(q0)) + \"\\n Proximal Operator evaluated at q0: \" + str(self.prox(q0)) + \"\\n optimized?: \" + str(self.optimized) + \"\\n J Operator evaluated at q0: \" + str(self.Jone(q0)) + \"\\n\"", "def kA_char_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n ttd_1 = T_mix_ph(i, T0=self.inl[0].T.val_SI) - self.Tamb.val_SI\n ttd_2 = T_mix_ph(o, T0=self.outl[0].T.val_SI) - self.Tamb.val_SI\n\n if ttd_1 > ttd_2:\n td_log = (ttd_1 - ttd_2) / np.log(ttd_1 / ttd_2)\n elif ttd_1 < ttd_2:\n td_log = (ttd_2 - ttd_1) / np.log(ttd_2 / ttd_1)\n else:\n td_log = 0\n\n f = 1\n if not np.isnan(self.inl[0].m.design):\n if self.kA_char.param == 'm':\n f = self.kA_char.func.evaluate(i[0] / self.inl[0].m.design)\n\n fkA = 2 / (1 + 1 / f)\n\n return i[0] * (o[2] - i[2]) + self.kA.design * fkA * td_log", "def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string" ]
[ "0.6009057", "0.59244484", "0.57258797", "0.5719299", "0.5606414", "0.5596424", "0.5556299", "0.55449724", "0.5524112", "0.5523874", "0.55117285", "0.5498666", "0.5461777", "0.5460435", "0.5453134", "0.5452214", "0.53886247", "0.5387569", "0.53753626", "0.53699505", "0.5363998", "0.5347315", "0.5342401", "0.53248215", "0.5304419", "0.5297018", "0.52732086", "0.52686447", "0.5264294", "0.522657" ]
0.6549474
0
Build the Kinoshita Curve (non-computational part). Only works for Mode 1.
def build_kinoshita(): if MODE != 1: return [], [], [], [], [] print('MODE 1: GENERATE KINOSHITA CURVE FROM EQUATION is selected') print('Kinoshita Curve parameters are read from steering file:') print_eqn() s = np.linspace(0, NBENDS*LAMBDA, int(NBENDS*LAMBDA/DS) + 1) print_para_table(s) print('+> Calculating Kinoshita Curve...', end='') s, x, y, cur, theta = compute_kinoshita(s) print(' [done]') return s, x, y, cur, theta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def createAnisotropicK(powerSpectrum,center,aniso):\n\n if aniso > 1.:\n anisoNew = 1. / aniso\n padDim = int( np.round( powerSpectrum.shape[0] / ( anisoNew ) ) )\n else:\n padDim = int( np.round( powerSpectrum.shape[0] / ( aniso ) ) )\n\n # the amount to pad the power spectrum\n padAmount = padDim - powerSpectrum.shape[0]/2\n paddedPS = np.pad(powerSpectrum, (padAmount, padAmount), 'constant', constant_values=(0, 0))\n center = np.array(center) + padAmount\n\n # the kperp and kpar components of the ellipses\n if aniso < 1.:\n kperp = np.arange(1,padDim-1)\n kpar = np.floor(aniso * kperp).astype(int)\n else:\n kpar = np.arange(1,padDim-1)\n kperp = np.floor(anisoNew * kpar).astype(int)\n\n\n return paddedPS, kpar, kperp, center, padAmount", "def upsilon_pK ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays\n #\n return self.make_selection (\n 'Y&pK' ,\n DaVinci__N3BodyDecays ,\n [ self.upsilons() , self.protons() , self.kaons() ] ,\n ## algorithm properties \n DecayDescriptor = \"[Upsilon(4S) -> J/psi(1S) p+ K-]cc\" ,\n Combination12Cut = \"\"\"\n ( AM < 15 * GeV ) &\n ( ACHI2DOCA(1,2) < 16 )\n \"\"\" ,\n CombinationCut = \"\"\"\n ( AM < 15 * GeV ) &\n ( AM23 < 2000 * MeV ) & \n ( ( AM - AM1 - AM23 ) < 2.5 * GeV ) &\n ( ACHI2DOCA(1,3) < 16 ) &\n ( ACHI2DOCA(2,3) < 16 )\n \"\"\" ,\n MotherCut = \" chi2vxndf< 10 \" ,\n )", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def k_o(self, tl):\n\t return self.KO0*exp(self.HKO/(R*self.TO)*(1. - self.TO/tl))", "def KPMO(XVal,YVal_State_1,YVal_State_2,YVal_State_3,XVal_Mean_Trans_1,XVal_Mean_Trans_2,XVal_Sig_Trans_1,XVal_Sig_Trans_2,iOpt):\n#\t1. Computations:\n\tTiny=1E-20\n\tP_Trans_1 = fCPD(XVal,XVal_Mean_Trans_1, XVal_Sig_Trans_1) # Transition of kerogen from State #1 to State #2\n\tP_Trans_2 = fCPD(XVal,XVal_Mean_Trans_2, XVal_Sig_Trans_2) # Transition of kerogen from State #2 to State #3\n\tFunVal=0\n\tif(iOpt==0):\n\t\tP_State_1=(1-P_Trans_1)*(1-P_Trans_2)\n\t\tP_State_2=P_Trans_1*(1 - P_Trans_2)\n\t\tP_State_3=1-P_State_1-P_State_2\n\t\tFunVal=(YVal_State_1*P_State_1)+(YVal_State_2*P_State_2)+(YVal_State_3*P_State_3)\n\tif(iOpt==1):\n\t\tFunVal=YVal_State_1+P_Trans_1*YVal_State_2+P_Trans_2*YVal_State_3\n\tif(FunVal==0):\n\t\tFunVal=Tiny\n\treturn FunVal", "def self_defined_noisy_circuit() -> 'QEnv':\n # Create environment\n env = QEnv()\n # Choose backend Baidu local simulator\n env.backend(BackendName.LocalBaiduSim2)\n\n # Number of qubits, no larger than 20 \n num_qubit = 13\n # Number of gates in each for loop\n gate_num = 3 # Depth of circuit = num_qubit * gate_num\n\n assert num_qubit > 2\n assert gate_num > 2\n\n # Initialize a QCompute circuit\n q = env.Q.createList(num_qubit)\n\n # A noisy random H + CX + RX circuit\n for i in range(num_qubit - 1):\n H(q[i])\n CX(q[i], q[i + 1])\n # Random rotation angles\n rotation_list = [random.uniform(0, 6.28) for _ in range(gate_num - 2)]\n # random quantum registers\n qreg_list = [random.randint(0, num_qubit - 1) for _ in range(gate_num - 2)]\n for i in range(gate_num - 2):\n RX(rotation_list[i])(q[qreg_list[i]])\n\n # Measure with the computational basis\n MeasureZ(*env.Q.toListPair())\n\n # Define noise instances \n # Define a Bit Flip noise instance\n bfobj = BitFlip(0.1)\n # Define a 2-qubit Depolarizing noise instance\n dpobj = Depolarizing(2, 0.1)\n\n # Add noises\n env.noise(['H', 'RX'], [bfobj])\n env.noise(['CX'], [dpobj])\n\n 
return env", "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def generateKNNobj():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n #tdata=b[:,8:28].copy()\n # indx with only M22 without zero order\n m22idx = np.concatenate((np.arange(29,48),np.arange(49,68)))\n tdata=b[:,m22idx].copy()\n #tdata=b[:,9:28].copy() # remove the zero order zernike, i.e. remove the mean of the M20\n #-standardize the data. use this information in future validation data too.\n tmean = tdata.mean(axis=0)\n tstd = tdata.std(axis=0)\n tdata = (tdata - tmean)/tstd\n ttpara=b[:,0:5].copy()\n tpara = b[:,0:5].copy()\n tpara[:,3] = ttpara[:,3]*np.cos(np.deg2rad(ttpara[:,4]))\n tpara[:,4] = ttpara[:,3]*np.sin(np.deg2rad(ttpara[:,4]))\n knn = nb.KNeighborsRegressor(algorithm='ball_tree',n_neighbors=15)\n knn.fit(tdata,tpara)\n p.dump(knn,open('finerGridKnnObj_M22_remMean.cp','w'),2)\n p.dump([tmean,tstd],open('finerGridStdConst_M22_remMean.cp','w'),2)\n #np.savetxt('finerGridStdConst.txt',np.array([tmean,tstd]),fmt='%f10.5',delimiter = ',')\n return 'It is done !'", "def class_XPk(self,\n z = 0.,\n k = np.logspace(-4., 2., 1001),\n nonlinear = False,\n halofit = 'halofit',\n var_1 = ['tot'],\n var_2 = ['tot'],\n **kwargs\n ):\n components = {'tot' : 'd_tot',\n 'cdm' : 'd_cdm',\n 'wdm' : 'd_wdm',\n 'b' : 'd_b',\n 'cb' : 'd_cb',\n 'cold' : 'd_cold',\n 'nu' : 'd_nu',\n 'ur' : 'd_ur',\n 'gamma' : 'd_g',\n 'Phi' : 'phi',\n 'Psi' : 'psi'}\n\n # Set halofit for non-linear computation\n if nonlinear == True: halofit = halofit\n else: halofit = 'none'\n\n # Setting lengths\n nk = len(np.atleast_1d(k))\n nz = len(np.atleast_1d(z))\n z = np.atleast_1d(z)\n k = np.atleast_1d(k)\n kmax = max(k.max(),500.)\n zmax = max(z.max(),100.)\n # Parameters\n params = {\n 'output': 'mPk dTk',\n 'n_s': self.ns, \n 'h': self.h,\n 'omega_b': self.Omega_b*self.h**2.,\n 'omega_cdm': self.Omega_cdm*self.h**2.,\n 'Omega_k': self.Omega_K,\n 'tau_reio': self.tau,\n 'T_cmb': self.T_cmb,\n 'P_k_max_h/Mpc': kmax,\n 'z_max_pk': zmax,\n 'non_linear': halofit}\n # Set initial conditions\n if self.sigma_8 is not None: params['sigma8'] = self.sigma_8 \n else: params['A_s'] = self.As \n # Set dark energy\n if self.w0 != -1. 
or self.wa != 0.:\n params['Omega_fld'] = self.Omega_lambda\n params['w0_fld'] = self.w0\n params['wa_fld'] = self.wa\n # Set neutrino masses\n params['N_ur'] = self.massless_nu\n params['N_ncdm'] = self.massive_nu\n if self.massive_nu != 0:\n params['m_ncdm'] = ', '.join(str(x) for x in self.M_nu)\n params['T_ncdm'] = ', '.join(str(self.Gamma_nu) for x in self.M_nu)\n # Set WDM masses (remove UR species cause Class treats WDM and neutrinos the same way)\n params['N_ncdm'] += self.N_wdm\n if self.N_wdm>0 and self.massive_nu>0.:\n params['m_ncdm'] += ', ';params['T_ncdm'] += ', '\n params['m_ncdm'] += ', '.join(str(x) for x in self.M_wdm)\n params['T_ncdm'] += ', '.join(str(x) for x in self.Gamma_wdm)\n elif self.N_wdm>0:\n params['m_ncdm'] = ', '.join(str(x) for x in self.M_wdm)\n params['T_ncdm'] = ', '.join(str(x) for x in self.Gamma_wdm)\n # Add the keyword arguments\n for key, value in kwargs.items():\n if not key in params: params[key] = value\n else: raise KeyError(\"Parameter %s already exists in the dictionary, impossible to substitute it.\" %key)\n\n # Compute\n cosmo = Class()\n cosmo.set(params)\n cosmo.compute()\n\n # Setting lengths\n n1 = len(var_1)\n n2 = len(var_2)\n\n # I change to k/h since CLASS uses k in units of 1/Mpc\n k *= self.h\n \n # Storing Pk\n pk_m = np.zeros((nz,nk))\n for i in range(nk):\n for j in range(nz):\n pk_m[j,i] = cosmo.pk(k[i],z[j])*self.h**3.\n\n # Re-switching to (Mpc/h) units\n k /= self.h\n\n # Get transfer functions and rescale the power spectrum\n pk = {}\n # Loop over variables\n for c1 in var_1:\n for c2 in var_2:\n string = c1+'-'+c2\n pk[string] = np.zeros((nz,nk))\n # Loop over redshifts\n for ind_z in range(nz):\n # Get transfer functions at z\n TF = cosmo.get_transfer(z = z[ind_z])\n TF['d_nu'] = np.zeros_like(TF['k (h/Mpc)'])\n for inu in range(self.massive_nu):\n index = inu\n TF['d_nu'] += self.M_nu[inu]*TF['d_ncdm[%i]'%index]/np.sum(self.M_nu)\n TF['d_wdm'] = np.zeros_like(TF['k (h/Mpc)'])\n for inw in range(self.N_wdm):\n index = inw+self.massive_nu\n TF['d_wdm'] += self.Omega_wdm[inw]/self.Omega_wdm_tot*TF['d_ncdm[%i]'%index]\n TF['d_cold'] = (self.Omega_cdm *TF['d_cdm' ] + \n self.Omega_wdm_tot*TF['d_wdm' ] + \n self.Omega_b *TF['d_b' ])/self.Omega_cold\n TF['d_cb'] = (self.Omega_cdm *TF['d_cdm' ] + \n self.Omega_b *TF['d_b' ])/self.Omega_cb\n # !!!!!!!!!!!\n # For reasons unknown, for non-standard cosmological constant, the amplitude is off...\n # !!!!!!!!!!!\n if self.w0 != -1. 
or self.wa != 0.: \n TF['d_tot'] = (self.Omega_cold *TF['d_cold'] + \n self.Omega_nu_tot*TF['d_nu' ])/self.Omega_m\n # !!!!!!!!!!!\n # Interpolation of matter T(k)\n tm_int = si.interp1d(TF['k (h/Mpc)'],TF['d_tot'],\n kind='cubic',fill_value=\"extrapolate\",bounds_error=False)\n transf_m = tm_int(k) \n # Interpolate them to required k\n t1_int = si.interp1d(TF['k (h/Mpc)'],TF[components[c1]],\n kind='cubic',fill_value=\"extrapolate\",bounds_error=False)\n t2_int = si.interp1d(TF['k (h/Mpc)'],TF[components[c2]],\n kind='cubic',fill_value=\"extrapolate\",bounds_error=False)\n transf_1 = t1_int(k)\n transf_2 = t2_int(k)\n # Rescaling\n pk[string][ind_z] = pk_m[ind_z]*transf_1*transf_2/transf_m**2.\n cosmo.struct_cleanup()\n cosmo.empty()\n \n return k, pk", "def kaons ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ## \n if self['NOPIDHADRONS'] :\n from StandardParticles import StdAllNoPIDsKaons as inpts\n kaoncut = self['KaonCut']\n else :\n from StandardParticles import StdAllLooseANNKaons as inpts \n kaoncut = \"(%s)&(%s)\" % ( self['KaonCut'] , self['KaonPIDCut'] ) \n #\n ##\n return self.make_selection (\n 'Kaon' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = kaoncut ,\n )", "def exercise_one():\n\n interval = (0,1)\n\n control_net = np.matrix([\n [-0.2, 2],\n [-0.3, 6.2],\n [-1.2, 4.8],\n [-2.8, 8.8],\n [-0.7, 14],\n [1.4, 14.7],\n [3.6, 10.2],\n [3.2, 5.1],\n [1.5, 6.2],\n [1.4, 2],\n ])\n\n# First we plot a curve where internal knots have maximum multiplicities\n arguments = { \n 'order':4, \n 'interval':interval,\n 'internal_knots':sample_internal_knots_uniformly_in(interval, 3),\n 'control_net':control_net,\n 'multiplicities':[2,2,2]\n }\n\n curve = draw(**arguments)\n\n\n# After we plot a curve where each internal knot have multiplicity 1.\n plot_curve(curve, control_net, axis=[-4, 4, 0, 16])\n\n arguments = { \n 'order':4, \n 'interval':interval,\n 'internal_knots':sample_internal_knots_uniformly_in(interval, 6),\n 'control_net':control_net,\n }\n\n curve = draw(**arguments)\n\n plot_curve(curve, control_net, axis=[-4, 4, 0, 16])", "def class_Pk(self,\n z = 0.,\n k = np.logspace(-4., 2., 1001),\n nonlinear = False,\n halofit = 'halofit',\n **kwargs):\n\n # Set halofit for non-linear computation\n if nonlinear == True: halofit = halofit\n else: halofit = 'none'\n\n # Setting lengths\n nk = len(np.atleast_1d(k))\n nz = len(np.atleast_1d(z))\n z = np.atleast_1d(z)\n k = np.atleast_1d(k)\n kmax = max(k.max(),500.)\n zmax = max(z.max(),101.)\n tau = self.tau\n params = {\n 'output': 'mPk dTk',\n 'n_s': self.ns, \n 'h': self.h,\n 'omega_b': self.Omega_b*self.h**2.,\n 'omega_cdm': self.Omega_cdm*self.h**2.,\n 'Omega_k': self.Omega_K,\n 'tau_reio': self.tau,\n 'T_cmb': self.T_cmb,\n 'P_k_max_h/Mpc': kmax,\n 'z_max_pk': zmax,\n 'non_linear': halofit}\n # Set initial conditions\n if self.sigma_8 is not None: params['sigma8'] = self.sigma_8 \n else: params['A_s'] = self.As \n # Set dark energy\n if self.w0 != -1. 
or self.wa != 0.:\n params['Omega_fld'] = self.Omega_lambda\n params['w0_fld'] = self.w0\n params['wa_fld'] = self.wa\n # Set neutrino masses\n params['N_ur'] = self.massless_nu\n params['N_ncdm'] = self.massive_nu\n if self.massive_nu != 0:\n params['m_ncdm'] = ', '.join(str(x) for x in self.M_nu)\n params['T_ncdm'] = ', '.join(str(self.Gamma_nu) for x in self.M_nu)\n # Set WDM masses (remove UR species cause Class treats WDM and neutrinos the same way)\n params['N_ncdm'] += self.N_wdm\n if self.N_wdm>0 and self.massive_nu>0.:\n params['m_ncdm'] += ', ';params['T_ncdm'] += ', '\n params['m_ncdm'] += ', '.join(str(x) for x in self.M_wdm)\n params['T_ncdm'] += ', '.join(str(x) for x in self.Gamma_wdm)\n elif self.N_wdm>0:\n params['m_ncdm'] = ', '.join(str(x) for x in self.M_wdm)\n params['T_ncdm'] = ', '.join(str(x) for x in self.Gamma_wdm)\n # Add the keyword arguments\n for key, value in kwargs.items():\n if not key in params: params[key] = value\n else: raise KeyError(\"Parameter %s already exists in the dictionary, impossible to substitute it.\" %key)\n\n # Compute\n cosmo = Class()\n cosmo.set(params)\n cosmo.compute()\n\n # I change to k/h since CLASS uses k in units of 1/Mpc\n k *= self.h\n\n # Storing Pk\n pk = np.zeros((nz,nk))\n for i in range(nk):\n for j in range(nz):\n pk[j,i] = cosmo.pk(k[i],z[j])*self.h**3.\n # Re-switching to (Mpc/h) units\n k /= self.h\n\n cosmo.struct_cleanup()\n cosmo.empty()\n\n return k, pk", "def Ham_gen(self,kx,ky):\n temp=np.zeros((self.NL*2,self.NL*2),dtype=complex) # for storage of Hamiltonian matrix\n for i in range(self.NL):\n #Diagonal terms are purely layer specific.\n # DIAG A\n temp[2*i ,2*i ]=self.layers[i].H1(kx,ky) + self.layers[i].Hz(kx,ky)\n # LOWER OFF-DIAG BA\n temp[2*i+1,2*i ]=self.layers[i].Hx(kx,ky) + 1.j*self.layers[i].Hy(kx,ky)\n # UPPER OFF-DIAG AB\n temp[2*i ,2*i+1]=self.layers[i].Hx(kx,ky) - 1.j*self.layers[i].Hy(kx,ky)\n # DIAG B\n temp[2*i+1,2*i+1]=self.layers[i].H1(kx,ky) - self.layers[i].Hz(kx,ky)\n\n # Next update the couplings between the layers.\n if i<self.NL-1:\n temp[2*i ,2*i+2]=self.couplings[i]\n temp[2*i+1,2*i+3]=self.couplings[i]\n temp[2*i+2,2*i ]=self.couplings[i]\n temp[2*i+3,2*i+1]=self.couplings[i]\n\n return temp", "def build_karels():\n build_karel1()\n build_karel2()\n build_karel3()\n build_karel4()", "def gen_kpath( atoms, lattice, Nkpts=60 ):\n #\n points = get_special_points(atoms.cell, lattice)\n paths = parse_path_string(special_paths[lattice])\n #print(paths[0])\n kpts_spec = [points[k] for k in paths[0]]\n kpts, x, Xkpt = get_bandpath(kpts_spec,atoms.cell,Nkpts)\n #\n # TODO: also return string for special k-points\" symbol\n # probably using variable `paths`.\n return kpts, x, Xkpt", "def __init__(self, mode=KSamplingModes.monkhorst, num_kpts= 0,\n kpts=((1, 1, 1),),\n kpt_shifts=(0.5, 0.5, 0.5),\n kpts_weights=None, use_symmetries=True, use_time_reversal=True, chksymbreak=None,\n comment=None):\n if isinstance(mode, str):\n mode = KSamplingModes[mode]\n\n super(KSampling, self).__init__()\n\n self.mode = mode\n self.comment = comment\n\n self.num_kpts = num_kpts\n self.kpts = kpts\n self.kpt_shifts = kpt_shifts\n self.kpts_weights = kpts_weights\n self.use_symmetries = use_symmetries\n self.use_time_reversal = use_time_reversal\n self.chksymbreak = chksymbreak\n\n abivars = {}\n\n if mode == KSamplingModes.monkhorst:\n assert num_kpts == 0\n ngkpt = np.reshape(kpts, 3)\n shiftk = np.reshape(kpt_shifts, (-1,3))\n\n if use_symmetries and use_time_reversal: kptopt = 1\n if not use_symmetries and 
use_time_reversal: kptopt = 2\n if not use_symmetries and not use_time_reversal: kptopt = 3\n if use_symmetries and not use_time_reversal: kptopt = 4\n\n abivars.update({\n \"ngkpt\" : ngkpt,\n \"shiftk\" : shiftk,\n \"nshiftk\" : len(shiftk),\n \"kptopt\" : kptopt,\n \"chksymbreak\": chksymbreak,\n })\n\n elif mode == KSamplingModes.path:\n if num_kpts <= 0:\n raise ValueError(\"For Path mode, num_kpts must be specified and >0\")\n\n kptbounds = np.reshape(kpts, (-1,3))\n #print(\"in path with kptbound: %s \" % kptbounds)\n\n abivars.update({\n \"ndivsm\" : num_kpts,\n \"kptbounds\": kptbounds,\n \"kptopt\" : -len(kptbounds)+1,\n })\n\n elif mode == KSamplingModes.automatic:\n kpts = np.reshape(kpts, (-1,3))\n if len(kpts) != num_kpts:\n raise ValueError(\"For Automatic mode, num_kpts must be specified.\")\n\n abivars.update({\n \"kptopt\" : 0,\n \"kpt\" : kpts,\n \"nkpt\" : num_kpts,\n \"kptnrm\" : np.ones(num_kpts),\n \"wtk\" : kpts_weights, # for iscf/=-2, wtk.\n \"chksymbreak\": chksymbreak,\n })\n\n else:\n raise ValueError(\"Unknown mode %s\" % mode)\n\n self.abivars = abivars\n #self.abivars[\"#comment\"] = comment", "def __init__(self, r, l, Q, R, k1_l, k2_l, k1_r, k2_r, starting_state, dt):\n print \"CREATING EKF\" \n self.r = r\n self.l = l\n self.Q = Q\n self.R = R\n self.k1_l = k1_l\n self.k2_l = k2_l\n self.k1_r = k1_r\n self.k2_r = k2_r\n self.current_state_estimate = copy.deepcopy(starting_state)\n #self.current_prob_estimate = np.zeros([starting_state.shape[0], starting_state.shape[0]])\n self.current_prob_estimate = np.zeros([starting_state.shape[0], starting_state.shape[0]])\n self.current_prob_estimate[4:7, 4:7] = np.eye(3)\n self.dt = dt\n \n self.A = np.zeros([self.current_state_estimate.shape[0], self.current_state_estimate.shape[0]])\n self.B = np.eye(2)\n self.H = np.array([[1,0,0,0,0,0,0],\\\n [0,1,0,0,0,0,0],\\\n [0,0,0,1,0,0,0]])", "def lookup_Pk(cosmology='planck',nonlinear=0):\n\n # k in h/Mpc\n k = N.logspace(-4., 3., 3*1024)\n\n if nonlinear==1:\n hf = 'halofit'\n saveto = 'data_itam/'+cosmology+'_pk.txt'\n\n else:\n hf = ''\n saveto = 'data_itam/'+cosmology+'_pk_linear.txt'\n\n if cosmology == 'planck':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 0.96,\n 'h': 0.7,\n 'omega_b': 0.0225,\n 'Omega_cdm': 0.25,\n }\n sig8_0 = 0.8\n\n\n elif cosmology == 'wmap':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 0.967,\n 'h': 0.704,\n 'omega_b': 0.02253,\n 'Omega_cdm': 0.226,\n }\n sig8_0 = 0.81\n\n\n elif cosmology == 'ML':\n class_params = {\n 'non linear': hf,\n 'output': ['mPk','vTk'],\n 'P_k_max_1/Mpc': 1000.,\n 'z_pk': 0.,\n 'A_s': 2.3e-9,\n 'n_s': 1.,\n 'h': 0.73,\n 'omega_b': 0.045*0.73**2,\n 'Omega_cdm': 0.25-0.045,\n }\n sig8_0 = 0.9\n\n else:\n raise ValueError(\"the cosmology you chose does not exist\")\n\n cosmoClass_nl = Class()\n cosmoClass_nl.set(class_params)\n cosmoClass_nl.compute()\n\n # rescale the normalization of matter power spectrum to have sig8=0.8 today\n sig8 = cosmoClass_nl.sigma8()\n A_s = cosmoClass_nl.pars['A_s']\n cosmoClass_nl.struct_cleanup() # does not clean the input class_params, cosmo.empty() does that\n cosmoClass_nl.set(A_s=A_s*(sig8_0*1./sig8)**2)\n cosmoClass_nl.compute()\n\n h = cosmoClass_nl.pars['h']\n pk_nl = N.asarray([ cosmoClass_nl.pk(x*h, 0.,)*h**3 for x in k ])\n\n kpk = N.vstack((k,pk_nl))\n \n N.savetxt(saveto,kpk)\n print('saving', saveto )\n 
return", "def __init__(self,XXZ_,kop_,excitations_,sol_ = None):\n self.XXZ=XXZ_ #Defines the model - XXXmodel,XXZmodel,XXZmodelTrig or XXZmodelHyp\n self.levels=self.XXZ.levels #Energy levels\n self.g = kop_ #Coupling constant\n self.gamma = self.XXZ.get_c() #Gamma associated with Gaudin algebra\n self.N = excitations_ #Number of excitations\n self.n=self.XXZ.get_nlevels() #Number of single-particle levels\n self.rapidities=None; #Rapidities (have to be calculated)\n if sol_ == None:\n self.solution = None #Set of g*Lambda_i (have to be calculated)\n else: \n self.solution = np.array(sol_)\n assert(len(self.solution) == len(self.levels))\n assert(self.N <= self.n)\n self.occupation=None #Set of occupation numbers (follow from derivative of g*Lambda_i)", "def generate_knn(self,rating_data):\n\n algo = {}\n bcKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})\n algo['bcKNN'] = bcKNN\n\n wmKNN = KNNWithMeans(sim_options={'name': 'cosine', 'user_based': True})\n algo['wmKNN'] = wmKNN\n\n wzKNN = KNNWithZScore(sim_options={'name': 'cosine', 'user_based': True})\n algo['wzKNN'] = wzKNN\n\n blKNN = KNNBaseline(sim_options={'name': 'cosine', 'user_based': True})\n algo['blKNN'] = blKNN\n\n\n # tune param for knnBaseline, since it has best accuracy\n param_grid_bl = {'k': [10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 100]}\n best_params_bl = self.tune_and_find_parameter('blKNN', KNNBaseline, rating_data, param_grid_bl)\n\n blKNN_tuned = KNNBaseline(k=best_params_bl['k'])\n algo.update({'blKNN_tuned': blKNN_tuned})\n\n return algo", "def __init__(self, k=1):\n self.k = k\n self.x = None\n self.y = None\n self.classes_ = None", "def __init__(self, Nparticles,Nkicks,GAMMA, KAPPA):\n\n\n\t\tself.__Nparticles = Nparticles\n\t\tself.__Nkicks = Nkicks\n\n\t\t\n\t\tself.__kappa = KAPPA\n\t\tself.__gamma = GAMMA\n\t\tself.__omega = 1\n\t\tself.__dt = 0.0005\n\t\tself.__Kbt = 0\n\t\tself.__q = 4\n\t\tself.__tk = 2*np.pi/self.__q\n\n\n\t\t#Fa is the matrix to solve the Langevin equation using the Euler's method.\n\t\tself.__Fa = np.array([[0,-self.__omega**2],[1,-self.__gamma]])\n\t\tself.__eta = 0.1\n\n\t\t#self.__XPinit = np.random.random((self.__Nparticles,2))*10\n\t\tself.__XPinit = np.random.normal(0,3.5,(self.__Nparticles,2))\n\t\tself.__XPEnsembleBefore, self.__XPEnsembleAfter = self.__trajectories()", "def build(self,A,k=5):\n # instantiate a Crayon::Graph object\n self.cpp = _crayon.neighborhood(A,k)\n # retrieve adjacency matrix\n self.adj = self.cpp.adj()\n # compute its Graphlet Degree Vector\n self.gdv = self.cpp.gdv()\n # convert node-wise to graph-wise graphlet frequencies\n self.sgdv = np.sum(self.gdv,axis=0)\n # weight GDV according to dependencies between orbits\n o = np.array([1, 2, 2, 2, 3, 4, 3, 3, 4, 3,\n 4, 4, 4, 4, 3, 4, 6, 5, 4, 5,\n 6, 6, 4, 4, 4, 5, 7, 4, 6, 6,\n 7, 4, 6, 6, 6, 5, 6, 7, 7, 5,\n 7, 6, 7, 6, 5, 5, 6, 8, 7, 6,\n 6, 8, 6, 9, 5, 6, 4, 6, 6, 7,\n 8, 6, 6, 8, 7, 6, 7, 7, 8, 5,\n 6, 6, 4],dtype=np.float)\n w = 1. 
- o / 73.\n self.ngdv = self.sgdv * w[:self.sgdv.shape[0]]\n self.ngdv = self.ngdv / max(float(np.sum(self.ngdv)),1.)", "def zero_K(self):\n # print('zero_K axnode: ', self.cell.axnode)\n i = 0\n for node in self.cell.axnode:\n for seg in node:\n if i == 0:\n print(f\"KLT: {seg.klt.gbar:.6f} mho/cm2\")\n print(f\"KCNQ: {seg.kcnq.gbar:.6f} mho/cm2\")\n print(f\"KHT: {seg.kht.gbar:.6f} mho/cm2\")\n i = 1\n # seg.klt.gbar = 0e-3\n seg.kcnq.gbar = 0e-3\n # seg.kcnq.phi_m = seg.kcnq.phi_m - 20.\n # seg.kht.gbar = 0e-3\n # seg.kht.vshift = -20.\n pass", "def rk8(accel,m,r,h,v): \n k1v = accel(m,r)\n k1r = v\n k2v = accel(m,r + 0.25*k1r*h)\n k2r = v + (0.25*k1v)*h\n k3v = accel(m,r + (5/72.*k1r + 1/72.*k2r)*h)\n k3r = v + (5/72.*k1v + 1/72.*k2v)*h\n k4v = accel(m,r + (1/32.*k1r +3/32.*k3r)*h)\n k4r = v + (1/32.*k1v +3/32.*k3v)*h\n k5v = accel(m,r + (106/125.*k1r- 408/125.*k3r + 352/125.*k4r)*h)\n k5r = v + (106/125.*k1v- 408/125.*k3v + 352/125.*k4v)*h\n k6v = accel(m,r + (1/48.*k1r+ 8/33.*k4r - 125/528.*k5r)*h)\n k6r = v + (1/48.*k1v+ 8/33.*k4v - 125/528.*k5v)*h\n k7v = accel(m,r + (-13893*k1r+ 39936*k4r -64125*k5r+ 60720*k6r)*h/26411.)\n k7r = v +(-13893*k1v+ 39936*k4v -64125*k5v+ 60720*k6v)*h/26411.\n k8v = accel(m,r + (37/392.*k1r+ 1625/9408.*k5r -2/15.*k6r+ 61/6720*k7r)*h)\n k8r = v + (37/392.*k1v+ 1625/9408.*k5v -2/15.*k6v+ 61/6720*k7v)*h\n k9v = accel(m,r +(17176/25515.*k1r - 47104/25515.*k4r + 1325/504.*k5r - 41792/25515.*k6r + 20237/145800.*k7r + 4312/6075.*k8r)*h)\n k9r = v + (17176/25515.*k1v - 47104/25515.*k4v + 1325/504.*k5v - 41792/25515.*k6v + 20237/145800.*k7v + 4312/6075.*k8v)*h\n k10v = accel(m,r + ( -23834/180075.*k1r - 77824/1980825.*k4r- 636635/633864.*k5r + 254048/300125.*k6r - 183/7000.*k7r + 8/11.*k8r - 324/3773.*k9r)*h)\n k10r = v + ( -23834/180075.*k1v - 77824/1980825.*k4v- 636635/633864.*k5v + 254048/300125.*k6v - 183/7000.*k7v + 8/11.*k8v - 324/3773.*k9v)*h\n k11v= accel(m,r + (12733/7600.*k1r - 20032/5225.*k4r + 456485/80256.*k5r - 42599/7125.*k6r + 339227/912000.*k7r - 1029/4108.*k8r + 1701/1408.*k9r + 5145/2432.*k10r)*h)\n k11r = v + (12733/7600.*k1v - 20032/5225.*k4v + 456485/80256.*k5v - 42599/7125.*k6v + 339227/912000.*k7v - 1029/4108.*k8v + 1701/1408.*k9v + 5145/2432.*k10v)*h\n k12v = accel(m,r + h*(-27061/204120.*k1r + 40448/280665.*k4r -1353775/1197504.*k5r + 17662/25515.*k6r - 71687/1166400.*k7r + 98/225.*k8r + 1/16.*k9r + 3773/11664.*k10r))\n k12r = v + h*(-27061/204120.*k1v + 40448/280665.*k4v -1353775/1197504.*k5v + 17662/25515.*k6v - 71687/1166400.*k7v + 98/225.*k8v + 1/16.*k9v + 3773/11664.*k10v)\n k13v = accel(m,r + h*(11203/8680.*k1r - 38144/11935.*k4r + 2354425/458304.*k5r - 84046/16275.*k6r + 673309/1636800.*k7r + 4704/8525.*k8r + 9477/10912.*k9r - 1029/992.*k10r + 19/341.*k12r))\n k13r = v + h*(11203/8680.*k1v - 38144/11935.*k4v + 2354425/458304.*k5v - 84046/16275.*k6v + 673309/1636800.*k7v + 4704/8525.*k8v + 9477/10912.*k9v - 1029/992.*k10v + 19/341.*k12v)\n\n\n new_v8 = v + h*(13/288.*k1v +32/125.*k6v + 31213/144000.*k7v + 2401/12375.*k8v + 1701/14080.*k9v + 2401/19200.*k10v + 19/450.*k11v) \n new_r8 = r + h*(13/288.*k1r +32/125.*k6r + 31213/144000.*k7r + 2401/12375.*k8r + 1701/14080.*k9r + 2401/19200.*k10r + 19/450.*k11r) \n \n return new_v8,new_r8", "def _u_kln(self, eTs, protocol, noBeta=False):\n L = len(protocol)\n\n addMM = ('MM' in protocol[0].keys()) and (protocol[0]['MM'])\n addSite = ('site' in protocol[0].keys()) and (protocol[0]['site'])\n probe_keys = ['MM','k_angular_ext','k_spatial_ext','k_angular_int'] + \\\n scalables\n probe_key = 
[key for key in protocol[0].keys() if key in probe_keys][0]\n\n if isinstance(eTs, dict):\n # There is one configuration per state\n K = len(eTs[probe_key])\n N_k = np.ones(K, dtype=int)\n u_kln = []\n E_base = np.zeros(K)\n if addMM:\n E_base += eTs['MM']\n if addSite:\n E_base += eTs['site']\n for l in range(L):\n E = 1. * E_base\n for scalable in scalables:\n if scalable in protocol[l].keys():\n E += protocol[l][scalable] * eTs[scalable]\n for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:\n if key in protocol[l].keys():\n E += protocol[l][key] * eTs[key]\n if noBeta:\n u_kln.append(E)\n else:\n u_kln.append(E / (R * protocol[l]['T']))\n elif isinstance(eTs[0], dict):\n K = len(eTs)\n N_k = np.array([len(eTs[k][probe_key]) for k in range(K)])\n u_kln = np.zeros([K, L, N_k.max()], np.float)\n\n for k in range(K):\n E_base = 0.0\n if addMM:\n E_base += eTs[k]['MM']\n if addSite:\n E_base += eTs[k]['site']\n for l in range(L):\n E = 1. * E_base\n for scalable in scalables:\n if scalable in protocol[l].keys():\n E += protocol[l][scalable] * eTs[k][scalable]\n for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:\n if key in protocol[l].keys():\n E += protocol[l][key] * eTs[k][key]\n if noBeta:\n u_kln[k, l, :N_k[k]] = E\n else:\n u_kln[k, l, :N_k[k]] = E / (R * protocol[l]['T'])\n elif isinstance(eTs[0], list):\n K = len(eTs)\n N_k = np.zeros(K, dtype=int)\n\n for k in range(K):\n for c in range(len(eTs[k])):\n N_k[k] += len(eTs[k][c][probe_key])\n u_kln = np.zeros([K, L, N_k.max()], np.float)\n\n for k in range(K):\n E_base = 0.0\n C = len(eTs[k])\n if addMM:\n E_base += np.concatenate([eTs[k][c]['MM'] for c in range(C)])\n if addSite:\n E_base += np.concatenate([eTs[k][c]['site'] for c in range(C)])\n for l in range(L):\n E = 1. * E_base\n for scalable in scalables:\n if scalable in protocol[l].keys():\n E += protocol[l][scalable]*np.concatenate([eTs[k][c][scalable] \\\n for c in range(C)])\n for key in ['k_angular_ext', 'k_spatial_ext', 'k_angular_int']:\n if key in protocol[l].keys():\n E += protocol[l][key]*np.concatenate([eTs[k][c][key] \\\n for c in range(C)])\n if noBeta:\n u_kln[k, l, :N_k[k]] = E\n else:\n u_kln[k, l, :N_k[k]] = E / (R * protocol[l]['T'])\n\n if (K == 1) and (L == 1):\n return u_kln.ravel()\n else:\n return (u_kln, N_k)", "def kozakov2015(*args, **kwargs):\n clusters = []\n for sel in args:\n cluster = Cluster(\"\", sel, pm.get_coords(sel))\n clusters.append(cluster)\n\n ensemble = Kozakov2015Ensemble(clusters)\n print(\n textwrap.dedent(\n f\"\"\"\n {ensemble}\n Class {ensemble.klass}\n S {ensemble.strength}\n S0 {ensemble.strength0}\n CD {ensemble.max_center_to_center}\n MD {ensemble.max_dist}\n \"\"\"\n )\n )", "def _k(self, T):\n RT = Rgas * T\n return (self.parameters.A1 / np.exp(self.parameters.E1 / RT),\n self.parameters.A2 / np.exp(self.parameters.E2 / RT))", "def ACM_Kruskal(G):\n pass" ]
[ "0.61084265", "0.58661884", "0.5722478", "0.56312686", "0.562919", "0.55858815", "0.558461", "0.5543797", "0.55313665", "0.5458351", "0.5447106", "0.5440787", "0.5429062", "0.5335893", "0.5293114", "0.52813584", "0.5255335", "0.5196818", "0.51894844", "0.5181761", "0.5176679", "0.51601774", "0.51532227", "0.51478827", "0.5138386", "0.5134018", "0.5124071", "0.5119534", "0.5111954", "0.51118916" ]
0.73484176
0
Read river centerline coordinates from user-prepared centerline file. Centerline is then resampled to prevent non-uniform spacing. Only works for Mode 2.
def read_centerline(s, x, y, cur, theta): if MODE == 2: print('MODE 2: READ YOUR OWN RIVER CENTERLINE FROM FILE is selected') try: centerlinexy = np.loadtxt(FNAME) except IOError: print('\'' + FNAME + '\' not found') print('Please place \'' + FNAME + '\' in working directory\n') job_done() sys.exit(1) else: return s, x, y, cur, theta x = centerlinexy[:, 0] y = centerlinexy[:, 1] if FLIPSTRM: x = x[::-1] y = y[::-1] # if np.mean(np.abs(x)) > 1e6 or np.mean(np.abs(y)) > 1e6: # print('!!! centerline X/Y too large, forced to shift toward (0, 0) !!!') # print('shifting vector: ('+str(-np.mean(x))+', '+str(-np.mean(y))+')') # x -= np.mean(x) # y -= np.mean(y) length = x.size s = np.zeros(length) for j in range(1, x.size): s[j] = s[j-1] + np.sqrt((x[j]-x[j-1])**2 + (y[j]-y[j-1])**2) mean1 = np.around(np.mean(np.diff(s)), decimals=2) median1 = np.around(np.median(np.diff(s)), decimals=2) mode1 = np.around(mode(np.diff(s))[0][0], decimals=2) print('+> Resampling centerline & Calculating curvature...', end='') s, x, y, cur, theta = resample_centerline(s, x, y) print(' [done]') mean2 = np.around(np.mean(np.diff(s)), decimals=2) median2 = np.around(np.median(np.diff(s)), decimals=2) mode2 = np.around(mode(np.diff(s))[0][0], decimals=2) print_resamp_table(mean1, median1, mode1, mean2, median2, mode2) print_para_table(s) return s, x, y, cur, theta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ds9_line(self, center_coordinates, width=20 ):\n for fn in self.images:\n print(fn)\n ff = pyfits.open(fn)\n w = wcs.WCS(ff[0].header)\n co = center_coordinates\n print(co.ra.deg, co.dec.deg )\n #pix = w.wcs_world2pix([co.ra], [co.dec], 0)\n #for a, b, in zip(pix[0], pix[1]):\n #print(a,b)\n #a0, a1 = a-width/2, a+width/2\n #b0, b1 = b-width/2, b+width/2\n #a0, a1, b0, b1 = int(a0[0]), int(a1[0]), int(b0[0]), int(b1[0])\n #print(a0, a1, b0, b1)\n #im = ff[0].data[b0:b1,a0:a1 ]\n #plt.imshow(im, origin=\"lower\", extent=(b0,b1,a0,a1))\n #plt.scatter(b, a, marker='x', color='r', s=30)\n ##plt.scatter(a, b, marker='o', color='r', s=30)\n #plt.show()\n ds9string = str(\"ds9 %s -pan to %f %f wcs -zoom 16-cmap b -colorbar no -cmap invert yes \"% (fn, co.ra.deg, co.dec.deg))\n print(ds9string)\n \n #print(pix)\n print()", "def read_CORNERS(self, fp):\r\n corners = []\r\n count = 0\r\n nb = self.size[0] * self.size[1] * self.size[2] * 24\r\n\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if \"CORNERS\" in item[0]:\r\n break\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n for c in item:\r\n if \"*\" in c:\r\n item = c.split(\"*\")\r\n for i in range(int(item[0])):\r\n corners.append(item[1])\r\n count += 1\r\n else:\r\n corners.append(c)\r\n count += 1\r\n # all attributes have been read\r\n if count == nb:\r\n break\r\n sp = int(len(corners) / 3)\r\n X = list(map(float, corners[:sp]))\r\n Y = list(map(float, corners[sp:2*sp]))\r\n Z = list(map(float, corners[2*sp:]))\r\n return X, Y, Z", "def retrieve_beam_centers(centers_file, voxel_size):\n \n origin_shifts, tilt_angles = list(), list()\n f = open(centers_file, 'r') \n content = f.readlines() \n \n # extract position of tile 0 for each tilt angle\n for line in content:\n as_array = np.array(line.strip().split()).astype(float)\n if (len(as_array) == 1):\n tilt_angles.append(as_array[0])\n counter = 0\n elif (len(as_array) >= 2) and (counter==0):\n origin_shifts.append(as_array * 1e4/voxel_size)\n counter += 1\n \n origin_shifts = np.array(origin_shifts)[:,:2] \n return origin_shifts, np.array(tilt_angles)", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def cfdReadPointsFile(self):\r\n\r\n with open(self.pointsFile,\"r\") as fpid:\r\n \r\n print('Reading points file ...')\r\n points_x=[]\r\n points_y=[]\r\n points_z=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n self.numberOfNodes = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\"\")\r\n tline=tline.replace(\")\",\"\")\r\n tline=tline.split()\r\n \r\n points_x.append(float(tline[0]))\r\n points_y.append(float(tline[1]))\r\n points_z.append(float(tline[2]))\r\n \r\n ## (array) with the mesh point coordinates \r\n self.nodeCentroids = np.array((points_x, points_y, points_z), dtype=float).transpose()", "def read_off(filename):\n with open(filename) as f:\n # 
first line [0] has only the word OFF\n lines = f.readlines()\n if lines[0].find('OFF') < 0:\n print('not an OFF file')\n return None, None\n # second line [1] has counts for ....\n counts = lines[2].split()\n vertex_count = int(counts[0])\n vox_count = int(counts[1])\n # then follows vertices from lines[3] to lines[3+vertex_count]\n vertices = np.asarray([float(s) for s in lines[3].split()])\n\n for line in lines[4:3 + vertex_count]:\n vertices = np.vstack(\n (\n vertices,\n np.asarray([float(s) for s in line.split()])\n )\n )\n # now extract the centers lines[2+vertex_count] to lines(-1)\n centers = np.asarray([float(s)\n for s in lines[3 + vertex_count].split()])\n for line in lines[3 + vertex_count + 1:3 + vertex_count + vox_count]:\n if len(line) > 0:\n centers = np.vstack(\n (\n centers,\n np.asarray([float(s) for s in line.split()])\n )\n )\n return vertices, centers", "def __load_geo(self):\n pass\n # process any splines? and turn them into arcs\n # http://www.mathopenref.com/constcirclecenter.html\n # find max dist between points\n # double it\n # select two segments\n # draw normal lines\n # find intersections, that is the center", "def process(cline, rline, file1, file2, file3, library_sizes):\n cparts = cline.split(\"\\t\")\n rparts = rline.split(\"\\t\")\n\n # confirm that the two lines being processed are for the same locus\n assert(cparts[0] == rparts[0] and cparts[1] == rparts[1])\n\n # split first column (locus) into three columns containing its\n # consituent parts (chromosome, start base, and end base)\n chr = rparts[0].split(\":\")[0]\n start = rparts[0].split(\":\")[1].split(\"-\")[0]\n end = rparts[0].split(\":\")[1].split(\"-\")[1]\n\n line1 = [chr, start, end] + rparts[1:] + cparts[2:] # counts in reads\n line2 = [chr, start, end] + rparts[1:] + [cparts[2]] # counts in rpm\n line3 = [chr, start, end] + rparts[1:] + [cparts[2]] # counts in rpkm\n\n gene_length = int(rparts[2])\n\n for i in range(3, len(cparts)):\n\n index = i - 3\n lib_size = library_sizes[index][1]\n\n mapped_reads = int(cparts[i])\n\n if lib_size == 0: # Prevent DIVBYZERO error\n rpm = 0\n rpkm = 0\n elif gene_length == 0:\n rpkm = 0\n else:\n rpm = ((mapped_reads * (10 ** 6)) / lib_size)\n rpkm = ((mapped_reads * (10 ** 9)) / (lib_size * gene_length))\n\n line2 += [str(rpm)]\n line3 += [str(rpkm)]\n\n out1 = \"\\t\".join(line1) + \"\\n\"\n out2 = \"\\t\".join(line2) + \"\\n\"\n out3 = \"\\t\".join(line3) + \"\\n\"\n\n file1.write(out1)\n file2.write(out2)\n file3.write(out3)", "def read_coordinates(self, channel=\"1\"):\n\n sdsidx = {\"VNIR\": (0, 1), \"SWIR\": (6, 7), \"TIR\": (14, 15)}\n\n latstr = \":\".join(\n (\n \"HDF4_SDS\",\n \"UNKNOWN\",\n f\"{self.filename}\",\n f\"{sdsidx[self.channel2sensor[channel]][0]}\",\n )\n )\n lat = gdal.Open(latstr)\n lat = geocentric2geodetic(lat.ReadAsArray().astype(\"float\"))\n\n lonstr = \":\".join(\n (\n \"HDF4_SDS\",\n \"UNKNOWN\",\n f\"{self.filename}\",\n f\"{sdsidx[self.channel2sensor[channel]][1]}\",\n )\n )\n lon = gdal.Open(lonstr)\n lon = lon.ReadAsArray().astype(\"float\")\n\n return lat, lon", "def get_rawimage(self, raw_file, det):\n # Check for file; allow for extra .gz, etc. 
suffix\n fil = glob.glob(raw_file + '*')\n if len(fil) != 1:\n msgs.error(\"Found {:d} files matching {:s}\".format(len(fil)))\n\n # Read\n msgs.info(\"Reading MMIRS file: {:s}\".format(fil[0]))\n hdu = fits.open(fil[0])\n head1 = fits.getheader(fil[0],1)\n\n detector_par = self.get_detector_par(hdu, det if det is None else 1)\n\n # get the x and y binning factors...\n binning = head1['CCDSUM']\n xbin, ybin = [int(ibin) for ibin in binning.split(' ')]\n\n # First read over the header info to determine the size of the output array...\n datasec = head1['DATASEC']\n x1, x2, y1, y2 = np.array(parse.load_sections(datasec, fmt_iraf=False)).flatten()\n\n # ToDo: I am currently using the standard double correlated frame, that is a difference between\n # the first and final read-outs. In the future need to explore up-the-ramp fitting.\n if len(hdu)>2:\n data = mmirs_read_amp(hdu[1].data.astype('float64')) - mmirs_read_amp(hdu[2].data.astype('float64'))\n else:\n data = mmirs_read_amp(hdu[1].data.astype('float64'))\n array = data[x1-1:x2,y1-1:y2]\n\n ## ToDo: This is a hack. Need to solve this issue. I cut at 998 due to the HK zero order contaminating\n ## the blue part of the zJ+HK spectrum. For other setup, you do not need to cut the detector.\n if (head1['FILTER']=='zJ') and (head1['DISPERSE']=='HK'):\n array = array[:int(998/ybin),:]\n rawdatasec_img = np.ones_like(array,dtype='int')\n oscansec_img = np.ones_like(array,dtype='int')\n\n # Need the exposure time\n exptime = hdu[self.meta['exptime']['ext']].header[self.meta['exptime']['card']]\n # Return, transposing array back to orient the overscan properly\n return detector_par, np.flipud(array), hdu, exptime, np.flipud(rawdatasec_img),\\\n np.flipud(np.flipud(oscansec_img))", "def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer = refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = 
np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)", "def read_pts_file(filename):\n lines = open(filename).read().splitlines()\n if int(lines[1:2][0].split('n_points:')[-1]) != 68:\n print ('No 68-landmark format founded')\n return None\n lines = lines[3:71]\n\n landmarks = []\n for l in lines:\n coords = l.split()\n landmarks.append([float(coords[0]), float(coords[1])])\n return landmarks", "def read_xy_file(self, city = \"\"):\n\t\tcenter = []\n\t\ttemp_list = []\n\t\tif 1 > len( city ):\n\t\t\treturn center\n\t\ttoday = datetime.datetime.now().strftime(\"%Y%m%d\")\n\t\ttry:\n\t\t\tinput_filename = f\"{city}{self.second_part_of_xy_filename}\"\n\t\t\twith open( os.path.join( self.input_dir, input_filename ), 'r', encoding='utf-8') as f:\n\t\t\t\tfor item 
in f.readlines()[1:]:\n\t\t\t\t\tcenter.append(tuple(item.strip().split(\",\")[-5:])) # lng, lat, ok0, max_value, max_timestamp\n\t\texcept Exception as ex:\n\t\t\tcenter = []\n\t\t\tself.logger.error( f\"Inside Method {sys._getframe().f_code.co_name} of Class {self.__class__.__name__}, cannot read xy_list file ({input_filename}) or requested xy points file ({input_filename}). Exception = {ex}\" )\n\t\treturn center", "def find_centroid(event_file):\n \n print('Finding the centroid of the event file...\\n')\n \n make_img(event_file,clobber=True)\n \n fits = pyfits.open('temp.fits')\n \n #Previously used the RA and DEC headers to find the centre, now trying a more nuanced\n #max pixel value method\n \n #source_ra = fits[1].header['RA_TARG']\n #source_dec = fits[1].header['DEC_TARG']\n \n #return source_ra,source_dec\n \n data = fits[0].data\n \n #As the data from make_img is 1024x1024 based on the centre of the image, use modulo\n #arithmetic to find the physical x and y coordinates\n \n argmax = np.argmax(data)\n \n x = argmax%1024 + 3584\n y = int(argmax/1024) + 3584\n \n return x,y", "def read_coordinate_file(file_path, start_row=1, end_row=None):\n\n # Read file\n with open(file_path, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n x = []\n y = []\n z = []\n\n for row in spamreader:\n x.append(row[0])\n y.append(row[1])\n z.append(row[2])\n\n if end_row is None:\n end_row = len(x) - 1\n\n x = [float(l) for l in x[start_row:end_row]]\n y = [float(l) for l in y[start_row:end_row]]\n z = [float(l) for l in z[start_row:end_row]]\n\n return [[x[i], y[i], z[i]] for i in range(len(x))]", "def loadDCPos(self):\n with open(gv.DC_POS_PATH, 'r') as fh: \n for line in fh:\n dcID, _, dcPos = line.rstrip().split(';')\n self.centerDict[dcID] = [float(i) for i in dcPos.split(',')]", "def extract_profile(tif, line_file, ds):\r\n\r\n import numpy as np\r\n import gdal\r\n import fiona\r\n from scipy.interpolate import interp1d\r\n# from scipy.interpolate import interp2d\r\n from scipy.ndimage import map_coordinates\r\n \r\n #%% Create evenly spaced points\r\n # Read coordinates of the profile line from shapefile\r\n fiona_obj = fiona.open(line_file)\r\n# line = fiona_obj.next()\r\n line = iter(fiona_obj).next() # this line is proper syntax for fiona v2. 
Corrected on Mar 12, 2021 by TCB\r\n coords = np.array( line['geometry']['coordinates'] ) # m the easting and northing coordinates of the vertices along the shapefile\r\n \r\n sqrd_deltas = np.diff(coords, axis=0)**2 # squared differences between x and y coordinates\r\n deltas = np.sum(sqrd_deltas, axis=1)**0.5 # m straight-line path length between adjacent points in the shapefile\r\n dist = np.cumsum( np.append(0, deltas) ) # m running distance along the shapefile from one end.\r\n \r\n disti = np.arange(dist[0], dist[-1], ds) # m vector of evenly spaced distances along the shapefile,\r\n # equivalent to an evenly spaced version of dist\r\n xi = interp1d(dist, coords[:,0])(disti) # m the easting coordinates of disti points, at which profile will be extracted\r\n yi = interp1d(dist, coords[:,1])(disti) # m the northing coordinates of disti points, at which profile will be extracted\r\n\r\n #%% Manipulate the raster and extract its data\r\n # ---- dimensions of geotiff\r\n gtif = gdal.Open(tif)\r\n xmin,xres,xskew,ymax,yskew,yres = gtif.GetGeoTransform()\r\n\r\n\r\n # convert the profile coordinates into pixel coordinates\r\n px = (xi - xmin) / xres\r\n py = (yi - ymax) / yres\r\n# px = np.round(col).astype(int)\r\n# py = np.round(row).astype(int)\r\n \r\n \r\n # pull out the array of raster data. Data are assumed to be in band 1.\r\n gtif_data = gtif.GetRasterBand(1).ReadAsArray()\r\n# gtif_data = band.ReadAsArray()px,py, 1, 1)\r\n \r\n # Two early versions of extacting the data:\r\n # profile = map_coordinates(gtif_data,[px,py],order=0,cval=np.nan)\r\n # profile = interp2d(np.arange(gtif_data.shape[1]), np.arange(gtif_data.shape[0]), \r\n # gtif_data)(px, py)\r\n\r\n # Interpolate within gtif_data at given pixel coordinates to identify values from the geotiff \r\n # Uses a 1st order spline interpolant to extract estimated values of\r\n # gtif_data at the (non-integer) pixel values px and py.\r\n # Function returns `cval' at undefined values of gtif_data.\r\n profile = map_coordinates(gtif_data, np.vstack((py, px)),\r\n order=1, cval=np.nan)\r\n \r\n# profile = np.array(profile,dtype=float)\r\n if type(profile[0]) == float:\r\n profile[np.abs(profile) == 9999] = np.nan\r\n \r\n return disti, profile", "def from_srf_file(self, filename, normalize=False):\n with open(filename, \"rt\") as f:\n # go to POINTS block\n line = f.readline()\n while 'POINTS' not in line:\n line = f.readline()\n\n npoints = int(line.split()[1])\n sources = []\n\n for _ in np.arange(npoints):\n lon, lat, dep, stk, dip, area, tinit, dt = \\\n map(float, f.readline().split())\n rake, slip1, nt1, slip2, nt2, slip3, nt3 = \\\n map(float, f.readline().split())\n\n dep *= 1e3 # km > m\n area *= 1e-4 # cm^2 > m^2\n slip1 *= 1e-2 # cm > m\n slip2 *= 1e-2 # cm > m\n # slip3 *= 1e-2 # cm > m\n\n nt1, nt2, nt3 = map(int, (nt1, nt2, nt3))\n\n if nt1 > 0:\n line = f.readline()\n while len(line.split()) < nt1:\n line = line + f.readline()\n stf = np.array(line.split(), dtype=float)\n if normalize:\n stf /= np.trapz(stf, dx=dt)\n\n M0 = area * DEFAULT_MU * slip1\n\n sources.append(\n Source.from_strike_dip_rake(\n lat, lon, dep, stk, dip, rake, M0,\n time_shift=tinit, sliprate=stf, dt=dt))\n\n if nt2 > 0:\n line = f.readline()\n while len(line.split()) < nt2:\n line = line + f.readline()\n stf = np.array(line.split(), dtype=float)\n if normalize:\n stf /= np.trapz(stf, dx=dt)\n\n M0 = area * DEFAULT_MU * slip2\n\n sources.append(\n Source.from_strike_dip_rake(\n lat, lon, dep, stk, dip, rake, M0,\n time_shift=tinit, sliprate=stf, 
dt=dt))\n\n if nt3 > 0:\n raise NotImplementedError('Slip along u3 axis')\n\n return self(pointsources=sources)", "def get_oracle_from_candidate_centerlines(candidate_centerlines: List[np.ndarray], xy: np.ndarray) -> LineString:\n\n max_offset = float(\"inf\")\n max_dist_along_cl = -float(\"inf\")\n\n # Chose based on distance travelled along centerline\n oracle_centerlines = []\n for centerline in candidate_centerlines:\n centerLine = LineString(centerline)\n start_dist = centerLine.project(Point(xy[0, 0], xy[0, 1]))\n end_dist = centerLine.project(Point(xy[-1, 0], xy[-1, 1]))\n dist_along_cl = end_dist - start_dist\n if dist_along_cl > max_dist_along_cl - 1:\n max_dist_along_cl = dist_along_cl\n oracle_centerlines.append(centerline)\n\n # Chose based on maximum offset\n min_of_max_offset = float(\"inf\")\n for centerline in oracle_centerlines:\n max_offset = 0.0\n for i in range(xy.shape[0]):\n offset = Point(xy[i]).distance(LineString(centerline))\n max_offset = max(offset, max_offset)\n if max_offset < min_of_max_offset:\n min_of_max_offset = max_offset\n oracle_centerline = centerline\n\n return oracle_centerline", "def read_coordinate_file(file):\n with open(file, 'r') as file1:\n coords = []\n\n for line in file1:\n line = line.strip('{} \\n')\n (a, b) = line.split(\",\")\n ''' \n x and y are expressed as latitude and longitude. These are converted with the Mercator projection (from Computer assignment 1)\n into x and y coordinates.\n '''\n coord = [(float(b)*m.pi/180), (m.log((m.tan(m.pi/4+m.pi*float(a)/360))))]\n coords.append(coord)\n return np.array(coords)", "def v2calib2sections(filename):\n\n from xfel.cftbx.detector.cspad_cbf_tbx import read_slac_metrology\n from scitbx.matrix import sqr\n from xfel.cxi.cspad_ana.cspad_tbx import pixel_size\n\n # metro is a dictionary where the keys are levels in the detector\n # hierarchy and the values are 'basis' objects\n metro = read_slac_metrology(filename)\n\n # 90 degree rotation to get into same frame\n reference_frame = sqr((0,-1, 0, 0,\n 1, 0, 0, 0,\n 0, 0, 1, 0,\n 0, 0, 0, 1))\n\n d = 0\n d_basis = metro[(d,)]\n\n sections = []\n for q in range(4):\n sections.append([])\n q_basis = metro[(d,q)]\n for s in range(8):\n if not (d,q,s) in metro:\n continue\n\n s_basis = metro[(d,q,s)]\n\n # collapse the transformations from the detector center to the quadrant center\n # to the sensor center\n transform = reference_frame * \\\n d_basis.as_homogenous_transformation() * \\\n q_basis.as_homogenous_transformation() * \\\n s_basis.as_homogenous_transformation()\n\n # an homologous transformation is a 4x4 matrix, with a 3x3 rotation in the\n # upper left corner and the translation in the right-most column. 
The last\n # row is 0,0,0,1\n ori = sqr((transform[0],transform[1],transform[2],\n transform[4],transform[5],transform[6],\n transform[8],transform[9],transform[10]))\n angle = ori.r3_rotation_matrix_as_x_y_z_angles(deg=True)[2]\n\n # move the reference of the sensor so its relative to the upper left of the\n # detector instead of the center of the detector\n center = (1765/2)+(transform[3]/pixel_size),(1765/2)+(transform[7]/pixel_size)\n\n sections[q].append(Section(angle, center))\n\n return sections", "def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' % datemo, grid\n \n return lat,lon,thk", "def find_center(file):\n\n data = pyfits.getdata(file)\n chipx = data.field('X')\n chipy = data.field('Y')\n#\n#--- because the array is too large to handle in one swipe, divide it into 8x8 segments\n#\n xmin = min(chipx)\n ymin = min(chipy)\n xmax = max(chipx)\n ymax = max(chipy)\n xstep = int((xmax-xmin) / 8 )\n ystep = int((ymax-ymin) / 8 )\n#\n#--- find the interval which contains largest samples \n#\n cposx = 0\n cposy = 0\n cmax = 0\n for i in range (0, 8):\n xstart = xstep * i + xmin\n xstop = xstart + xstep\n for j in range (0, 8):\n ystart = ystep * j + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n if len(chipx_p) > cmax:\n cmax = len(chipx_p)\n cposx = i\n cposy = j\n#\n#--- extract the area of the highest count\n#\n xpos_list = []\n ypos_list = []\n maxv_list = []\n xstart = xstep * cposx + xmin\n xstop = xstart + xstep\n\n ystart = ystep * cposy + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n#\n#--- count up the events. 
bin to 2x2 so that we get enough count in each bin\n#\n xmin = min(chipx_p)\n xmax = max(chipx_p)\n xdim = int(0.5 * (xmax - xmin)) + 1\n ymin = min(chipy_p)\n ymax = max(chipy_p)\n ydim = int(0.5 * (ymax - ymin)) + 1\n\n cbin = [[0 for y in range(0, ydim)] for x in range(0, xdim)]\n for j in range(0, len(chipy_p)):\n xpos = int(0.5 * (chipx_p[j]-xmin))\n ypos = int(0.5 * (chipy_p[j]-ymin))\n cbin[xpos][ypos] += 1\n#\n#--- now find max position\n#\n vmax = 0\n xx = 0\n yy = 0\n for m in range(0, xdim):\n for n in range(0, ydim):\n if cbin[m][n] > vmax:\n vmax = cbin[m][n]\n xx = m\n yy = n\n#\n#--- take the mddle of the bin as the brightest spot\n#\n xv = int(xx * 2.0 + 1.0 + xmin)\n yv = int(yy * 2.0 + 1.0 + ymin)\n\n return [xv, yv]", "def _extract_raw_data(self, lines):\r\n\r\n i = self._find_first_data_point(lines)\r\n if self._lines_have_temperature(lines[i]):\r\n self._T = []\r\n\r\n if self._has_drift_points(lines):\r\n while i < len(lines) and lines[i][0] in ['+', '-']:\r\n self._extract_drift_point(lines[i])\r\n i += 2\r\n i += self._extract_next_forc(lines[i:])\r\n i += 1\r\n else:\r\n while i < len(lines) and lines[i][0]in ['+', '-']:\r\n i += self._extract_next_forc(lines[i:])\r\n self._extract_drift_point(lines[i-1])\r\n i += 1\r\n\r\n return", "def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = ncin.variables['sst_dtime'][:]\n sst_depth_dtime = ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n sstfill = ncin.variables['sea_surface_temperature']._FillValue\n sstao = 
ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data", "def Read_RMCA_out(Complete_Path):\n fid = open(Complete_Path,'r')\n L,R = [],[]\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n L.append(float(line[:25]))\n R.append(float(line[27:-2]))\n return np.array(L),np.array(R)", "def _processing( infile, rchr, dist, outf ):\n\n coords, sizes = build_dict(infile)\n qry_chrs = list(coords.keys())\n\n print(\"Primary\\tHaplotig\\tPrimary_Start\\tPrimary_end\\tHaplotig_Start\\tHaplotig_End\\tHaplotig_Length\", file=outf)\n for qchr in qry_chrs:\n refcoords = coords[qchr][0]\n qrycoords = coords[qchr][1]\n refst, refend, qryst, qryend = \\\n clustering( refcoords, sorted(qrycoords), sizes[qchr], dist )\n\n print(\"%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\" % \\\n (rchr, qchr, refst, refend, qryst, qryend, sizes[qchr]), file=outf)", "def load_raster_xyz(self, filename):\n with rasterio.open(filename, 'r') as src:\n ## Alias 'affine' no longer works for 'transform'\n ##matrix = src.affine\n matrix = src.transform\n self.size = (src.width, src.height)\n # read per scan line\n for row in range(0, src.height):\n window = ((row, row+1), (0, src.width))\n data = src.read(window=window)\n this_row = data[0][0]\n for column in range(0, src.width):\n x, y = matrix * (column, row)\n yield x, y, this_row[column]", "def Read_CCD_image(Path):\n fs = open(Path, 'r')\n \n #Compte le nombre de lignes, oblige pr le moment de tout lire\n # la dernière ligne est vide ! 
attention, j'initialise nb_line à -1 pour compenser\n nb_line = -1\n while 1: \n txt = fs.readline()\n nb_line = nb_line+1\n if ((txt =='')|(txt == '\\r\\n')): \n break\n fs.close()\n \n \n # je lis une ligne, compte le nombre d'espace et en deduit le nombre de colonne de la matrice\n fs = open(Path, 'r')\n txt = fs.readline()\n ii = 0\n index_line = []\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1 \n if (txt[ii:ii+1] == '\\t'):\n index_line.append(ii)\n if (txt[ii:ii+4] == '\\r\\n'):\n break\n nb_col = np.array(index_line).size\n fs.close()\n \n image = np.ones((nb_line,nb_col), dtype = float) # Create the image matrix\n # Pour les axes, je reprends les chiffres obtenus lors de la calibration du mouvement de la pointe.... cad 31nm/pixel...\n #axex = np.linspace(0,0.032*nb_line,nb_line) #microns\n #axey = np.linspace(0,0.032*nb_col,nb_col) #microns\n axex = np.linspace(0,nb_line,nb_line) #pixels\n axey = np.linspace(0,nb_col,nb_col) #pixels\n \n fs = open(Path, 'r')\n \n nb_line = 0 # I need to count the lines to fill the matrix\n while 1: \n txt = fs.readline()\n if ((txt =='')|(txt == '\\r\\n')): \n break\n if txt[0] =='#':\n pass\n else:\n #print(txt)\n ii=-1\n index_line=[]\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1 \n if (txt[ii:ii+1] == '\\t'):\n index_line.append(ii)\n if (txt[ii:ii+4] == '\\r\\n'):\n break\n # ici j'ai tous mes index d'espace pour une ligne normalement\n line = []\n line.append(txt[:index_line[0]])\n index_line = np.array(index_line) # premier nombre\n for ii in range (index_line.size -1):\n line.append(np.float(txt[index_line[ii]:index_line[ii+1]]))\n # Il me manque le dernier aussi\n #line.append(np.float(txt[index_line[-1]:])) \n image[nb_line,:] = line\n nb_line = nb_line+1\n #flipping up-down with [::-1,...] then image appears in Python as in the screen in HiPic \n return axex,axey,image[::-1,...]", "def read_jackknife_centers(des_region, ncen):\n fname=get_jackknife_centers_file(des_region, ncen)\n print(\"reading:\",fname)\n return fitsio.read(fname)" ]
[ "0.5484611", "0.54598975", "0.541714", "0.5410985", "0.53711194", "0.5348809", "0.53283346", "0.5277495", "0.5272035", "0.52507305", "0.52339166", "0.5203206", "0.5176077", "0.5174357", "0.5174031", "0.51609975", "0.51548326", "0.5138431", "0.5123837", "0.50979066", "0.508635", "0.5066098", "0.50570524", "0.5056205", "0.50503016", "0.50408465", "0.5035961", "0.50307393", "0.5029516", "0.49937418" ]
0.74167114
0
Extend centerline to have straight channels at both ends.
def extend_centerline(s, x, y, cur, theta):
    print('+> Extending centerline to have straight channels at both ends...', end='')
    if MODE == 1:
        extlength = LAMBDA/10
        d = DS
    elif MODE == 2:
        extlength = WIDTH
        d = INTERVAL
    num = int(extlength/d)
    coshead = (x[1] - x[0])/d
    sinhead = (y[1] - y[0])/d
    headx = np.linspace(x[0] - extlength*coshead, x[0] - d*coshead, num)
    heady = np.linspace(y[0] - extlength*sinhead, y[0] - d*sinhead, num)
    costail = (x[-1] - x[-2])/d
    sintail = (y[-1] - y[-2])/d
    tailx = np.linspace(x[-1] + d*costail, x[-1] + extlength*costail, num)
    taily = np.linspace(y[-1] + d*sintail, y[-1] + extlength*sintail, num)
    x = np.concatenate((headx, x, tailx), axis=0)
    y = np.concatenate((heady, y, taily), axis=0)
    s, x, y = smooth_centerline(x, y)
    s, x, y, cur, theta = resample_centerline(s, x, y)
    print(' [done]')
    return s, x, y, cur, theta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def centerAxis():\n dislin.center()", "def linecenter(l):\n return scale3(add(l[0],l[1]),0.5)", "def centerline_to_polygon(\n centerline: np.ndarray, width_scaling_factor: float = 1.0, visualize: bool = False\n) -> np.ndarray:\n # eliminate duplicates\n _, inds = np.unique(centerline, axis=0, return_index=True)\n # does not return indices in sorted order\n inds = np.sort(inds)\n centerline = centerline[inds]\n\n dx = np.gradient(centerline[:, 0])\n dy = np.gradient(centerline[:, 1])\n\n # compute the normal at each point\n slopes = dy / dx\n inv_slopes = -1.0 / slopes\n\n thetas = np.arctan(inv_slopes)\n x_disp = 3.8 * width_scaling_factor / 2.0 * np.cos(thetas)\n y_disp = 3.8 * width_scaling_factor / 2.0 * np.sin(thetas)\n\n displacement = np.hstack([x_disp[:, np.newaxis], y_disp[:, np.newaxis]])\n right_centerline = centerline + displacement\n left_centerline = centerline - displacement\n\n # right centerline position depends on sign of dx and dy\n subtract_cond1 = np.logical_and(dx > 0, dy < 0)\n subtract_cond2 = np.logical_and(dx > 0, dy > 0)\n add_cond1 = np.logical_and(dx < 0, dy < 0)\n add_cond2 = np.logical_and(dx < 0, dy > 0)\n subtract_cond = np.logical_or(subtract_cond1, subtract_cond2)\n add_cond = np.logical_or(add_cond1, add_cond2)\n left_centerline, right_centerline = swap_left_and_right(subtract_cond, left_centerline, right_centerline)\n\n # right centerline also depended on if we added or subtracted y\n neg_disp_cond = displacement[:, 1] > 0\n left_centerline, right_centerline = swap_left_and_right(neg_disp_cond, left_centerline, right_centerline)\n\n if visualize:\n plt.scatter(centerline[:, 0], centerline[:, 1], 20, marker=\".\", color=\"b\")\n plt.scatter(right_centerline[:, 0], right_centerline[:, 1], 20, marker=\".\", color=\"r\")\n plt.scatter(left_centerline[:, 0], left_centerline[:, 1], 20, marker=\".\", color=\"g\")\n fname = datetime.datetime.utcnow().strftime(\"%Y_%m_%d_%H_%M_%S_%f\")\n plt.savefig(f\"polygon_unit_tests/{fname}.png\")\n plt.close(\"all\")\n\n # return the polygon\n return convert_lane_boundaries_to_polygon(right_centerline, left_centerline)", "def center(self) -> Line:\n return Line(self.shape.pos, self.shape.pos + self.velocity)", "def setup_lines(self):\n self.center_lines()\n self.space_lines()", "def center_lines(self):\n window_center = Rectangle.center(self.window_width, self.window_height)\n for line in self.lines.values():\n line.pos = Vector(*window_center)\n line.set_vertices()", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def center_on_line(line_no):\n return Cmds([str(line_no), Cmd.normal(\"z.\"), \"redraw!\"])", "def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n limx = len(i) + maxlin + 2\n for j in range(len(i) - 2):\n if limx >= maxlin:\n b = bool(i[j])\n self.pixel_tuple_to_patch(\n ((maxlin - len(i) + 2 + x) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n else:\n limx -= 1\n elif SimEngine.gui_get('init') == \"Left\": # Left\n limx = 0\n for j in range(len(i) - 2):\n if limx <= maxlin + 2:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((x - 3) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(\n b)\n x += 1\n limx += 1\n else: # Center and Random\n limx = int((len(i) - maxlin) / 2)\n k = 0\n for j in range(len(i)):\n if limx < 0:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((maxlin - len(i) + 
x - 1 + limx) * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n else:\n if k < maxlin + 1:\n b = bool(i[j + limx])\n self.pixel_tuple_to_patch((k * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n k += 1\n y += 1\n else:\n limy -= 1", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def draw_centered_line(\n self,\n theta: Quantity,\n length: Quantity,\n ra: Quantity,\n dec: Quantity,\n color: str = \"b\",\n linewidth: float = 1.0,\n alpha: float = 0.7,\n ) -> None:\n\n _length = length / 2.0\n dx = np.sin(theta) * _length / np.cos(dec)\n dy = np.cos(theta) * _length\n coords = np.array(\n [\n [(ra + dx).to_value(u.deg), (ra - dx).to_value(u.deg)],\n [(dec + dy).to_value(u.deg), (dec - dy).to_value(u.deg)],\n ]\n )\n self.plot.show_lines([coords], color=color, linewidth=linewidth, alpha=alpha)", "def setCenter(self, center):\n p = center - self.center\n for i in range(len(self.points)):\n self.points[i] += p", "def draw_interval(center_length):\n if center_length > 0:\n draw_interval(center_length - 1)\n draw_line(center_length)\n draw_interval(center_length - 1)", "def read_centerline(s, x, y, cur, theta):\n if MODE == 2:\n print('MODE 2: READ YOUR OWN RIVER CENTERLINE FROM FILE is selected')\n try:\n centerlinexy = np.loadtxt(FNAME)\n except IOError:\n print('\\'' + FNAME + '\\' not found')\n print('Please place \\'' + FNAME + '\\' in working directory\\n')\n job_done()\n sys.exit(1)\n else:\n return s, x, y, cur, theta\n x = centerlinexy[:, 0]\n y = centerlinexy[:, 1]\n if FLIPSTRM:\n x = x[::-1]\n y = y[::-1]\n# if np.mean(np.abs(x)) > 1e6 or np.mean(np.abs(y)) > 1e6:\n# print('!!! 
centerline X/Y too large, forced to shift toward (0, 0) !!!')\n# print('shifting vector: ('+str(-np.mean(x))+', '+str(-np.mean(y))+')')\n# x -= np.mean(x)\n# y -= np.mean(y)\n length = x.size\n s = np.zeros(length)\n for j in range(1, x.size):\n s[j] = s[j-1] + np.sqrt((x[j]-x[j-1])**2 + (y[j]-y[j-1])**2)\n mean1 = np.around(np.mean(np.diff(s)), decimals=2)\n median1 = np.around(np.median(np.diff(s)), decimals=2)\n mode1 = np.around(mode(np.diff(s))[0][0], decimals=2)\n print('+> Resampling centerline & Calculating curvature...', end='')\n s, x, y, cur, theta = resample_centerline(s, x, y)\n print(' [done]')\n mean2 = np.around(np.mean(np.diff(s)), decimals=2)\n median2 = np.around(np.median(np.diff(s)), decimals=2)\n mode2 = np.around(mode(np.diff(s))[0][0], decimals=2)\n print_resamp_table(mean1, median1, mode1, mean2, median2, mode2)\n print_para_table(s)\n return s, x, y, cur, theta", "def gourad_centerline_strip(left, center, right, left_v, center_v, right_v, shape, accumulate=False, background=0):\n left = numpy.asarray(left, dtype=numpy.float32, order='C')\n center = numpy.asarray(center, dtype=numpy.float32, order='C')\n right = numpy.asarray(right, dtype=numpy.float32, order='C')\n left_v = numpy.asarray(left_v, dtype=numpy.float32, order='C')\n center_v = numpy.asarray(center_v, dtype=numpy.float32, order='C')\n right_v = numpy.asarray(right_v, dtype=numpy.float32, order='C')\n assert left.shape == center.shape and center.shape == right.shape\n assert left_v.shape == center_v.shape and center_v.shape == right_v.shape\n assert left.ndim == 2 and left.shape[1] == 2 and len(left) > 1\n assert left_v.ndim in (1, 2)\n assert len(left) == len(left_v)\n unpack_out = False\n if left_v.ndim == 1:\n left_v = left_v[:, numpy.newaxis]\n center_v = center_v[:, numpy.newaxis]\n right_v = right_v[:, numpy.newaxis]\n unpack_out = True\n num_points = len(left)\n out = numpy.empty(tuple(shape)+left_v.shape[1:], dtype=numpy.float32, order='F')\n out.fill(background)\n _gouraud.gourad_centerline_strip(num_points,\n _cast('float *', left),\n _cast('float *', center),\n _cast('float *', right),\n _cast('float *', left_v),\n _cast('float *', center_v),\n _cast('float *', right_v),\n _cast('float *', out),\n out.shape, out.strides, accumulate)\n if unpack_out:\n return out[:,:,0]\n else:\n return out.transpose((2,0,1))", "def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)", "def yline(y,farright, width, dash, grayamount):\r\n aline([[0,y],[farright,y]],width, dash, grayamount)", "def maskCenterLines(width,shape):\n assert isinstance(width,int), \"width has to be integer\"\n assert width>0, \"width has to be positive\"\n mask = np.zeros(shape,dtype=bool)\n if isinstance(width,int): width = (width,width)\n c0 = int(shape[0]/2)\n c1 = int(shape[1]/2)\n w0 = int(width[0]/2)\n w1 = int(width[1]/2)\n mask[ c0-w0:c0+w0 , : ] = True\n mask[ : , c1-w1:c1+w1 ] = True\n return mask", "def _draw_center_indicator(\n self, src, color=(0, 0, 255), shape=\"crosshairs\", size=10, thickness=1\n ):\n cpt = self._get_frame_center(src)\n self._draw_indicator(\n src,\n new_point(*cpt),\n shape=shape,\n color=color,\n size=size,\n thickness=thickness,\n )", "def __DrawCenter(self, image, center, color):\n if center[0] != -1 and center[1] != -1:\n cv2.circle(image, (int(center[0]), int(center[1])), 5, color, -1)", "def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = 
self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")", "def getScrewAxis(self, center=None, linelength=None):\n\n # first check that there is a rotational component. If not, abort\n # if there is a rotation, self.real != 1.0\n if self.real <= 0.99999999:\n # need the direction to determine which way to draw the line\n trans = Vector(self.trans[:3])\n theta = numpy.arccos(self.real)\n axis = self.pure / numpy.sin(theta)\n axis = Vector(axis)\n screw = (trans * axis)\n tpar = screw * axis\n tper = trans - tpar\n cpt1 = tper / 2.\n length = tper.length()\n height = length / (2 * numpy.tan(theta))\n cpt2 = height * (axis.cross(tper)).normal()\n point = cpt1 + cpt2\n if center:\n try:\n center = Vector(center)\n except:\n raise ValueError('center must be a numpy array of shape (3,)')\n m = (center - point) * axis\n point = point + m * axis\n if not linelength:\n return point, point + axis * screw, screw\n else:\n return point, point + linelength * numpy.sign(screw) * axis, screw\n\n else:\n return None", "def _set_center(self):\n sl_center = np.array(\n [self.sl_list[k].mean_pos for k in range(self.nb_sl)]\n )\n sl_nb_pts = np.array(\n [self.sl_list[k].nb_points for k in range(self.nb_sl)]\n )\n sl_wcenter = [sl_center[k] * sl_nb_pts[k] for k in range(self.nb_sl)]\n self.center = np.sum(sl_wcenter, axis=0) / np.sum(sl_nb_pts)", "def set_mode_line():\n global DRAW_MODE, CURRENT_LABEL, SHAPE_SIZE\n global mouse_pos, line_start_pos\n\n if DRAW_MODE==\"line\":\n # draw the line on the mask\n cv.line(source_msk, line_start_pos, mouse_pos, CURRENT_LABEL, thickness=SHAPE_SIZE)\n\n line_start_pos = mouse_pos\n DRAW_MODE=\"line\"", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, coordright)\n\n return coordleft, coordright, dist", "def raster_linify(channel):\n k_lines = []\n right = True\n for X in range(channel.shape[0]):\n linestart = None\n inline = False\n if right == True:\n Y=0\n while (Y < channel.shape[1]):\n if (inline):\n if (channel[X,Y]>0.5):\n pass\n else:\n k_lines.append(copy.copy([linestart,[X,Y]]))\n inline = False\n else:\n if (channel[X,Y]>0.5):\n linestart = copy.copy([X,Y])\n inline=True\n else:\n pass\n Y=Y+1\n right = False\n else:\n Y=channel.shape[1]-1\n while (Y > 0):\n if (inline):\n if (channel[X,Y]>0.5):\n pass\n else:\n k_lines.append(copy.copy([linestart,[X,Y]]))\n inline = False\n else:\n if (channel[X,Y]>0.5):\n linestart = copy.copy([X,Y])\n inline=True\n else:\n pass\n Y=Y-1\n right = True\n return k_lines", "def cutout(self, centre, radius):", "def advanced_line():\n\n # Make dataset specifying 
arguments\n dataset_a = DataSet(sine,line_style='-',line_width=1.5,marker_style='o',marker_size='4')\n\n # Make dataset changing options using setters\n dataset_b = DataSet(cosine)\n dataset_b.set_line(style='--',width=1.5)\n dataset_b.set_colour(colour='royalblue')\n\n # Make plot object and adjust properties using setters\n plot = Plot()\n plot.set_text(latex=True,label=12)\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n plot.set_axes(xlim=(0,8),ylim=(-1.1,1.1),xlabel=r'$x$',ylabel=r'$f\\left(x\\right)$',xticks=(1.0,0.2),yticks=(0.2,0.05))\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_advanced_line',fmt='png')\n plot.display()", "def addLineStyle(dist, focus, axis, pupil):\n r = 0 #focus / 2\n g = 0 #np.log10(dist) / (25 / 3)\n b = 0 #axis / 20\n a = 0.4\n rgb = [r, g, b, a]\n line = {'style': '-', 'color': rgb}\n return line", "def draw_line(self, gray=0, nextline=0):\n\n self.fontsize = 4\n if nextline:\n self.nextline()\n else:\n self.linespace(8)\n self.resetx()\n c = self.canvas\n c.setStrokeGray(gray)\n c.setLineWidth(1)\n #self.y = self.y + self.linespacing + (self.fontsize/2)\n c.line(self.x, self.y, self.width - self.x, self.y)\n self.y = self.y + (self.linespacing)" ]
[ "0.6367526", "0.6277636", "0.60933346", "0.58676577", "0.58125347", "0.57696474", "0.5726331", "0.5680378", "0.56134474", "0.5567007", "0.55354726", "0.5523416", "0.5488831", "0.5460297", "0.5416137", "0.5398362", "0.53754103", "0.5371892", "0.5368449", "0.5356547", "0.5341156", "0.5331248", "0.5299096", "0.5277445", "0.52542377", "0.5218476", "0.5201347", "0.5195435", "0.51883054", "0.5149252" ]
0.7979626
0
Impose a phase lag to the curvature signal by replacing the local curvature with the upstreamwise moving averaged curvature.
def lag(s, cur, t):
    if LAG == 0:
        return cur
    else:
        if MODE == 1:
            num = int(WIDTH*LAGSTR/DS)
        elif MODE == 2:
            num = int(WIDTH*LAGSTR/np.mean(np.diff(s)))
        if np.mod(t, LPRINT) == 0:
            print('+> Adding phase lag to local curvature...', end='')
        cur = compute_lag(cur, num)
        if np.mod(t, LPRINT) == 0:
            print(' [done]')
        return cur
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_baseline(self):\n\n print(\" \\t Apply Savitzky-Golay filter \\t %d\" %self.nwin)\n base_savgol = signal.savgol_filter(self.input, self.nwin, 1)\n self.input_nobase = self.input - base_savgol", "def adjust_u(self):\r\n # compute the volume integrals of the x,y, and z components of u\r\n ux = assemble(self.u.sub(0)*dx)\r\n uy = assemble(self.u.sub(1)*dx)\r\n uz = assemble(self.u.sub(2)*dx)\r\n\r\n # create a function of value 1, which can be integrated.\r\n try:\r\n self.unit\r\n except AttributeError:\r\n self.unit = Function(self.Q)\r\n self.unit.assign(Constant(1))\r\n\r\n # compute the volume of the body\r\n Vol = assemble(self.unit*dx)\r\n\r\n try:\r\n self.umean\r\n except AttributeError:\r\n self.umean = Function(self.Z)\r\n\r\n # compute the volume-averaged component means\r\n self.umean.assign(Constant((ux/Vol, uy/Vol, uz/Vol, 0)))\r\n\r\n # subtract the mean from the solution function\r\n self.up.assign(self.up-self.umean)", "def update_policy(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def revolve(self, angle_step):\n self.angle += radians(self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def mean_curvature(self, uv):\n mean_curv = GeomLProp_SLProps(\n self.surface(), uv[0], uv[1], 2, 1e-9\n ).MeanCurvature()\n if self.reversed():\n mean_curv *= -1\n return mean_curv", "def step_v(u_old, v_old, dt, dx2):\n\treturn v_old + dt*(e*(u_old + a0) + delta*laplacian(v_old,dx2))", "def surface_curv(self, dB=False, sig0=False):\n\n # evaluate the slope of the used brdf\n brdf_curv = self.SRF.brdf_theta_diff(\n t_0=self.t_0, t_ex=self.t_ex, p_0=self.p_0,\n p_ex=self.p_ex, geometry = 'mono',\n param_dict=self.param_dict, return_symbolic=False,\n n=2)\n # evaluate the slope of the used brdf\n brdf_slope = self.SRF.brdf_theta_diff(\n t_0=self.t_0, t_ex=self.t_ex, p_0=self.p_0,\n p_ex=self.p_ex, geometry = 'mono',\n param_dict=self.param_dict, return_symbolic=False,\n n=1)\n # evaluate the used brdf\n brdf_val = self.SRF.brdf(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict)\n\n # vegetated soil contribution\n I_vegs_curv = (self.I0\n * np.exp(-(2. * self.V.tau / self._mu_0)) * (\n self._mu_0 * brdf_curv -\n 2. * np.sin(self.t_0) * brdf_slope * (\n 2. * self.V.tau / self._mu_0 + 1.)\n + (4. * self.V.tau**2 / self._mu_0**3\n * np.sin(self.t_0)**2\n - 2. * self.V.tau - self._mu_0) * brdf_val ))\n\n # bare soil contribution\n I_bs_curv = self.I0 * ( self._mu_0 * brdf_curv\n - 2. * np.sin(self.t_0) * brdf_slope\n - self._mu_0 * brdf_val )\n\n I_curv = self.SRF.NormBRDF * (\n (1. - self.bsf) * I_vegs_curv\n + self.bsf * I_bs_curv)\n\n if sig0 is False and dB is False:\n return I_curv\n else:\n I_slope = self.surface_slope(dB=False, sig0=False)\n I_val = self.surface()\n if sig0 is True and dB is False:\n return 4. * np.pi * (self._mu_0 * I_curv\n - 2. 
* np.sin(self.t_0)\n * I_slope\n - self._mu_0 * I_val )\n elif sig0 is False and dB is True:\n return 10./np.log(10) * (I_curv / I_val\n - I_slope**2 / I_val**2 )\n elif sig0 is True and dB is True:\n return 10./np.log(10) * (I_curv / I_val\n - I_slope**2 / I_val**2\n - self._mu_0**(-2))", "def compute_spike_moving_average(self, tau=0.005):\n rho = 1 - self.DT / tau\n rav = np.zeros_like(self.R)\n\n rav[:, 0] = self.R[:, 0] * (1 - rho)\n for i in range(1, self.N_T):\n rav[:, i] = rho * rav[:, i - 1] + (1 - rho) * self.R[:, i]\n\n self.rav = rav / self.DT", "def volume_curv(self, dB=False, sig0=False):\n # evaluate the slope of the used brdf\n p_curv = self.V.p_theta_diff(t_0=self.t_0, t_ex=self.t_ex,\n p_0=self.p_0, p_ex=self.p_ex,\n geometry = 'mono',\n param_dict=self.param_dict,\n return_symbolic=False,\n n=2)\n # evaluate the slope of the used brdf\n p_slope = self.V.p_theta_diff(t_0=self.t_0, t_ex=self.t_ex,\n p_0=self.p_0, p_ex=self.p_ex,\n geometry = 'mono',\n param_dict=self.param_dict,\n return_symbolic=False,\n n=1)\n # evaluate the used brdf\n p_val = self.V.p(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict)\n\n I_curv = (1. - self.bsf) * self.I0 * self.V.omega / 2. * (\n np.exp(-(2 * self.V.tau / self._mu_0)) * (\n 2 * self.V.tau / self._mu_0**3) * (\n np.sin(self.t_0)**2 + 1.\n - 2. * self.V.tau / self._mu_0\n * np.sin(self.t_0)**2) * p_val\n + (np.exp(-(2 * self.V.tau / self._mu_0)) *\n 4. * self.V.tau / self._mu_0**2\n * np.sin(self.t_0)) * p_slope\n + (1 -\n np.exp(-(2 * self.V.tau / self._mu_0))\n ) * p_curv )\n\n\n\n#\n# I_curv = (1. - self.bsf) * self.I0 * self.V.omega / 2. * (\n# np.exp(-(2 * self.V.tau / self._mu_0)) * (\n# 4. * self.V.tau * np.sin(self.t_0) / self._mu_0**2 * p_slope\n# +\n# (1. + 2. * np.sin(self.t_0)**2 / self._mu_0**2\n# - 2. * self.V.tau * np.sin(self.t_0)**2 / self._mu_0**3) *\n# 2. * self.V.tau / self._mu_0 * p_val\n# )\n# + (1. - np.exp(-(2 * self.V.tau / self._mu_0))) * p_curv\n# )\n\n\n\n if sig0 is False and dB is False:\n return I_curv\n else:\n I_slope = self.volume_slope(dB=False, sig0=False)\n I_val = self.volume()\n if sig0 is True and dB is False:\n return 4. * np.pi * (self._mu_0 * I_curv\n - 2. 
* np.sin(self.t_0)\n * I_slope\n - self._mu_0 * I_val )\n elif sig0 is False and dB is True:\n return 10./np.log(10) * (I_curv / I_val\n - I_slope**2 / I_val**2 )\n elif sig0 is True and dB is True:\n return 10./np.log(10) * (I_curv / I_val\n - I_slope**2 / I_val**2\n - self._mu_0**(-2))", "def sweep():\n \n set_enable_load(True) # turn input ON\n time.sleep(.250)\n \n print('Begin IV curve measurement')\n \n voc = open_circ() # measure open circuit voltage\n iv_curve(voc) # measure iv curve\n short_circ() # measure short circuit current\n \n time.sleep(.250)\n set_enable_load(False) # turn input OFF", "def extrapolate_survival():\n\n # Read in raw data\n survival_rates_raw = np.loadtxt(\n ppj(\"IN_DATA\", \"survival_rates_raw.csv\"), delimiter=\",\", dtype=np.float64,\n )\n\n # Adjustment factors to be changed for simulated change in survival probabilities\n adjustment = np.ones((age_max, projection_length), dtype=np.float64)\n adjustment[:, adjustment_start:adjustment_end] = adjustment_factor\n\n # Initiate object to store simulated survival probabilities\n survival_rates = np.ones((age_max, projection_length + 1), dtype=np.float64)\n\n # Initial survival probabilities are empirical data\n survival_rates[: age_max - 1, 0] = survival_rates_raw[\n age_min + 1 : age_min + age_max\n ]\n\n # Probability to survive after max age is zero\n survival_rates[-1, 0] = 0.0\n\n # Simulate over transition period by iterating over adjustment factors\n for time_idx in range(projection_length):\n mortality_rates_tmp = 1.0 - survival_rates[:, time_idx]\n mortality_rates_next = mortality_rates_tmp * adjustment[:, time_idx]\n mortality_rates_next[-1] = 1.0\n survival_rates_next = 1.0 - mortality_rates_next\n survival_rates[:, time_idx + 1] = survival_rates_next\n\n return survival_rates", "def smooth(x, window,nan=True,old=False,fill='mean'):\n if nan:\n from Sp_parameters import nanmasked\n from scipy import interpolate\n ix = np.arange(len(x))\n xmasked, mask = nanmasked(x)\n if fill == 'mean':\n fv = np.mean(xmasked)\n elif fill == 'median':\n fv = np.median(xmasked)\n elif fill == 'zero':\n fv = 0.0\n else:\n raise ValueError('the fill keyword doesnt match possible values, try mean, median, or zero')\n fx = interpolate.interp1d(ix[mask],xmasked,bounds_error=False,fill_value=fv)\n xinterp = fx(ix)\n if old:\n xout = np.convolve(xinterp, np.ones(window)/window, 'same')\n else:\n s = np.r_[xinterp[window-1:0:-1],xinterp,xinterp[-1:-window:-1]]\n w = np.ones(window,'d')\n xout = np.convolve(w/w.sum(),s,mode='valid')\n istart = window/2\n iend = -window/2+1\n if iend==0:\n iend = len(xout)\n xout = xout[istart:iend]\n else:\n if old:\n xout = np.convolve(x, np.ones(window)/window, 'same')\n else:\n s = np.r_[x[window-1:0:-1],x,x[-1:-window:-1]]\n w = np.ones(window,'d')\n xout = np.convolve(w/w.sum(),s,mode='valid')\n xout = xout[window/2:-window/2+1]\n return xout", "def fix_curvature(self) -> None:\n self.n1.fix = True\n self.n2.fix = True", "def interpolate_impedance(self, starting_kp, starting_damping, goal_kp, goal_damping):\n delta_kp_per_step = (goal_kp - starting_kp[self.action_mask]) / self.interpolation_steps\n delta_damping_per_step = (goal_damping - starting_damping[self.action_mask]) / self.interpolation_steps\n\n def update_impedance(index):\n if index < self.interpolation_steps - 1:\n self.impedance_kp[self.action_mask] += delta_kp_per_step\n self.impedance_damping[self.action_mask] += delta_damping_per_step\n\n self.update_impedance = update_impedance", "def adjustToNewAngle(self):\n\n 
self.a,self.b,self.c = parametersFromPointAngle( 0.5*(self.point1+self.pointN), self.newAngle)\n\n #print 'adjustToNewAngle ', self, self.angle, self.newAngle\n self.angle = self.newAngle\n self.normalv = numpy.array( [ self.a, self.b ])\n self.unitv = numpy.array( [ self.b, -self.a ])\n if abs(self.angle) > numpy.pi/2 :\n if self.b > 0: self.unitv *= -1\n elif self.b<0 : self.unitv *= -1\n\n self.point1 = self.projectPoint(self.point1) # reset point1 \n if self.next is None or not self.next.isSegment():\n # move the last point (no intersect with next)\n\n pN = self.projectPoint(self.pointN)\n dirN = pN - self.point1 \n lN = length(pN, self.point1)\n self.pointN = dirN/lN*self.length + self.point1\n #print ' ... adjusting last seg angle ',p.dump() , ' normalv=', p.normalv, 'unitv ', p.unitv\n else:\n self.setIntersectWithNext()", "def create_calibrator(orig, calibrated):\n\n if orig[0] > 0:\n orig = np.insert(orig, 0, 0)\n calibrated = np.insert(calibrated, 0, 0)\n if orig[-1] < 1:\n orig = np.append(orig, 1.0)\n calibrated = np.append(calibrated, 1.0)\n return interpolate.interp1d(orig, calibrated, 'linear', bounds_error=True)", "def movingAverage(curve, radius):\n window_size = 2 * radius + 1\n # Define the filter\n f = np.ones(window_size)/window_size\n # Add padding to the boundaries\n curve_pad = np.lib.pad(curve, (radius, radius), 'edge')\n # Apply convolution\n curve_smoothed = np.convolve(curve_pad, f, mode='same')\n # Remove padding\n curve_smoothed = curve_smoothed[radius:-radius]\n # return smoothed curve\n return curve_smoothed", "def upsample(img):\n\n filtered = sp.signal.convolve2d(img, guassianFilter, 'same')\n i, j = img.shape\n upsampled = np.zeros((i*2, j*2))\n for r in range(i):\n upsampled[2 * r, ::2] = img[r, ::]\n for c in range(j):\n upsampled[::2, 2 * c] = img[::, c]\n\n # Need to raise values of upsampled image by 4 (1px in original -> 4px in upsampled)\n return 4 * sp.signal.convolve2d(upsampled, guassianFilter, 'same')", "def displacement(track):\n\n xcoord = np.diff(track.x)\n ycoord = np.diff(track.y)\n zcoord = np.diff(track.z)\n displacement_ = np.sqrt(xcoord ** 2 + ycoord ** 2 + zcoord ** 2)\n\n # In reality we should be looking to regions of flatness\n # Plateaus of slope zero which indicate constant velocity\n\n velo = displacement_ / track.samplerate\n window = int(len(displacement_) * 30) // 100\n velo = np.convolve(velo, np.ones(window) / window, mode='valid')\n\n return velo * 1000", "def recalibrate_start(self):\n self.epoch += 1\n self.initialize()\n self.recalibration_i = 0\n\n if self.vr_from_epoch is not None and self.epoch >= self.vr_from_epoch:\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['gavg'].zero_()\n param_state['m2'].zero_()\n\n # xk is changed to the running_x\n p.data.zero_().add_(param_state['running_x'])\n param_state['tilde_x'] = p.data.clone()", "def soft_update(self, local_model, target_model, tau):\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)", "def superimpose_field(u, v, img):\n \n # Set scaling.\n scaling = 0.11\n u = cv2.resize(gausssmooth(u, 1.5), (0, 0), fx=scaling, fy=scaling)\n v = cv2.resize(gausssmooth(v, 1.5), (0, 0), fx=scaling, fy=scaling)\n \n # Normalize magnitudes.\n # u = u / np.sqrt(u**2 + v**2);\n # v = v / np.sqrt(u**2 + v**2);\n \n # Create plot.\n x_ = (np.array(list(range(1, u.shape[1] + 1))) - 0.5) / scaling\n y_ = 
(np.array(list(range(1, u.shape[0] + 1))) - 0.5) / scaling\n x, y = np.meshgrid(x_, y_)\n fig = plt.figure()\n ax = plt.gca()\n ax.axis('off')\n ax.quiver(x, y, u, -v, color='r', scale=30)\n ax.imshow(img)\n fig.canvas.draw()\n plt.close()\n \n # Get plot in shape of numpy array and return it.\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))", "def soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)", "def global_curvature_features(local_curvatures):\n \n # differentiate postive and negative curvatures and compute features\n pos_curvature=local_curvatures[local_curvatures>0]\n neg_curvature=np.abs(local_curvatures[local_curvatures<0])\n \n if pos_curvature.shape[0]>0:\n (max_posi_curv,avg_posi_curv,med_posi_curv,\n std_posi_curv,sum_posi_curv,len_posi_curv)=(np.max(pos_curvature),np.mean(pos_curvature),\n np.median(pos_curvature),np.std(pos_curvature),\n np.sum(pos_curvature),pos_curvature.shape[0])\n else:\n (max_posi_curv,avg_posi_curv,med_posi_curv,\n std_posi_curv,sum_posi_curv,len_posi_curv)=('NA','NA','NA','NA','NA','NA')\n \n \n if neg_curvature.shape[0]>0:\n (max_neg_curv,avg_neg_curv,med_neg_curv,\n std_neg_curv,sum_neg_curv,len_neg_curv)=(np.max(neg_curvature),np.mean(neg_curvature),\n np.median(neg_curvature),np.std(neg_curvature),\n np.sum(neg_curvature),neg_curvature.shape[0])\n \n else:\n (max_neg_curv,avg_neg_curv,med_neg_curv,\n std_neg_curv,sum_neg_curv,len_neg_curv)=('NA','NA','NA','NA','NA','NA')\n \n return Global_Curvature_Features([np.mean(local_curvatures),np.std(local_curvatures),\n np.where(np.diff(np.sign(local_curvatures)))[0].shape[0],\n max_posi_curv,avg_posi_curv,med_posi_curv,\n std_posi_curv,sum_posi_curv,len_posi_curv,\n max_neg_curv,avg_neg_curv,med_neg_curv,\n std_neg_curv,sum_neg_curv,len_neg_curv])", "def update_acc(self, acc_delta: np.ndarray, coeff: float) -> None:\n self.state[:, :, Boids.Attr.ACC] += acc_delta*coeff\n self.state[:, :, Boids.Attr.ACC] = maglim(\n self.state[:, :, Boids.Attr.ACC], self.max_acc)", "def slerp_gaussian(val, low, high):\n offset = norm.cdf(np.zeros_like(low)) # offset is just [0.5, 0.5, ...]\n low_gau_shifted = norm.cdf(low) - offset\n high_gau_shifted = norm.cdf(high) - offset\n circle_lerped_gau = slerp(val, low_gau_shifted, high_gau_shifted)\n epsilon = 0.001\n clipped_sum = np.clip(circle_lerped_gau + offset, epsilon, 1.0 - epsilon)\n result = norm.ppf(clipped_sum)\n return result", "def calibrate(cap, location):\n\n #Poisition and size of sensor\n [x, y, h, w] = location\n\n #show square to user and wait for key\n print(\"please, step away to clear the blue square displayed on screen and press q to continue\")\n while True:\n ret, frame = cap.read()\n cv2.namedWindow('Calibrate',cv2.WINDOW_NORMAL)\n show = cv2.rectangle(frame, (x,y), (x+w,y+h), (255, 0, 0) , 5)\n cv2.imshow('Calibrate', show)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n\n #get first image, process and define window previous for iteration\n ret, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n previous = frame[y:y+w,x:x+h]\n\n #set parameters for mean value of sensor, kernel of erode function,\n sampleNbMean = 50\n xi = np.empty((0, sampleNbMean))\n kernel = np.ones((5,5), np.uint8)\n\n #iterate over each frame until sample number\n for iteration in 
range(sampleNbMean):\n\n # Capture frame, draw the window and display to the user\n ret, frame = cap.read()\n # Image operation\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n\n #get present window\n present = frame[y:y+w,x:x+h]\n\n #add sample for mean, add diference of window with prieviuos\n xi = np.append(xi,\n np.sum(\n cv2.erode(\n cv2.bitwise_xor(present,previous), kernel, iterations=1)))\n\n #present image becomes previous before steping into next image\n previous = present\n\n #mean\n mean = np.sum(xi)/len(xi)\n\n #sigma\n sum = 0\n for sample in xi:\n sum += np.power(sample - mean, 2)\n sigma = np.sqrt(sum/len(xi))\n\n #close window\n cv2.destroyWindow('Calibrate')\n\n return mean, sigma", "def update(self, u_vector, increment = True):\n if increment:\n # Move the prototype closer to input vector\n self.p_vector = self.p_vector + self.epsilon * (u_vector - self.p_vector)\n else:\n # Move the prototype away from input vector\n self.p_vector = self.p_vector - self.epsilon * (u_vector - self.p_vector)", "def get_Curvature(self):\n #return str(np.mean([self.line_l.get_CurveRad(), self.line_r.get_CurveRad()]))\n y = np.linspace(0,719, 10)\n x = self.center_poly(y)\n fit_scaled = np.polyfit(y*self.line_l.y_pxm,x*self.line_l.x_pxm, deg=2)\n curverad = ((1 + (2 * fit_scaled[0] * 600 + fit_scaled[1]) ** 2) ** 1.5) / np.absolute(2 * fit_scaled[0])\n\n if len(self.curve_buffer) > 15:\n self.curve_buffer.pop(0)\n\n self.curve_buffer.append(curverad)\n _, self.curve_buffer = self.line_l.remove_outliers(self.curve_buffer,[None]*len(self.curve_buffer), m=3)\n buff_mean= np.mean(self.curve_buffer)\n #print(\"Buf Mean: \" +str(buff_mean))\n #outlier = np.abs(buff_mean - curverad) > np.std(self.curve_buffer)*2\n if curverad > 4000.0:\n buff_mean = \"Straight Lane\"\n else:\n buff_mean = str(int(buff_mean)) + \" m\"\n\n return buff_mean", "def pass_through_lateral_conn(self):\n\n if self.conv_filter is not None:\n boundary = 'wrap' if self.circular else 'fill'\n self.P = convolve2d(self.P, self.conv_filter, 'same', boundary)\n\n self.P = self.P / self.P.sum() # rescale to PD" ]
[ "0.5069656", "0.49511254", "0.4938894", "0.49136677", "0.48906946", "0.48032054", "0.46865338", "0.46588433", "0.4652921", "0.4643875", "0.4641906", "0.46012482", "0.4595003", "0.45939776", "0.45844513", "0.45730233", "0.45540237", "0.45404497", "0.4538936", "0.45381907", "0.4537065", "0.4521938", "0.45017326", "0.45014408", "0.45013747", "0.4501298", "0.45001614", "0.44794017", "0.44779077", "0.44715363" ]
0.49604023
1
Compute left and right offset polylines of centerline with an offset distance of L. Thank Y. Luo for improving the offsetting method.
def offset(x, y, L):
    length = x.size
    offsetx = np.zeros((length, 2))
    offsety = np.zeros((length, 2))
    dx = np.zeros(length-1)
    dy = np.zeros(length-1)
    dxL = np.zeros(length-1)
    dyL = np.zeros(length-1)
    xl = np.zeros(length)  # counterclockwise
    xr = np.zeros(length)  # clockwise
    yl = np.zeros(length)
    yr = np.zeros(length)
    xl0 = np.zeros(length)
    xr0 = np.zeros(length)
    yl0 = np.zeros(length)
    yr0 = np.zeros(length)
    for i in range(0, length-1):
        dx[i] = x[i+1]-x[i]
        dy[i] = y[i+1]-y[i]
    for i in range(0, length-1):
        r = np.sqrt(dx[i]**2 + dy[i]**2)
        dxL[i] = dx[i]*L/r
        dyL[i] = dy[i]*L/r
        xl0[i] = -dyL[i] + x[i]
        yl0[i] = dxL[i] + y[i]
        xr0[i] = dyL[i] + x[i]
        yr0[i] = -dxL[i] + y[i]
    xl0[length-1] = xl0[length-2] + dx[length-2]
    yl0[length-1] = yl0[length-2] + dy[length-2]
    xr0[length-1] = xr0[length-2] + dx[length-2]
    yr0[length-1] = yr0[length-2] + dy[length-2]
    xl[0] = xl0[0]
    yl[0] = yl0[0]
    xl[length-1] = xl0[length-1]
    yl[length-1] = yl0[length-1]
    xr[0] = xr0[0]
    yr[0] = yr0[0]
    xr[length-1] = xr0[length-1]
    yr[length-1] = yr0[length-1]
    for i in range(1, length-1):
        a = np.array([[dy[i-1], -dx[i-1]], [dy[i], -dx[i]]])
        bl = np.array([dy[i-1]*xl0[i-1]-dx[i-1]*yl0[i-1], dy[i]*xl0[i]-dx[i]*yl0[i]])
        br = np.array([dy[i-1]*xr0[i-1]-dx[i-1]*yr0[i-1], dy[i]*xr0[i]-dx[i]*yr0[i]])
        theta = (dx[i-1]*dx[i]+dy[i-1]*dy[i])/(dx[i-1]**2+dy[i-1]**2)**0.5/(dx[i]**2+dy[i]**2)**0.5
        if theta > 1 - 1e-10:
            xl[i] = xl0[i]
            yl[i] = yl0[i]
            xr[i] = xr0[i]
            yr[i] = yr0[i]
        else:
            pl = np.linalg.solve(a, bl)
            xl[i] = pl[0]
            yl[i] = pl[1]
            pr = np.linalg.solve(a, br)
            xr[i] = pr[0]
            yr[i] = pr[1]
    offsetx[:, 0], offsetx[:, 1] = xl, xr
    offsety[:, 0], offsety[:, 1] = yl, yr
    return offsetx, offsety
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def getOffsetLine(self, distance, side=c.INSIDE):\n StartA = np.array([self.start.x, self.start.y])\n EndA = np.array([self.end.x, self.end.y])\n r = StartA - EndA #The slope vector of self\n rn = np.array([-r[c.Y], r[c.X]]) #flip x and y and inverse y to get the normal vector of the slope\n rn = rn/np.linalg.norm(rn)*distance #normalize by dividing by its magnitude and multipy by distance to get the correct length\n \n if side == c.INSIDE:\n return self.translate(-rn[c.X], -rn[c.Y]) #the \"minus\" side line is the left side which is inside.\n \n return self.translate(rn[c.X], rn[c.Y]) #the \"Plus\" side of the line is the right side which is outside.", "def linecenter(l):\n return scale3(add(l[0],l[1]),0.5)", "def drawSlope(self):\n length = sqrt(1 + self.slope**2) # Length of the line segment over 1 x-unit\n xOffset = (segmentLength / length) / 2 # Figures out how many times the length of the 1 unit length fits into the desired length\n # then divides by 2 becuase half is on the left and half on the right of the center\n\n\n # Left end point\n xLeft = self.x - xOffset\n yLeft = (self.slope * (xLeft - self.x)) + self.y\n\n # Right end point\n xRight = self.x + xOffset\n yRight = (self.slope * (xRight - self.x)) + self.y\n\n\n # Converts the left and right end points from cartesian coordinates to screen coordinates\n left = cartesianToScreen(xLeft , yLeft)\n right = cartesianToScreen(xRight, yRight)\n\n\n pygame.draw.aaline(display, self.color, left, right, 1) # DRAWS THE LINE AHHHHHHHHHHHHHHHHHH :P", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, coordright)\n\n return coordleft, coordright, dist", "def make_offset(self, offset, last=None):\n line = self.offset(offset)\n if last is None:\n return line\n\n if hasattr(last, \"r\"):\n res, d, t = line.point_sur_segment(last.c)\n c = (last.r * last.r) - (d * d)\n # print(\"t:%s\" % t)\n if c <= 0:\n # no intersection !\n p0 = line.lerp(t)\n else:\n # center is past start of line\n if t > 0:\n p0 = line.lerp(t) - line.v.normalized() * sqrt(c)\n else:\n p0 = line.lerp(t) + line.v.normalized() * sqrt(c)\n # compute da of arc\n u = last.p0 - last.c\n v = p0 - last.c\n da = self.signed_angle(u, v)\n # da is ccw\n if last.ccw:\n # da is cw\n if da < 0:\n # so take inverse\n da = 2 * pi + da\n elif da > 0:\n # da is ccw\n da = 2 * pi - da\n last.da = da\n line.p0 = p0\n else:\n # intersect line / line\n # 1 line -> 2 line\n c = line.cross_z\n d = 
last.v.dot(c)\n if d == 0:\n return line\n v = line.p - last.p\n t = c.dot(v) / d\n c2 = last.cross_z\n u = c2.dot(v) / d\n # intersect past this segment end\n # or before last segment start\n # print(\"u:%s t:%s\" % (u, t))\n if u > 1 or t < 0:\n return line\n p = last.lerp(t)\n line.p0 = p\n last.p1 = p\n\n return line", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def centerline_to_polygon(\n centerline: np.ndarray, width_scaling_factor: float = 1.0, visualize: bool = False\n) -> np.ndarray:\n # eliminate duplicates\n _, inds = np.unique(centerline, axis=0, return_index=True)\n # does not return indices in sorted order\n inds = np.sort(inds)\n centerline = centerline[inds]\n\n dx = np.gradient(centerline[:, 0])\n dy = np.gradient(centerline[:, 1])\n\n # compute the normal at each point\n slopes = dy / dx\n inv_slopes = -1.0 / slopes\n\n thetas = np.arctan(inv_slopes)\n x_disp = 3.8 * width_scaling_factor / 2.0 * np.cos(thetas)\n y_disp = 3.8 * width_scaling_factor / 2.0 * np.sin(thetas)\n\n displacement = np.hstack([x_disp[:, np.newaxis], y_disp[:, np.newaxis]])\n right_centerline = centerline + displacement\n left_centerline = centerline - displacement\n\n # right centerline position depends on sign of dx and dy\n subtract_cond1 = np.logical_and(dx > 0, dy < 0)\n subtract_cond2 = np.logical_and(dx > 0, dy > 0)\n add_cond1 = np.logical_and(dx < 0, dy < 0)\n add_cond2 = np.logical_and(dx < 0, dy > 0)\n subtract_cond = np.logical_or(subtract_cond1, subtract_cond2)\n add_cond = np.logical_or(add_cond1, add_cond2)\n left_centerline, right_centerline = swap_left_and_right(subtract_cond, left_centerline, right_centerline)\n\n # right centerline also depended on if we added or subtracted y\n neg_disp_cond = displacement[:, 1] > 0\n left_centerline, right_centerline = swap_left_and_right(neg_disp_cond, left_centerline, right_centerline)\n\n if visualize:\n plt.scatter(centerline[:, 0], centerline[:, 1], 20, marker=\".\", color=\"b\")\n plt.scatter(right_centerline[:, 0], right_centerline[:, 1], 20, marker=\".\", color=\"r\")\n plt.scatter(left_centerline[:, 0], left_centerline[:, 1], 20, marker=\".\", color=\"g\")\n fname = datetime.datetime.utcnow().strftime(\"%Y_%m_%d_%H_%M_%S_%f\")\n plt.savefig(f\"polygon_unit_tests/{fname}.png\")\n plt.close(\"all\")\n\n # return the polygon\n return convert_lane_boundaries_to_polygon(right_centerline, left_centerline)", "def compute_xy_lims(center, width):\n x, y = center\n w2 = width / 2\n return (x - w2, x + w2), (y - w2, y + w2)", "def calculate_insetaxes_offset(lims, points, offset_length):\n import numpy as np\n offsets = []\n for dim_n in range(len(lims)): # loop through each dimension. 
\n dim_centre = np.mean(lims[dim_n])\n if points[dim_n] < dim_centre:\n offsets.append(-offset_length)\n else:\n offsets.append(offset_length)\n return offsets", "def average_slope_intercept(frame, line_segments):\n try:\n lane_lines = []\n height, width = frame.shape\n left_fit = []\n right_fit = []\n Ys = []\n cords = []\n ml = 0\n mr = 0\n boundary = 1 / 2\n left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen\n right_region_boundary = width * boundary # right lane line segment should be on right 1/3 of the screen\n for line_segment in line_segments:\n for x1, y1, x2, y2 in line_segment:\n if x1 == x2:\n continue\n Ys += [y1, y2]\n min_y = min(Ys)\n max_y = 700\n fit = np.polyfit((x1, x2), (y1, y2), 1)\n slope = fit[0]\n intercept = fit[1]\n if slope < 0:\n if x1 < left_region_boundary and x2 < left_region_boundary:\n left_fit.append((slope, intercept))\n else:\n if x1 > right_region_boundary and x2 > right_region_boundary:\n right_fit.append((slope, intercept))\n\n left_fit_average = np.average(left_fit, axis=0)\n if len(left_fit) > 0:\n x1 = (min_y - left_fit_average[1]) / left_fit_average[0]\n x2 = (max_y - left_fit_average[1]) / left_fit_average[0]\n cords.append([[int(x1), int(min_y), int(x2), int(max_y)]])\n ml = 1\n else:\n ml = 0\n\n right_fit_average = np.average(right_fit, axis=0)\n if len(right_fit) > 0:\n x1 = (min_y - right_fit_average[1]) / right_fit_average[0]\n x2 = (max_y - right_fit_average[1]) / right_fit_average[0]\n cords.append([[int(x1), int(min_y), int(x2), int(max_y)]])\n mr = 1\n else:\n mr = 0\n\n # print(ml, mr)\n return cords, ml, mr\n except:\n return 0, 0, 0", "def getpt(pt1, pt2, pt3, offset):\n ### Get first offset intercept\n if pt2[0] - pt1[0] != 0:\n m = (pt2[1] - pt1[1])/(pt2[0] - pt1[0])\n boffset = getoffsetintercept(pt1, pt2, m, offset)\n else: # if vertical line (i.e. undefined slope)\n m = \"undefined\"\n \n ### Get second offset intercept\n if pt3[0] - pt2[0] != 0:\n mprime = (pt3[1] - pt2[1])/(pt3[0] - pt2[0])\n boffsetprime = getoffsetintercept(pt2, pt3, mprime, offset)\n else: # if vertical line (i.e. 
undefined slope)\n mprime = \"undefined\"\n \n ### Get intersection of two offset lines\n if m != \"undefined\" and mprime != \"undefined\":\n # if neither offset intercepts are vertical\n newx = (boffsetprime - boffset)/(m - mprime)\n newy = m * newx + boffset\n elif m == \"undefined\":\n # if first offset intercept is vertical\n newx, y_infinity = calcoffsetpoint(pt1, pt2, offset)\n newy = mprime * newx + boffsetprime\n elif mprime == \"undefined\":\n # if second offset intercept is vertical\n newx, y_infinity = calcoffsetpoint(pt2, pt3, offset)\n newy = m * newx + boffset\n elif m == \"undefined\" and mprime == \"undefined\":\n # if both offset intercepts are vertical (same line)\n newx, y_infinity = calcoffsetpoint(pt1, pt2, offset)\n newy = pt2[1]\n return newx, newy", "def distancetoline(p, l1, l2):\n vx = l1.x-p.x \n vy = l1.y-p.y\n ux = l2.x-l1.x\n uy = l2.y-l1.y\n\n length = ux*ux+uy*uy;\n\n det = (-vx*ux)+(-vy*uy); \n # if this is < 0 or > length then its outside the line segment\n if det<0 or det>length:\n ux=l2.x-p.x\n uy=l2.y-p.y\n return sqrt(min(vx*vx+vy*vy, ux*ux+uy*uy))\n\n det = ux*vy-uy*vx\n if length == 0.0:\n return 0.0\n else:\n return sqrt((det*det)/length)", "def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope", "def offset_line(line, distance, normal=[0., 0., 1.]):\n pt1, pt2 = line[0], line[1]\n vec = subtract_vectors(pt1, pt2)\n dir_vec = normalize_vector(cross_vectors(vec, normal))\n\n if isinstance(distance, list):\n distances = distance\n else:\n distances = [distance, distance]\n\n vec_pt1 = scale_vector(dir_vec, distances[0])\n vec_pt2 = scale_vector(dir_vec, distances[1])\n pt1_new = add_vectors(pt1, vec_pt1)\n pt2_new = add_vectors(pt2, vec_pt2)\n return pt1_new, pt2_new", "def makeLeftRightline(self):\r\n left_lines = []\r\n right_lines = []\r\n for i in self.lines:\r\n for x1,y1,x2,y2 in i:\r\n if x1 == x2:\r\n #Vertical Lines\r\n pass\r\n else:\r\n m = (y2 - y1) / (x2 - x1)\r\n c = y1 - m * x1\r\n if m < 0:\r\n left_lines.append((m,c))\r\n elif m >= 0:\r\n right_lines.append((m,c))\r\n return left_lines,right_lines", "def line(x1,y1,x2,y2,z_thickness,laser):\r\n\t#Global variables that are used by all algorithms\r\n\tlayers = int(z_thickness/laser[\"z_spacing\"])\r\n\r\n\t#Works out offset when beginning on a new layer\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\ttaper_x,taper_y = offset(x1,y1,x2,y2,taper)\r\n\r\n\t#Works out offset between each parallel scan on the same layer\r\n\tdelta_x,delta_y = offset(x1,y1,x2,y2,laser[\"xy_spacing\"])\r\n\r\n\t#Works out maximum offset from starting line, we don't want to exceed this at any point.\r\n\tmax_taper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (z_thickness) * 2\r\n\tmax_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)\r\n\t#max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y\r\n\r\n\t#Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows\r\n\tcutlist = []\r\n\tfor a in range(layers):\r\n\t\tnew_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y\r\n\t\ti = 0\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\twhile abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):\r\n\t\t\t#This use of i is to reduce the jump distance between individual scans\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", 
f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y\r\n\t\t\ti = i + 1\r\n\t\t#Having completed one layer, the laser moves down to begin the next layer\r\n\t\tmax_delta_x = max_delta_x - taper_x\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def offset(offset):\n\n # Providing only one value for the offset means that\n # the same offset is applied\n if type(offset) in [int, float] :\n offset = offset * ones(len(gca().lines))\n\n # But for a fine tuning, a list can be given.\n # Be carefull, the list has to have the same size\n # as the gca().lines list.\n if type (offset) == list :\n if len(offset) != len (gca().lines) :\n print \"The offset list has a size different of\",\n \"the gca().lines list\"\n return\n\n total_offset = 0\n _min, _max = 1e31, -1e31\n\n for i, j in zip(gca().lines, offset) :\n y0 = i.get_ydata() + j + total_offset\n i.set_ydata(y0)\n if y0.min() < _min :\n print \"min\", y0.min()\n _min = y0.min()\n if y0.max() > _max :\n print \"max\", y0.max()\n _max = y0.max()\n total_offset = total_offset + j\n\n # Enlarge the ylim by 10 %\n _min = _min - 0.1 * abs(_max - _min)\n _max = _max + 0.1 * abs(_max - _min)\n ylim(_min,_max)\n draw()", "def offset(self, offset):\n return Line(self.p + offset * self.cross_z.normalized(), self.v)", "def offset(self, offset):\n return Line3d(self.p + offset * self.cross.normalized(), self.v)", "def get_point_from_lengths_y_first(xA, yA, xB, yB, l2, l3, direction='right'):\n\n A = ((l2**2 - l3**2) + (xB**2 - xA**2) + (yB**2 - yA**2)) / (2 * (xB - xA))\n B = (yA - yB) / (xB - xA)\n\n a = B**2 + 1\n b = 2 * A * B - 2 * yA - 2 * xA * B\n c = A**2 + xA**2 + yA**2 - l2**2 - 2 * xA * A\n\n yC = solve_quadratic_equation(a, b, c, direction)\n xC = A + B * yC\n\n return [xC, yC]", "def calculatecenterpos(holepolylinelist):\r\n center_pos_list=[]\r\n for poly in holepolylinelist:\r\n center_pos_x=0\r\n center_pos_y=0\r\n for pos in poly: #通过累加各多段线顶点坐标值,然后除以多段线的顶点数,计算出其中心点的坐标\r\n center_pos_x=center_pos_x+pos[0]\r\n center_pos_y=center_pos_y+pos[1]\r\n center_pos_x=center_pos_x/len(poly)-(globalconfig.CUTLINE_X_OFFSET+globalconfig.RING_DISTANCE/2)\r\n center_pos_y=center_pos_y/len(poly)-(globalconfig.CUTLINE_Y_OFFSET+globalconfig.RING_DISTANCE/2)\r\n center_pos_list.append([center_pos_x,center_pos_y])\r\n return center_pos_list", "def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end", "def get_lane_area(poly_l, poly_r, n=10):\n # set the list of points as needed by polyfill\n x = np.linspace(0, 720, n)\n fy_l = poly_l(x)\n fy_r = poly_r(x)\n\n return np.append(np.c_[fy_l,x], np.c_[fy_r,x][::-1], axis=0)", "def datasetratiocopy_xl_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及左边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01:\r\n if pos_x<0: #judge if the pos is on the origin outline,if on outline,will be 
moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def offsetline(linen, pattern_result):\n\n if \"nlines\" in pattern_result:\n nlines = pattern_result[\"nlines\"]\n else:\n nlines = 0\n new_linen = linen - nlines - 1\n if new_linen < 0:\n return 0\n else:\n return new_linen", "def __get_lane_area(self, poly_l, poly_r, n=10):\n # set the list of points as needed by polyfill\n x = np.linspace(0, 720, n)\n fy_l = poly_l(x)\n fy_r = poly_r(x)\n\n return np.int32(np.append(np.c_[fy_l,x], np.c_[fy_r,x][::-1], axis=0))", "def diag_line((lat0, lon0, alt0), (lat, lon, alt), k=5):\n\tlats = np.linspace(lat0, lat, k)\n\tlons = np.linspace(lon0, lon, k)\n\talts = np.linspace(alt0, alt, k)\n\tp = zip(lats, lons, alts)\n\treturn p", "def raline(self):\n\n x, y = self._get_xy_lims()\n raline = Line2D(\n xdata=[x - self.offset, x - self.span],\n ydata=[y, y],\n color=self.colour,\n linewidth=2,\n zorder=10,\n path_effects=[pe.Stroke(linewidth=3, foreground='k'), pe.Normal()]\n )\n\n return raline" ]
[ "0.74781597", "0.6577585", "0.6375418", "0.6173955", "0.6073473", "0.6069938", "0.597449", "0.59539807", "0.59206563", "0.5882129", "0.58703756", "0.5837737", "0.58107406", "0.5750666", "0.57391214", "0.570022", "0.56990975", "0.5670591", "0.5662863", "0.56608677", "0.5651498", "0.56285965", "0.56078124", "0.56010765", "0.5599696", "0.5585602", "0.558165", "0.55733436", "0.5572676", "0.5571589" ]
0.7511953
0
Build and write the finite element mesh (non-computational).
def write_mesh_file(allxyz, beck_bed):
    if SAVEMESH:
        print('+> Saving finite element mesh files...', end='')
        fname = FNAME.rsplit('.', 1)[0]
        ncol = beck_bed[0,:].size
        nrow = beck_bed[:,0].size
        nele = (nrow-1)*(ncol-1)*2
        d = compute_mesh(nrow, ncol, nele)
        h = ':NodeCount ' + str(allxyz[:,0].size) + '\n:ElementCount ' \
            + str(nele) + '\n#\n:EndHeader\n'
        with open(fname + '_mesh.t3s', 'w') as f:
            f.write(h)
        with open(fname + '_mesh.t3s', 'a') as f:
            np.savetxt(f, allxyz, fmt='%.6e')
            np.savetxt(f, d, fmt='%d')
            f.write('\n\n')
        h = 'TITLE = \"' + fname \
            + '_mesh\"\nVARIABLES = \"X\", \"Y\", \"' + fname \
            + '_mesh\"\nZONE NODES=' + str(allxyz[:,0].size) + ', ELEMENTS=' \
            + str(nele) + ', DATAPACKING=POINT, ZONETYPE=FETRIANGLE\n'
        with open(fname + '_mesh.dat', 'w') as f:
            f.write(h)
        with open(fname + '_mesh.dat', 'a') as f:
            np.savetxt(f, allxyz, fmt='%.6e')
            np.savetxt(f, d, fmt='%d')
            f.write('\n\n')
        inlet = np.zeros((ncol,), dtype=int)
        outlet = np.zeros((ncol,), dtype=int)
        for i in range(ncol):
            inlet[i] = 1 + i*nrow
            outlet[i] = (1 + i)*nrow
        left = np.zeros((nrow-2,), dtype=int)
        right = np.zeros((nrow-2,), dtype=int)
        for i in range(1, nrow-1):
            left[i-1] = (ncol-2)*nrow + i + 1
            right[i-1] = (ncol-1)*nrow + i + 1
        cli = np.zeros((2*(nrow+ncol-2), 13))
        cli[:,:2] = 2
        cli[:,7] = 2
        cli[:,11] = np.concatenate((inlet, outlet, left, right))
        cli[:,12] = np.arange(2*(nrow+ncol-2)) + 1
        cli[:ncol,0] = 4
        cli[:ncol,1] = 5
        cli[:ncol,2] = 5
        cli[:ncol,7] = 4
        cli[ncol:2*ncol,0] = 5
        cli[ncol:2*ncol,1] = 4
        cli[ncol:2*ncol,2] = 4
        cli[ncol:2*ncol,7] = 4
        np.savetxt(fname + '_BC_tmp.cli', cli, fmt='%d')
        with open(fname + '_BC.cli', 'w') as out_f:
            with open(fname + '_BC_tmp.cli', 'r') as in_f:
                for i, line in enumerate(in_f):
                    if i < ncol:
                        s = ' #Inlet'
                    elif i >= ncol and i < 2*ncol:
                        s = ' #Outlet'
                    else:
                        s = ' #'
                    out_f.write(line.rstrip('\n') + s + '\n')
            out_f.write('\n')
        os.remove(fname + '_BC_tmp.cli')
        h = ':FileType bc2 ASCII EnSim 1.0' \
            + '\n:NodeCount ' + str(allxyz[:,0].size) \
            + '\n:ElementCount ' + str(nele) \
            + '\n:ElementType T3' \
            + '\n:BoundarySegmentCount 2' \
            + '\n# id code sectionCount startNode1 endNode1 startNode2 endNode2 tracerCode name' \
            + '\n:BoundarySegment 1 455 1 1 ' + str(ncol) + ' 1 1 4 \"Inlet\"' \
            + '\n:BoundarySegment 2 544 1 ' + str(ncol+1) + ' ' + str(2*ncol) + ' 1 1 4 \"Outlet\"' \
            + '\n:ShorelineCount 1' \
            + '\n:ShorelineNodeCount ' + str(2*(nrow+ncol-2)) \
            + '\n:EndHeader' \
            + '\n:BeginNodes ' + str(allxyz[:,0].size) + '\n'
        with open(fname + '_BC.bc2', 'w') as f:
            f.write(h)
        with open(fname + '_BC.bc2', 'a') as f:
            xyz = np.copy(allxyz)
            xyz[:,2] = 0
            np.savetxt(f, xyz, fmt='%.6e')
            f.write(':EndNodes\n:BeginElements ' + str(nele) + '\n')
            np.savetxt(f, d, fmt='%d')
            f.write(':EndElements\n:BeginTable ' + str(2*(nrow+ncol-2)) + ' 15\n')
            with open(fname + '_BC.cli', 'r') as g:
                lines = g.read()
            f.write(lines[:-1])
            f.write(':EndTable\n\n')
        print(' [done]')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CreateDummyUpperDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tri\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"tet\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"quad\":\n mesh.Parallelepiped(nx=1,ny=1,nz=1, element_type=\"hex\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"line\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n sys.stdout = sys.__stdout__\n\n return mesh", "def WriteMFEM(self, filename):\n\n self.__do_memebers_exist__()\n\n nodeperelem = self.InferNumberOfNodesPerElement()\n\n if self.element_type == \"tet\":\n etype = 4\n betype = 2\n elif self.element_type == \"hex\":\n etype = 5\n betype = 3\n elif self.element_type == \"tri\":\n etype = 2\n betype = 1\n elif self.element_type == \"quad\":\n etype = 3\n betype = 1\n elif self.element_type == \"line\":\n etype = 1\n betype = 0\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n boundary = self.faces\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n boundary = self.edges\n elif self.element_type == \"line\":\n boundary = self.corners\n\n with open(filename, 'w') as f:\n f.write(\"MFEM mesh v1.0\\n\")\n f.write(\"#\\n\\n\")\n\n f.write(\"dimension\\n\")\n f.write('{}'.format(self.InferSpatialDimension()))\n f.write(\"\\n\\n\")\n\n f.write(\"elements\\n\")\n f.write('{}'.format(self.nelem))\n f.write(\"\\n\")\n for elem in range(self.nelem):\n f.write('1 {} '.format(etype))\n for node in range(nodeperelem):\n f.write('{} '.format(self.elements[elem,node]))\n f.write(\"\\n\")\n f.write(\"\\n\\n\")\n\n\n f.write(\"boundary\\n\")\n f.write('{}'.format(boundary.shape[0]))\n f.write(\"\\n\")\n for elem in range(boundary.shape[0]):\n f.write('1 {} '.format(betype))\n for node in range(boundary.shape[1]):\n f.write('{} '.format(boundary[elem,node]))\n f.write(\"\\n\")\n f.write(\"\\n\\n\")\n\n\n f.write(\"vertices\\n\")\n f.write('{}'.format(self.points.shape[0]))\n f.write(\"\\n\")\n f.write('{}'.format(self.points.shape[1]))\n f.write(\"\\n\")\n\n for elem in range(self.points.shape[0]):\n for node in range(self.points.shape[1]):\n f.write('{} '.format(self.points[elem,node]))\n f.write(\"\\n\")", "def CreateDummyLowerDimensionalMesh(self):\n\n\n sys.stdout = open(os.devnull, \"w\")\n p = self.InferPolynomialDegree()\n mesh = Mesh()\n if self.element_type == \"tet\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"tri\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"hex\":\n mesh.Rectangle(nx=1,ny=1, element_type=\"quad\")\n mesh.GetHighOrderMesh(p=p)\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n mesh.Line(n=1, p=p)\n elif self.element_type == \"line\":\n mesh.element_type = \"point\"\n mesh.nelem = 1\n mesh.nnode = 1\n mesh.degree = p\n mesh.elements = np.array([[0]])\n mesh.points = np.array([[0.,0.,0.]])\n sys.stdout = sys.__stdout__\n\n return mesh", "def WriteMesh(fileW, mesh, exportMatrix, materials, labels, isCollision = False):\n from . 
import fileWriter, enums\n global DO\n\n debug(\" Writing BASIC:\", mesh.name)\n start = fileW.tell()\n\n # the verts and normals in pairs and a list that translates between original id and distinct id\n distVertNrm = VertNrmPairs(mesh.vertices, exportMatrix) \n\n #creating a bounding box and updating it while writing vertices\n bounds = BoundingBox()\n\n #writing vertices\n verticesAddress = fileW.tell()\n for v in distVertNrm:\n v[0].write(fileW)\n bounds.checkUpdate(v[0])\n\n bounds.calcCenter()\n bounds.calcRadius(mesh.vertices)\n\n #writing normals\n normalsAddress = fileW.tell()\n for v in distVertNrm:\n v[1].write(fileW)\n\n # creating the loops (as an index list)\n\n if isCollision:\n polyVs = PolyVert.collisionFromLoops(mesh)\n else:\n polyVs = PolyVert.fromLoops(mesh)\n\n # making them strips, each set is for one mesh set\n materialLength = len(materials)\n if materialLength < 2:\n polyT = list()\n for p in polyVs:\n polyT.extend(p)\n polyVs = [polyT]\n\n\n polyStrips = PolyVert.toStrips(polyVs)\n\n if DO:\n for i,s in enumerate(polyStrips):\n if s is not None:\n if s[0] == enums.PolyType.Strips:\n print(\" strips\", str(i)+\":\", len(s) - 1)\n else:\n print(\" tris\", str(i)+\":\", (len(s) - 1) / 3)\n\n #writing the mesh data and getting the mesh sets\n meshSets = list() #[None] * len(polyStrips)\n\n if materialLength == 0 or isCollision:\n for i, p in enumerate(polyStrips):\n if p == None:\n continue\n meshSets.append(PolyVert.write(fileW, mesh, 0, i, p, isCollision))\n else:\n for i, p in enumerate(polyStrips):\n if p == None:\n continue\n matID = 0\n try:\n for mid, m in enumerate(materials):\n if m.name == mesh.materials[i].name:\n matID = mid\n break\n except ValueError:\n debug(\" material\", mesh.materials[i].name, \"not found\")\n meshSets.append(PolyVert.write(fileW, mesh, matID, i, p))\n\n # writing the mesh sets\n meshSetAddress = fileW.tell()\n\n for m in meshSets:\n m.write(fileW, labels)\n\n #adding mesh address to the labels\n labels[\"bsc_\" + mesh.name] = fileW.tell()\n #labels[mesh.name] = fileW.tell()\n\n #writing addresses\n\n labels[\"bsc_\" + mesh.name + \"_v\"] = verticesAddress\n fileW.wUInt(verticesAddress)\n labels[\"bsc_\" + mesh.name + \"_nrm\"] = normalsAddress\n fileW.wUInt(normalsAddress)\n fileW.wUInt(len(distVertNrm))\n labels[\"bsc_\" + mesh.name + \"_ml\"] = meshSetAddress # ml = \"mesh list\"\n fileW.wUInt(meshSetAddress)\n fileW.wUInt(0x00000010) # material address is always the same (at least the way this addon exports the format)\n fileW.wUShort(len(meshSets))\n fileW.wUShort(materialLength) # material count\n bounds.write(fileW)\n fileW.wUInt(0) #sa1 gap\n\n if DO:\n print(\" vert addr:\", '{:08x}'.format(verticesAddress))\n print(\" nrm addr:\", '{:08x}'.format(normalsAddress))\n print(\" vertices:\", len(distVertNrm))\n print(\" set addr:\", '{:08x}'.format(meshSetAddress))\n print(\" sets:\", len(meshSets))\n print(\" mats:\", materialLength)\n print(\" BASIC length:\", (fileW.tell() - start))\n print(\"----- \\n\")\n\n fileW.align(4)", "def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = 
main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main", "def meshmaker(self,\n abmn=None,\n electrode_spacing=5,\n elevation_data=None\n ):\n\n dat = abmn\n mesh_dir = self.mesh_dir\n mesh_exe_name = self.mesh_exe\n elev = elevation_data\n es = electrode_spacing # Electrode spacing\n\n # Electrodes x-position\n\n extent = int(max(dat.flatten())) # Maximum electrode x-position\n\n epx = list(range(0, extent + es, es))\n\n epn = 'elec1.dat'\n epfn = jp(mesh_dir, epn) # Electrode position file\n\n with open(epfn, 'w') as elec:\n [elec.write(str(i + 1) + ' ' + str(i * es) + '\\n') for i in range(0, len(epx))]\n elec.close()\n\n # Electrodes elevation\n\n fnel = f1d(elev[:, 0], elev[:, 1], kind='cubic',\n fill_value=\"extrapolate\") # Interpolation function for the elevation to fill the gaps.\n elint = list(map(float, list(map(fnel, epx))))\n\n evn = 'topo1.dat'\n evf = jp(mesh_dir, evn) # Electrode elevation file\n\n with open(evf, 'w') as elev:\n [elev.write(str(i * es) + ' ' + str(elint[i]) + '\\n') for i in range(0, len(epx))]\n elev.close()\n\n ms_exe_f = jp(mesh_dir, path_leaf(mesh_exe_name))\n\n # if not os.path.exists(self.mesh_exe): # Check if the mesh exe files can be found.\n # if os.path.exists(jp(self.working_dir, path_leaf(mesh_exe_name))):\n # copyfile(jp(self.working_dir, path_leaf(mesh_exe_name)), ms_exe_f)\n # else:\n # print('Can not find mesh executable')\n\n if os.path.exists(self.mesh_exe): # Check if the mesh exe files can be found.\n if not os.path.exists(jp(self.mesh_dir, path_leaf(mesh_exe_name))):\n copyfile(self.mesh_exe, ms_exe_f)\n else:\n print('Can not find mesh executable')\n self.mesh_exe = ms_exe_f\n\n mesh_short = 
'.\\\\'\n epfn = crtomo_file_shortener(ms_exe_f, epfn)\n evf = crtomo_file_shortener(ms_exe_f, evf)\n\n # Writing mesh.in file\n meshparams = [\"{}\".format(mesh_short),\n \"{}\".format(epfn),\n \"Mesh\", \"2\", \"{}\".format(evf),\n \"0 0\",\n \"0.1 20 0.01 0.05\"]\n\n meshinf = jp(os.path.dirname(self.mesh_exe), 'mesh.in')\n\n with open(meshinf, 'w') as ms:\n ms.write('\\n'.join(meshparams))\n ms.close()\n\n # Running mesh exe file\n\n mmdir = jp(mesh_dir, 'Model') # CRTOMO automatically loads the results in a folder called 'Model'\n\n try:\n if not os.path.exists(mmdir):\n os.makedirs(mmdir)\n except:\n pass\n\n os.chdir(self.mesh_dir)\n sp.call([self.mesh_exe]) # Run\n os.chdir(self.working_dir)\n\n msh = datread(jp(mmdir, 'Mesh.msh'))\n nelem = int(msh[1][1])\n\n # Builing final mesh\n\n # 1 - Where there are 4 columns, move them to the right and add a column beginning by 1 incrementing by 1\n # each line.\n\n nc = [len(e) for e in msh]\n\n cidx1 = nc.index(4) # Beginning of 4 columns\n b1 = nc.index(2) # End of 4 columns when 2 columns appear\n [msh[i].insert(0, i + 1 - cidx1) for i in range(cidx1, b1)]\n\n # 2 - Where's there's one column, take it and place it on the right of the two previous one. Move the three\n # of them to the right and add a new column like step 1.\n\n l1 = [i for i, x in enumerate(nc) if x == 1] # Index of rows of length 1\n cidx2 = l1[0]\n b2 = l1[-1]\n\n l2 = [i for i, x in enumerate(nc) if x == 2] # Index of rows of length 2\n cidx3 = l2[0]\n b3 = l2[-1]\n\n for i in range(len(l2)):\n msh[l2[i]] += msh[l1[i]] # Inserting column\n\n [msh[l2[i]].insert(0, int(i + 1)) for i in range(len(l2))] # Inserting column 1 2 3 ...\n\n msh = np.delete(msh, l1) # Deleting column\n\n nc2 = [len(e) for e in msh] # New len array\n\n l5 = [i for i, x in enumerate(nc2) if x == 5] # Index of rows of length 5\n l5.pop(0) # First element = header, not necessary\n for j in range(len(l5)): # Adding columns as required\n msh[l5[j]] += ['T', 'T', 'T', 'T', int(10 + j), 'T', int(2 + j), 'T']\n\n adj = [msh[l5[i]][1:5] for i in range(len(l5))] # Preparing 'adj' file\n adj = np.array([list(map(int, a)) for a in adj])\n\n print('neighbourg process begins, there are {} elements'.format(nelem))\n\n adji = [Neighbourgs(adj, i) for i in range(nelem)]\n\n print('neighbourg process over')\n\n for j in range(len(l5)): # Adding columns as required\n msh[l5[j]] += list(map(str, adji[j]))\n\n # Export final mesh\n\n mesh_file_name = jp(mesh_dir, 'Mesh.dat') # Export Mesh.msh as Mesh.dat and Mesh.elc as elec.dat\n\n meshdat = '\\n'.join(['\\t'.join(list(map(str, l))) for l in msh])\n\n with open(mesh_file_name, 'w') as md:\n md.write(meshdat)\n md.close()\n\n elec_file_name = jp(mesh_dir, 'elec.dat') #\n\n copyfile(jp(mmdir, 'Mesh.elc'), elec_file_name) # Mesh.elc -> elec.dat\n\n print('mesh generated')", "def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] 
= plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)", "def build_mesh(self):\n vertices = []\n indices = []\n step = 10\n istep = (pi * 2) / float(step)\n for i in range(step):\n x = 350 + cos(istep * i) * 100\n y = 350 + sin(istep * i) * 100\n vertices.extend([x, y, 0, 0])\n indices.append(i)\n return Mesh(vertices=vertices, indices=indices)", "def mesh_uniform(N_e, d, Omega):", "def save_surface_mesh(self, fname):\n triangulation = self.surf_mesh\n surf_indx = self.surface_indices\n points = self.cluster.get_positions()\n with open(fname, 'w') as out:\n # Write mandatory header\n out.write(\"$MeshFormat\\n\")\n out.write(\"2.2 0 8\\n\")\n out.write(\"$EndMeshFormat\\n\\n\")\n\n\n # Write points\n out.write(\"$Nodes\\n\")\n out.write(\"{}\\n\".format(points.shape[0]))\n for i in range(points.shape[0]):\n vec = points[i, :]\n out.write(\"{} {} {} {}\\n\".format(i+1, vec[0], vec[1], vec[2]))\n out.write(\"$EndNodes\\n\")\n\n # Write triangles\n out.write(\"$Elements\\n\")\n out.write(\"{}\\n\".format(len(triangulation)))\n for i, tri in enumerate(triangulation):\n out.write(\"{} 2 0 {} {} {}\\n\".format(i+1, tri[0]+1, tri[1]+1, tri[2]+1))\n out.write(\"$EndElements\\n\")\n\n if self._interface_energy:\n # Interface energy has been computed\n # We store the values as node data\n out.write(\"$NodeData\\n\")\n out.write(\"1\\n\")\n out.write(\"\\\"Gamma\\\"\\n\")\n out.write(\"1\\n0.0\\n\")\n out.write(\"4\\n0\\n1\\n{}\\n0\\n\".format(len(self._interface_energy)))\n for indx, interf in zip(surf_indx, self._interface_energy):\n out.write(\"{} {}\\n\".format(indx+1, interf[1]))\n out.write(\"$EndNodeData\\n\")\n print(\"Surface mesh saved to {}\".format(fname))", "def WriteGmsh(self, filename, write_surface_info=False):\n\n self.__do_essential_memebers_exist__()\n\n mesh = deepcopy(self)\n p = self.InferPolynomialDegree()\n\n # if p > 1:\n # mesh = self.GetLinearMesh(remap=True)\n\n element_type = mesh.element_type\n edim = mesh.InferElementalDimension()\n\n # THESE TAGS ARE DIFFERENT FROM THE GMSH READER TAGS\n bel = -1\n if element_type == \"line\":\n el = 1\n elif element_type == \"tri\":\n if p == 1:\n el = 2\n bel = 1\n elif p == 2:\n el = 9\n bel = 8\n elif p == 3:\n el = 21\n bel = 26\n elif p == 4:\n el = 23\n bel = 27\n elif element_type == \"quad\":\n if p == 1:\n el = 3\n bel = 1\n elif p == 2:\n el = 10\n bel = 8\n elif p == 3:\n el = 36\n bel = 26\n elif p == 4:\n el = 37\n bel = 27\n elif element_type == \"tet\":\n if p == 1:\n el = 4\n bel = 2\n elif p == 2:\n el = 11\n bel = 9\n elif element_type == \"hex\":\n if p == 1:\n el = 5\n bel = 3\n else:\n el = 12\n bel = 10\n else:\n raise ValueError(\"Element type not understood\")\n\n\n elements = np.copy(mesh.elements).astype(np.int64)\n points = mesh.points[np.unique(elements),:]\n\n # TRI6\n if el == 9:\n elements = elements[:,[0, 1, 2, 3, 5, 4]]\n # TRI10\n elif el == 
21:\n elements = elements[:,[0, 1, 2, 3, 4, 7, 9, 8, 5, 6]]\n # TRI15\n elif el == 23:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 9, 12, 14, 13, 10, 6, 7, 8, 11]]\n # QUAD9\n elif el == 10:\n elements = elements[:,[0, 1, 2, 3, 4, 7, 8, 5, 6]]\n # QUAD16\n elif el == 36:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 9, 13, 15, 14, 10, 6, 7, 8, 12, 11]]\n # QUAD25\n elif el == 37:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 6, 11, 16, 21, 24, 23, 22, 17, 12, 7, 8, 9, 10, 15, 20, 19, 18, 13, 14]]\n # TET10\n elif el == 11:\n # Tet 2\n elements = elements[:,[0, 1, 2, 3, 4, 6, 5, 7, 9, 8]]\n # HEX27\n elif el == 12:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 13, 11, 14, 12, 15, 16, 22, 23, 25, 26, 10, 17, 18, 20, 21, 24, 19]]\n\n # Take care of a corner case where nnode != points.shape[0]\n if mesh.nnode != points.shape[0]:\n mesh.nnode = points.shape[0]\n\n if points.shape[1] == 2:\n points = np.hstack((points,np.zeros((points.shape[0],1))))\n\n points_repr = np.zeros((points.shape[0],points.shape[1]+1), dtype=object)\n points_repr[:,0] = np.arange(mesh.nnode) + 1\n points_repr[:,1:] = points\n\n elements_repr = np.zeros((elements.shape[0],elements.shape[1]+5), dtype=object)\n elements_repr[:,0] = np.arange(mesh.nelem) + 1\n elements_repr[:,1] = el\n elements_repr[:,2] = 2\n elements_repr[:,3] = 0\n elements_repr[:,4] = 1\n elements_repr[:,5:] = elements + 1\n\n if write_surface_info:\n\n if edim == 3:\n boundary = np.copy(mesh.faces).astype(np.int64)\n elif edim == 2:\n boundary = np.copy(mesh.edges).astype(np.int64)\n\n boundary_repr = np.zeros((boundary.shape[0],boundary.shape[1]+5), dtype=object)\n boundary_repr[:,0] = np.arange(boundary.shape[0]) + 1\n boundary_repr[:,1] = bel\n boundary_repr[:,2] = 2\n boundary_repr[:,3] = 0\n boundary_repr[:,4] = 1\n boundary_repr[:,5:] = boundary + 1\n\n elements_repr[:,0] += boundary.shape[0]\n\n gmsh_nelem = mesh.nelem + boundary.shape[0]\n else:\n gmsh_nelem = mesh.nelem\n\n with open(filename, 'w') as f:\n f.write(\"$MeshFormat\\n\")\n f.write(\"2.2 0 8\\n\")\n f.write(\"$EndMeshFormat\\n\")\n f.write(\"$Nodes\\n\")\n f.write(str(mesh.nnode) + \"\\n\")\n\n np.savetxt(f, points_repr, fmt=\"%s\")\n\n f.write(\"$EndNodes\\n\")\n f.write(\"$Elements\\n\")\n f.write(str(gmsh_nelem) + \"\\n\")\n\n if write_surface_info:\n np.savetxt(f, boundary_repr, fmt=\"%s\")\n\n np.savetxt(f, elements_repr, fmt=\"%s\")\n\n f.write(\"$EndElements\\n\")", "def clear(self):\n\n self.index = 1\n self.degen = 1.\n self.nnnn_out = False\n self.json_out = False\n self.verbose = False\n self.ipol = 0\n self.ellip = 0.\n self.nepts = 0\n self.genfmt_order = 2\n self.genfmt_vers = \"\"\n self.exch_label = \"\"\n self.rs = 0.\n self.vint = 0.\n self.xmu = 0.\n self.edge = 0.\n self.kf = 0.\n self.rnorman = 0.\n self.gamach = 0.\n self.nepts = FEFF_maxpts\n\n dargs = dict(dtype=np.float64, order='F')\n largs = dict(dtype=np.int32, order='F')\n\n self.evec = np.zeros(3, **dargs)\n self.xivec = np.zeros(3, **dargs)\n self.ipot = np.zeros(1+FEFF_maxleg, **largs)\n self.beta = np.zeros(1+FEFF_maxleg, **dargs)\n self.eta = np.zeros(2+FEFF_maxleg, **dargs)\n self.ri = np.zeros(FEFF_maxleg, **dargs)\n self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)\n self.iz = np.zeros(1+FEFF_maxpot, **largs)\n self.kfeff = np.zeros(FEFF_maxpts, **dargs)\n self.real_phc = np.zeros(FEFF_maxpts, **dargs)\n self.mag_feff = np.zeros(FEFF_maxpts, **dargs)\n self.pha_feff = np.zeros(FEFF_maxpts, **dargs)\n self.red_fact = np.zeros(FEFF_maxpts, **dargs)\n self.lam = np.zeros(FEFF_maxpts, **dargs)\n 
self.rep = np.zeros(FEFF_maxpts, **dargs)\n self.nleg = 1", "def main():\n fem_mesh.check_version()\n\n opts = read_cli()\n\n # setup the new output file with a very long, but unique, filename\n loadfilename = (\"gauss_exc_sigma_%.3f_%.3f_%.3f_center_%.3f_%.3f_%.3f_amp_%.3f_amp_cut_%.3f_%s.dyn\" %\n (opts.sigma[0], opts.sigma[1], opts.sigma[2],\n opts.center[0], opts.center[1], opts.center[2],\n opts.amp, opts.amp_cut, opts.sym))\n LOADFILE = open(loadfilename, 'w')\n LOADFILE.write(\"$ Generated using %s:\\n\" % sys.argv[0])\n LOADFILE.write(\"$ %s\\n\" % opts)\n\n LOADFILE.write(\"*LOAD_NODE_POINT\\n\")\n\n # loop through all of the nodes and see which ones fall w/i the Gaussian\n # excitation field\n sym_node_count = 0\n node_count = 0\n NODEFILE = open(opts.nodefile,'r')\n for i in NODEFILE:\n # make sure not to process comment and command syntax lines\n if i[0] != \"$\" and i[0] != \"*\":\n i = i.rstrip('\\n')\n # dyna scripts should be kicking out comma-delimited data; if not,\n # then the user needs to deal with it\n fields = i.split(',')\n fields = [float(j) for j in fields]\n # check for unexpected inputs and exit if needed (have user figure\n # out what's wrong)\n if len(fields) != 4:\n print(\"ERROR: Unexpected number of node columns\")\n print(fields)\n sys.exit(1)\n # compute the Gaussian amplitude at the node\n exp1 = math.pow((fields[1]-opts.center[0])/opts.sigma[0], 2)\n exp2 = math.pow((fields[2]-opts.center[1])/opts.sigma[1], 2)\n exp3 = math.pow((fields[3]-opts.center[2])/opts.sigma[2], 2)\n nodeGaussAmp = opts.amp * math.exp(-(exp1 + exp2 + exp3))\n\n # write the point load only if the amplitude is above the cutoff\n # dyna input needs to be limited in precision\n if nodeGaussAmp > opts.amp*opts.amp_cut:\n\n node_count += 1\n # check for quarter symmetry force reduction (if needed)\n if opts.sym == 'qsym':\n if (math.fabs(fields[1]) < opts.search_tol and\n math.fabs(fields[2]) < opts.search_tol):\n nodeGaussAmp = nodeGaussAmp/4\n sym_node_count += 1\n elif (math.fabs(fields[1]) < opts.search_tol or\n math.fabs(fields[2]) < opts.search_tol):\n nodeGaussAmp = nodeGaussAmp/2\n sym_node_count += 1\n # check for half symmetry force reduction (if needed)\n elif opts.sym == 'hsym':\n if math.fabs(fields[1]) < opts.search_tol:\n nodeGaussAmp = nodeGaussAmp/2\n sym_node_count += 1\n elif opts.sym != 'none':\n sys.exit('ERROR: Invalid symmetry option specified.')\n\n LOADFILE.write(\"%i,3,1,-%.4f\\n\" % (int(fields[0]),\n nodeGaussAmp))\n\n # wrap everything up\n NODEFILE.close()\n LOADFILE.write(\"*END\\n\")\n LOADFILE.write(\"$ %i loads generated\\n\" % node_count)\n LOADFILE.write(\"$ %i exist on a symmetry plane / edge\\n\" % sym_node_count)\n LOADFILE.close()", "def create_start_data(self):\n\t\tdef inputMesh(feature_size):\n\t\t\tc1= np.expand_dims(np.array([0,-0.9]),0)\n\t\t\tc2= np.expand_dims(np.array([-0.9,0.9]),0)\n\t\t\tc3= np.expand_dims(np.array([0.9,0.9]),0)\n\t\t\tx1 = np.expand_dims(np.pad(np.array([0,-0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx2 = np.expand_dims(np.pad(np.array([-0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx3 = np.expand_dims(np.pad(np.array([0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tedge_index = np.transpose(np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) # COO format\n\t\t\treturn np.concatenate((c1,c2,c3),axis=0), np.concatenate((x1,x2,x3),axis=0),edge_index\n\n\t\tc, x, edge_index = inputMesh(self.params.feature_size)# x is c with zeros 
appended, x=f ..pixel2mesh\n\t\tdata_list_x = []\n\t\tdata_list_c = []\n\t\tdata_list_pid = []\n\t\tfor i in range(self.params.batch_size):\n\t\t\tdata_list_x.append(Data(x=torch.Tensor(x).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_c.append(Data(x=torch.Tensor(c).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_pid.append(Data(x=torch.zeros(c.shape[0],1).type(dtypeL).requires_grad_(False)))\n\t\tbatch_x = Batch.from_data_list(data_list_x)\n\t\tbatch_c = Batch.from_data_list(data_list_c)\n\t\tbatch_pid = Batch.from_data_list(data_list_pid)\n\t\treturn batch_x, batch_c, batch_pid", "def WriteOBJ(self, filename, write_texture=False, write_normal=False):\n\n self.__do_essential_memebers_exist__()\n\n mesh = deepcopy(self)\n p = self.InferPolynomialDegree()\n\n if p > 1:\n mesh = self.GetLinearMesh(remap=True)\n\n edim = mesh.InferElementalDimension()\n\n if edim == 2:\n elements = np.copy(mesh.elements).astype(np.int64)\n elif edim == 3:\n elements = np.copy(mesh.faces).astype(np.int64)\n else:\n raise RuntimeError(\"Writing obj file for {} elements not supported\".format(mesh.element_type))\n\n points = mesh.points[np.unique(elements),:]\n if points.shape[1] == 2:\n points = np.hstack((points,np.zeros((points.shape[0],1))))\n\n points_repr = np.zeros((points.shape[0],points.shape[1]+1), dtype=object)\n points_repr[:,0] = \"v\"\n points_repr[:,1:] = points\n\n elements_repr = np.zeros((elements.shape[0],elements.shape[1]+1), dtype=object)\n elements_repr[:,0] = \"f\"\n elements_repr[:,1:] = elements + 1\n\n if write_texture:\n textures = mesh.textures[np.unique(elements),:]\n\n textures_repr = np.zeros((textures.shape[0],textures.shape[1]+1), dtype=object)\n textures_repr[:,0] = \"vt\"\n textures_repr[:,1:] = textures\n\n elements_repr = np.zeros((mesh.telements.shape[0],mesh.telements.shape[1]+1), dtype=object)\n elements_repr[:,0] = \"f\"\n # elements_repr[:,1:] = telements + 1\n counter = 0\n for i, j in zip(elements,mesh.telements):\n curr_row = [str(ii+1)+\"/\"+str(jj+1) for ii,jj in zip(i,j)]\n elements_repr[counter,1:] = curr_row\n counter += 1\n\n with open(filename, \"w\") as f:\n # f.write(\"# \"+ str(mesh.nnode))\n # f.write('\\n')\n # f.write(\"# \"+ str(mesh.nelem))\n # f.write('\\n')\n\n np.savetxt(f, points_repr, fmt=\"%s\")\n if write_texture:\n np.savetxt(f, textures_repr, fmt=\"%s\")\n\n if write_normal:\n if self.normals is None:\n enormals = self.Normals()\n els = self.GetNodeCommonality()[0]\n self.normals = np.zeros((self.nnode, 3))\n for counter, el in enumerate(els):\n self.normals[counter] = np.sum(enormals[el], axis=0) / enormals[el].shape[0]\n\n normals_repr = np.zeros((self.normals.shape[0], self.normals.shape[1]+1), dtype=object)\n normals_repr[:,0] = \"vn\"\n normals_repr[:,1:] = self.normals\n np.savetxt(f, normals_repr, fmt=\"%s\")\n\n f.write('\\n')\n np.savetxt(f, elements_repr, fmt=\"%s\")", "def to_mesh(self, outfilename, projection=None, z=None):\n if projection is None:\n projection = \"LONG/LAT\"\n\n x = self.xy[:, 0]\n y = self.xy[:, 1]\n if z is None:\n z = np.zeros(self.n)\n codes = np.zeros(self.n, dtype=int)\n codes[y == self.y1] = 5 # north\n codes[x == self.x1] = 4 # east\n codes[y == self.y0] = 3 # south\n codes[x == self.x0] = 2 # west\n codes[(y == self.y1) & (x == self.x0)] = 5 # corner->north\n\n builder = MeshBuilder()\n builder.SetNodes(x, y, z, codes)\n\n elem_table = self._to_element_table(index_base=1)\n builder.SetElements(asnetarray_v2(elem_table))\n\n 
builder.SetProjection(projection)\n quantity = eumQuantity.Create(EUMType.Bathymetry, EUMUnit.meter)\n builder.SetEumQuantity(quantity)\n newMesh = builder.CreateMesh()\n newMesh.Write(outfilename)", "def convert(fluentmesh, \n func=None, \n mesh_format='nek5000', # nek5000, semtex or fenics\n periodic_dx={}, curves = {}, bcs = False, # nek5000 and semtex\n temperature=False, passive_scalars=[], # nek5000 only\n cylindrical=1, NZ=1): # semtex only\n ofilename = fluentmesh[:-4]\n ifile = open(fluentmesh, \"r\")\n\n if not nodes:\n # Read all lines of fluent mesh\n #lines = ifile.readlines()\n #if len(lines) == 0:\n #raise IOError(\"Empty fluent mesh file\")\n \n #scan_fluent_mesh(lines)\n scan_fluent_mesh(ifile)\n\n dim = nodes.shape[0]\n create_cell_face_map(dim, mesh_format)\n create_periodic_face_map(periodic_dx)\n create_periodic_cell_face_map()\n create_boundary_section(bcs, temperature, passive_scalars, mesh_format)\n # Modify the entire mesh using the shape-function func\n if func:\n sz = nodes.shape\n for i in range(sz[1]):\n x, y = nodes[:, i]\n if 'x' in func:\n xnew = func['x'](x, y)\n if abs(xnew - x) > 1e-6:\n nodes[0, i] = xnew\n if 'y' in func:\n ynew = func['y'](x, y)\n if abs(ynew - y) > 1e-6:\n nodes[1, i] = ynew\n if mesh_format == 'nek5000':\n print 'Warning!! Consider using userdat/userdat2 instead!'\n\n if not mesh_format == 'fenics':\n read_curved_sides(curves)\n\n # Generate the mesh files for given mesh format\n if mesh_format == 'nek5000':\n write_nek5000_file(dim, ofilename, curves, temperature, passive_scalars)\n elif mesh_format == 'semtex':\n write_semtex_file(dim, ofilename, curves, cylindrical, NZ)\n if mesh_format == 'fenics':\n write_fenics_file(dim, ofilename)\n\n ifile.close()", "def write_ugrid(self,\n fn,\n mesh_name='mesh',\n fields='auto',\n overwrite=False):\n if os.path.exists(fn):\n if overwrite:\n os.unlink(fn)\n else:\n raise GridException(\"File %s exists\"%(fn))\n\n if 1: # xarray-based code\n ds=xr.Dataset()\n ds[mesh_name]=1\n\n mesh_var=ds[mesh_name]\n mesh_var.attrs['cf_role']='mesh_topology'\n mesh_var.attrs['node_coordinates']='node_x node_y'\n mesh_var.attrs['face_node_connectivity']='face_node'\n mesh_var.attrs['edge_node_connectivity']='edge_node'\n mesh_var.attrs['node_dimension']='node'\n mesh_var.attrs['edge_dimension']='edge'\n mesh_var.attrs['face_dimension']='face'\n \n ds['node_x'] = ('node',),self.nodes['x'][:,0]\n ds['node_y'] = ('node',),self.nodes['x'][:,1]\n\n ds['face_node'] = ('face','maxnode_per_face'),self.cells['nodes']\n\n ds['edge_node']=('edge','node_per_edge'),self.edges['nodes']\n\n if fields=='auto':\n for src_data,dim_name in [ (self.cells,'face'),\n (self.edges,'edge'),\n (self.nodes,'node') ]:\n for field in src_data.dtype.names:\n if field.startswith('_'):\n continue\n if field in ['cells','nodes','edges','deleted']:\n continue # already included\n if src_data[field].ndim != 1:\n continue # not smart enough for that yet\n if field in ds:\n out_field = dim_name + \"_\" + field\n else:\n out_field=field\n \n ds[out_field] = (dim_name,),src_data[field]\n ds.to_netcdf(fn)\n \n if 0: # old qnc-based code\n nc=qnc.empty(fn)\n\n nc[mesh_name]=1\n mesh_var=nc.variables[mesh_name]\n mesh_var.cf_role='mesh_topology'\n\n mesh_var.node_coordinates='node_x node_y'\n nc['node_x']['node']=self.nodes['x'][:,0]\n nc['node_y']['node']=self.nodes['x'][:,1]\n\n mesh_var.face_node_connectivity='face_node'\n nc['face_node']['face','maxnode_per_face']=self.cells['nodes']\n\n mesh_var.edge_node_connectivity='edge_node'\n 
nc['edge_node']['edge','node_per_edge']=self.edges['nodes']\n\n nc.close()", "def WriteFile( self ):\n with open( \"BasisVector.in\" , \"w\" ) as outfile:\n firstLine = \" \" + str( self.NQ ) + \\\n \" \" + str( self.Nbranches ) + \\\n \" \" + str( self.NatomsUC ) + \\\n \" \" + str( self.dim ) + \"\\n\"\n outfile.write( firstLine )\n for qq in range( self.NQ ): ## loop over Q vectors\n lineQ = [ \"{:15.8f}\".format( x ) for x in \n self.QVectors[ qq , : ] ]\n lineQ = \"\".join( lineQ )\n outfile.write( lineQ + \"\\n\" )\n for branch in range( self.Nbranches ): ## loop over branches\n for atom in range( self.NatomsUC ): ## loop over atoms in unit cell\n line = [ \"{:15.8f}\".format( x ) for x in \n self.EigenVectors[ qq , branch , atom , : ] ]\n line = \"\".join( line )\n outfile.write( line + \"\\n\" )\n outfile.write( \"\\n\" )\n outfile.write( \"\\n\" )", "def serialize_mesh(self, filename):\n print 'Saving mesh to', filename\n if self.current_point_index != -1:\n print 'Points on mesh will not be serialized.'\n\n np.savez(filename, k=self.k, initial_point=self.initial_point,\n initial_face_index=self.initial_face_index,\n all_vertices=self.all_vertices, triangles=self.triangles,\n face_local_bases=self.face_local_bases,\n neighbor_faces=self.neighbor_faces)", "def plot_fenics_mesh(mesh, new_fig=True):\n if(new_fig):\n plt.figure()\n\n plot(mesh)\n #plt.title(\"FEniCS mesh\")\n plt.show(block=False)\n\n pass", "def comp_env_func(atomic_coords, field, cnfg, path_to_data, verbosity):\n\n id = make_id(field, atomic_coords / si.a_Si)\n atomic_coords = (atomic_coords / si.ab).tolist()\n field = field.tolist()\n\n path_to_data = os.path.join(path_to_data, id)\n\n if verbosity > 0:\n print(\"I am going to save data to {}\".format(path_to_data))\n\n # ----------------- check whether path is exist -------------\n\n if not os.path.exists(path_to_data):\n print(\"Path to data does not exist, {}\".format(path_to_data))\n user_input = raw_input(\"Do you want me to create a new directory for data? 
[y/N]:\")\n if user_input.lower() == 'y':\n os.makedirs(path_to_data)\n os.makedirs(os.path.join(path_to_data, 'v0'))\n os.makedirs(os.path.join(path_to_data, 'v1'))\n os.makedirs(os.path.join(path_to_data, 'v2'))\n else:\n raise EnvironmentError(\"Path to data does not exist, \", path_to_data)\n\n # ---------------------- making mesh ------------------------\n\n if os.path.isfile(os.path.join(path_to_data, 'mesh_sample.mesh')):\n print(\"There is a mesh stored in the file: {}\".format(os.path.join(path_to_data, 'mesh_sample.mesh')))\n user_input = raw_input(\"Do you want to generate a new mesh [y/N]:\")\n else:\n user_input = 'y'\n\n if not os.path.isfile(os.path.join(path_to_data, 'mesh_sample.mesh')) or user_input.lower() == 'y':\n # file does not exist\n path_to_make_mesh = os.path.join(os.path.dirname(__file__), 'make_mesh.edp')\n os.system('rm %s' % path_to_make_mesh)\n\n with open(path_to_make_mesh, 'w') as f:\n mesh = make_mesh_generator_script(path_to_data,\n cube_coords=cnfg['cube_coords'],\n num_elem_init=cnfg['num_elem_init'],\n atomic_coords=atomic_coords,\n mesh_adaptation=cnfg['mesh_adaptation'],\n verbosity=1)\n f.write(mesh)\n\n sp.call([\"FreeFem++\", \"make_mesh.edp\"])\n\n # # ----------- computing periodic Bloch functions -----------\n #\n # flag = True\n #\n # for file_name in os.listdir(path_to_data):\n # if file_name.startwith('wf'):\n # flag = False\n #\n # if flag:\n # os.system('python pot_ff.py')\n\n # -------------------- making potential ---------------------\n\n if os.path.isfile(os.path.join(path_to_data, 'pot3.txt')):\n print(\"There is a potential stored in: {}\".format(os.path.join(path_to_data, 'pot3.txt')))\n user_input = raw_input(\"Do you want to generate a new potential [y/N]:\")\n else:\n user_input = 'y'\n\n if not os.path.isfile(os.path.join(path_to_data, 'pot3.txt')) or user_input.lower() == 'y':\n # file does not exist\n\n k0 = si.k0 * si.ab\n\n kk = k0 * np.array([[1, 0, 0],\n [-1, 0, 0],\n [0, 1, 0],\n [0, -1, 0],\n [0, 0, 1],\n [0, 0, -1]])\n\n pot_for_ff(path_to_data, atomic_coords, kk[0, :], kk[0, :], '1')\n pot_for_ff(path_to_data, atomic_coords, kk[1, :], kk[1, :], '2')\n pot_for_ff(path_to_data, atomic_coords, kk[2, :], kk[2, :], '3')\n\n # ---------------- computing envelope functions ------------\n\n p1 = sp.Popen([\"FreeFem++\", \"si_ham.edp\",\n \"0\", \"0\", \"1.00\", \"1.00\", \"0.19\", path_to_data])\n\n p2 = sp.Popen([\"FreeFem++\", \"si_ham.edp\",\n \"0\", \"0\", \"1.00\", \"0.19\", \"1.00\", path_to_data])\n\n p3 = sp.Popen([\"FreeFem++\", \"si_ham.edp\",\n \"0\", \"0\", \"0.19\", \"1.00\", \"1.00\", path_to_data])\n\n p1.communicate()\n p2.communicate()\n p3.communicate()", "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) 
\n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "def _mesh(self):\n from scipy.spatial import Delaunay\n points = self.cluster.get_positions()\n delaunay = Delaunay(points)\n simplices = self._filter_max_dist_in_element(delaunay.simplices)\n delaunay.simplices = simplices\n return delaunay", "def show_mesh(self):\n g = self.build_gmsh()\n if g:\n mesh = cfm.GmshMesh(g)\n mesh.el_type = self.el_type\n\n mesh.dofs_per_node = self.dofs_per_node\n mesh.el_size_factor = self.el_size_factor\n self.mesh = mesh\n\n coords, edof, dofs, bdofs, elementmarkers = mesh.create()\n cfv.clf()\n\n cfv.draw_mesh(\n coords=coords,\n edof=edof,\n dofs_per_node=mesh.dofs_per_node,\n el_type=mesh.el_type,\n filled=True\n )\n if self.figure_canvas is not None:\n self.figure_canvas.draw()\n else:\n cfv.show_and_wait()\n return None\n else:\n return \"Canceled\"", "def write_xdmf(self, filename: str):\n\n mesh = UnstructuredMesh.from_h5(filename)\n mesh.write_h5(filename)", "def __init__(self, mesh: Mesh):\n self.mesh = mesh\n self.f = [0]*len(mesh.delaunay.simplices)", "def Draw1D(mesh, coefs, keep=False, n_p=2, figsize=(20,4)):\n if n_p <= 2:\n n_p = 2\n \n eps = 1e-6 \n \n x_v = [p[0] for p in mesh.ngmesh.Points()]\n x_s = []\n f_s = {}\n\n miny = 1e99\n for f, name in coefs:\n f_s[name] = []\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n \n for el in mesh.ngmesh.Elements1D():\n left = mesh.ngmesh.Points()[el.points[0]][0]\n right = mesh.ngmesh.Points()[el.points[1]][0]\n for l in range(n_p):\n y = left + eps + (l / (n_p-1)) * (right - eps -left) \n x_s.append(y)\n for f,name in coefs:\n ff = f(mesh(y))\n miny = min(miny,ff)\n f_s[name].append(ff)\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n\n \n # plt.clf()\n # display.display(plt.gcf())\n plt.figure(figsize=figsize)\n for f,name in coefs:\n plt.plot(x_s,f_s[name],label=name)\n plt.plot(x_v,[miny for v in x_v],'|',label='vertices')\n plt.xlabel(\"x\")\n plt.legend()\n plt.show()\n if keep:\n display.clear_output(wait=True)", "def local_composition(self, outfile):\n # TODO Rewrite if I ever need this again\n radius = 3.6 * 2\n npix = 64\n #mat = np.zeros((npix,npix,npix),dtype=np.float)\n #mat = np.zeros((npix,npix,npix),dtype={'names':['col1', 'col2', 'col3'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['40', '13', '29'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['id','data'], 'formats':['f4','f4']})\n #names = ['id','data']\n #formats = ['i4',('f4','f4','f4')]\n #mat = np.zeros((npix,npix,npix),dtype=dict(names = names, formats=formats))\n #mat = np.zeros((npix,npix,npix),dtype={'40':('i4',0), '29':('f4',0), '13':('f4',0)})\n print(\"Creating matrix...\")\n mat = [[[{} for i in range(npix)] for j in range(npix)] for k in range(npix)]\n print(\"Finished creating matrix.\")\n #print(repr(mat))\n dx = self.xsize/npix\n dy = self.ysize/npix\n dz = self.zsize/npix\n for ii,i in enumerate(drange(-npix/2*dx,npix/2*dx-dx,dx)):\n print(\"On ii = {0}\".format(ii))\n for jj,j in enumerate(drange(-npix/2*dy,npix/2*dy-dy,dy)):\n for kk,k in enumerate(drange(-npix/2*dz,npix/2*dz-dz,dz)):\n atoms = self.get_atoms_in_cutoff( (i,j,k), radius )\n comp = {}\n for atom in atoms:\n comp[str(atom.z)] = comp.get(str(atom.z),0) + 1.0\n for key in comp:\n comp[key] /= len(atoms)\n #print(comp)\n #mat[ii][jj][kk] = copy.copy(comp)\n mat[ii][jj][kk] = 
comp\n of = open(outfile,'w')\n of.write('IGOR\\n')\n for atomtype in self.atomtypes:\n of.write('\\nWAVES/N=({0},{1},{2})\\t {3}\\nBEGIN\\n'.format(npix,npix,npix,'partial_comp_'+znum2sym.z2sym(atomtype)))\n for layer in mat:\n for column in layer:\n for value in column:\n try:\n of.write(\"{0} \".format(value[str(atomtype)]))\n except KeyError:\n of.write(\"{0} \".format(0.0))\n of.write(\"\\n\")\n of.write('END\\n')\n of.write('X SetScale/P x 0,1,\"\", {0}; SetScale/P y 0,1,\"\", {0}; SetScale/P z 0,1,\"\", {0}; SetScale d 0,0,\"\", {0}\\n'.format('partial_comp_'+znum2sym.z2sym(atomtype)))\n of.close()\n return mat", "def CreateDummy3DMeshfrom2DMesh(self):\n\n self.__do_memebers_exist__()\n\n sys.stdout = open(os.devnull, \"w\")\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n if self.element_type == \"quad\":\n mm.element_type = \"hex\"\n mm.elements = np.zeros((1,int((p+1)**3))).astype(np.uint64)\n elif self.element_type == \"tri\":\n mm.element_type = \"tet\"\n mm.elements = np.zeros((1,int((p+1)*(p+2)*(p+3)/6))).astype(np.uint64)\n else:\n raise ValueError(\"Cannot make a 3D mesh from the 2D mesh of type {}\".format(self.element_type))\n\n mm.edges = np.zeros((1,p+1)).astype(np.uint64)\n mm.points = np.copy(self.points)\n mm.nelem = 1\n mm.nnode = mm.points.shape[0]\n mm.faces = np.copy(self.elements)\n mm.boundary_face_to_element = np.zeros((mm.faces.shape[0],2)).astype(np.int64)\n mm.boundary_face_to_element[:,0] = 1\n\n sys.stdout = sys.__stdout__\n\n return mm" ]
[ "0.67319965", "0.6486597", "0.641751", "0.6250623", "0.611316", "0.6060483", "0.6028214", "0.5988006", "0.59420913", "0.59015757", "0.5859401", "0.5840814", "0.5839659", "0.5817488", "0.581355", "0.57974845", "0.5776498", "0.57728815", "0.5749524", "0.57306874", "0.57267076", "0.57247126", "0.5697338", "0.5684668", "0.5668787", "0.56468636", "0.5630012", "0.562238", "0.5619401", "0.5616364" ]
0.72402173
0
Generate a rust-analyzer compatible rust-project.json file.
def generate_rust_project_json(self) -> None: if not self.rust_crates: return with open(os.path.join(self.environment.get_build_dir(), 'rust-project.json'), 'w', encoding='utf-8') as f: json.dump( { "sysroot_src": os.path.join(self.environment.coredata.compilers.host['rust'].get_sysroot(), 'lib/rustlib/src/rust/library/'), "crates": [c.to_json() for c in self.rust_crates.values()], }, f, indent=4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createproject(destinationdir):\n print(f\"Writing json data files to {destinationdir}\")\n return", "def projectToJSONFile(projectPath):\n jsonProjectFileName = projectPath.split('.')[0] + '_summary.json'\n jsonProject = projectToJSON(projectPath)\n with open (jsonProjectFileName, 'w') as outFile:\n outFile.write(json.dumps(jsonProject,\n sort_keys=True,\n indent=2, separators=(',', ':')))", "def main(root: Path = typer.Argument(Path.cwd(), help=\"Root path to look in\")):\n msg.info(f\"Updating projects.jsonl in {root}\")\n entries = []\n # We look specifically for project directories\n for path in root.glob(f\"**/*/{PROJECT_FILE}\"):\n path = path.parent\n\n # prep data for the json file\n config = load_project_config(path)\n entry = {\"shortname\": f\"{path.parent.name}/{path.name}\"}\n entry[\"title\"] = config[\"title\"]\n entry[\"description\"] = config.get(\"description\", \"\")\n entries.append(entry)\n\n with open(\"projects.jsonl\", \"w\", encoding=\"utf-8\") as jsonfile:\n for entry in entries:\n jsonfile.write(json.dumps(entry))\n jsonfile.write(\"\\n\")", "def create_update_pyproject_toml(self) -> None:\n if (self.toml_path).exists():\n # do not overwrite the version of a pre-existing file\n _pyproject = self.pyproject\n assert _pyproject is not None\n # clear out the packages section\n _pyproject[\"tool\"][\"poetry\"][\"packages\"] = []\n # update the dependencies section by readin that from the template file\n with open(CONFIG.template_path / \"pyproject.toml\", \"rb\") as f:\n tpl = tomllib.load(f)\n\n _pyproject[\"tool\"][\"poetry\"][\"dependencies\"] = tpl[\"tool\"][\"poetry\"][\"dependencies\"]\n\n else:\n # read the template pyproject.toml file from the template folder\n try:\n with open(CONFIG.template_path / \"pyproject.toml\", \"rb\") as f:\n _pyproject = tomllib.load(f)\n _pyproject[\"tool\"][\"poetry\"][\"version\"] = self.mpy_version\n except FileNotFoundError as e:\n log.error(f\"Could not find template pyproject.toml file {e}\")\n raise (e)\n\n # update the name , version and description of the package\n _pyproject[\"tool\"][\"poetry\"][\"name\"] = self.package_name\n _pyproject[\"tool\"][\"poetry\"][\"description\"] = self.description\n # write out the pyproject.toml file\n self.pyproject = _pyproject", "def _create_pyproject_toml(\n self,\n package_name: str,\n ) -> str:\n return f\"\"\"\n [tool.pytest.ini_options]\n DJANGO_SETTINGS_MODULE = \"reviewboard.settings\"\n django_debug_mode = false\n\n python_files = [\"tests.py\", \"test_*.py\"]\n python_classes = [\"*Tests\"]\n python_functions = [\"test_*\"]\n pythonpath = \".\"\n testpaths = [\"{package_name}\"]\n\n env = [\n \"RB_RUNNING_TESTS=1\",\n \"RBSSH_STORAGE_BACKEND=reviewboard.ssh.storage.FileSSHStorage\",\n ]\n\n addopts = [\"--reuse-db\"]\n\n required_plugins = [\n \"pytest-django\",\n \"pytest-env\",\n ]\n \"\"\"", "def project():", "def project():", "def project():", "def _build_project_template(self, output_filename=\"{}_p.json\"):\n\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n template = self._base_troposphere_template()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_project_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_project_template(template)\n\n template = utils.fix_troposphere_references(template)\n\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json())", "def 
assemble_project(project, base_dir, build_result=None):\n resources = project.resources.all()\n\n if project.is_standard_project_type:\n # Write out the sources, resources, and wscript and jshint file\n assemble_source_files(project, base_dir)\n if project.project_type != 'rocky':\n assemble_resource_directories(project, base_dir)\n assemble_resources(base_dir, project.resources_path, resources)\n with open(os.path.join(base_dir, 'wscript'), 'w') as wscript:\n wscript.write(generate_wscript_file(project))\n with open(os.path.join(base_dir, 'pebble-jshintrc'), 'w') as jshint:\n jshint.write(generate_jshint_file(project))\n elif project.project_type == 'simplyjs':\n # SimplyJS is a particularly special case\n assemble_simplyjs_sources(project, base_dir, build_result)\n elif project.project_type == 'pebblejs':\n # PebbleJS projects have to import the entire pebblejs library, including its wscript\n assemble_resource_directories(project, base_dir)\n shutil.rmtree(base_dir)\n shutil.copytree(settings.PEBBLEJS_ROOT, base_dir)\n assemble_resources(base_dir, project.resources_path, resources, type_restrictions=('png', 'bitmap'))\n assemble_source_files(project, base_dir)\n\n # All projects have a manifest\n manifest_filename = manifest_name_for_project(project)\n manifest_dict = generate_manifest_dict(project, resources)\n\n with open(os.path.join(base_dir, manifest_filename), 'w') as f:\n f.write(json.dumps(manifest_dict))", "def main(args):\n\n # If the given output format is not 'table', redirect logger's output to\n # the stderr.\n logger.setup_logger(args.verbose if 'verbose' in args else None,\n None if args.output_format == 'table' else 'stderr')\n\n context = analyzer_context.get_context()\n working_analyzers, errored = analyzer_types.check_supported_analyzers(\n args.analyzers,\n context)\n analyzer_types.check_available_analyzers(working_analyzers, errored)\n\n analyzer_environment = env.extend(context.path_env_extra,\n context.ld_lib_path_extra)\n\n analyzer_config_map = analyzer_types.build_config_handlers(\n args, context, working_analyzers)\n\n def uglify(text):\n \"\"\"\n csv and json format output contain this non human readable header\n string: no CamelCase and no space.\n \"\"\"\n return text.lower().replace(' ', '_')\n\n def match_guideline(checker_name, selected_guidelines):\n \"\"\"\n Returns True if checker_name gives reports related to any of the\n selected guideline rule.\n checker_name -- A full checker name.\n selected_guidelines -- A list of guideline names or guideline rule IDs.\n \"\"\"\n guideline = context.guideline_map.get(checker_name, {})\n guideline_set = set(guideline)\n for value in guideline.values():\n guideline_set |= set(value)\n\n return any(g in guideline_set for g in selected_guidelines)\n\n def format_guideline(guideline):\n \"\"\"\n Convert guideline rules to human-readable format.\n guideline -- Dictionary in the following format:\n {\"guideline_1\": [\"rule_1\", \"rule_2\"]}\n \"\"\"\n return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))\n for g, r in guideline.items())\n\n # List available checker profiles.\n if 'profile' in args and args.profile == 'list':\n if 'details' in args:\n header = ['Profile name', 'Description']\n rows = context.profile_map.available_profiles().items()\n else:\n header = ['Profile name']\n rows = [(key, \"\") for key in\n context.profile_map.available_profiles()]\n\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n print(twodim.to_str(args.output_format, header, rows))\n 
return\n\n # List checker config options.\n if 'checker_config' in args:\n if 'details' in args:\n header = ['Option', 'Description']\n else:\n header = ['Option']\n\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n rows = []\n analyzer_failures = []\n for analyzer in working_analyzers:\n config_handler = analyzer_config_map.get(analyzer)\n analyzer_class = analyzer_types.supported_analyzers[analyzer]\n\n configs = analyzer_class.get_checker_config(config_handler,\n analyzer_environment)\n if not configs:\n analyzer_failures.append(analyzer)\n continue\n\n rows.extend((':'.join((analyzer, c[0])), c[1]) if 'details' in args\n else (':'.join((analyzer, c[0])),) for c in configs)\n\n if rows:\n print(twodim.to_str(args.output_format, header, rows))\n\n if analyzer_failures:\n LOG.error(\"Failed to get checker configuration options for '%s' \"\n \"analyzer(s)! Please try to upgrade your analyzer \"\n \"version to use this feature.\",\n ', '.join(analyzer_failures))\n sys.exit(1)\n\n return\n\n if args.guideline is not None and len(args.guideline) == 0:\n result = defaultdict(set)\n\n for _, guidelines in context.guideline_map.items():\n for guideline, rules in guidelines.items():\n result[guideline] |= set(rules)\n\n header = ['Guideline', 'Rules']\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n if args.output_format == 'json':\n rows = [(g, sorted(list(r))) for g, r in result.items()]\n else:\n rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]\n\n if args.output_format == 'rows':\n for row in rows:\n print('Guideline: {}'.format(row[0]))\n print('Rules: {}'.format(row[1]))\n else:\n print(twodim.to_str(args.output_format, header, rows))\n return\n\n # List available checkers.\n if 'details' in args:\n header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',\n 'Description']\n else:\n header = ['Name']\n\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n rows = []\n for analyzer in working_analyzers:\n config_handler = analyzer_config_map.get(analyzer)\n analyzer_class = analyzer_types.supported_analyzers[analyzer]\n\n checkers = analyzer_class.get_analyzer_checkers(config_handler,\n analyzer_environment)\n\n profile_checkers = []\n if 'profile' in args:\n if args.profile not in context.profile_map.available_profiles():\n LOG.error(\"Checker profile '%s' does not exist!\",\n args.profile)\n LOG.error(\"To list available profiles, use '--profile list'.\")\n sys.exit(1)\n\n profile_checkers = [('profile:' + args.profile, True)]\n\n config_handler.initialize_checkers(context,\n checkers,\n profile_checkers)\n\n for checker_name, value in config_handler.checks().items():\n state, description = value\n\n if state != CheckerState.enabled and 'profile' in args:\n continue\n\n if state == CheckerState.enabled and 'only_disabled' in args:\n continue\n elif state != CheckerState.enabled and 'only_enabled' in args:\n continue\n\n if args.output_format == 'json':\n state = state == CheckerState.enabled\n else:\n state = '+' if state == CheckerState.enabled else '-'\n\n if args.guideline is not None:\n if not match_guideline(checker_name, args.guideline):\n continue\n\n if 'details' in args:\n severity = context.severity_map.get(checker_name)\n guideline = context.guideline_map.get(checker_name, {})\n if args.output_format != 'json':\n guideline = format_guideline(guideline)\n rows.append([state, checker_name, analyzer,\n severity, guideline, description])\n else:\n 
rows.append([checker_name])\n\n if 'show_warnings' in args:\n severity = context.severity_map.get('clang-diagnostic-')\n for warning in get_warnings(analyzer_environment):\n warning = 'clang-diagnostic-' + warning\n\n if args.guideline is not None:\n if not match_guideline(warning, args.guideline):\n continue\n\n guideline = context.guideline_map.get(warning, {})\n if args.output_format != 'json':\n guideline = format_guideline(guideline)\n\n if 'details' in args:\n rows.append(['', warning, '-', severity, guideline, '-'])\n else:\n rows.append([warning])\n\n if rows:\n print(twodim.to_str(args.output_format, header, rows))\n\n analyzer_types.print_unsupported_analyzers(errored)", "def create_gen_json(self, out_file):\n\n params = self.create_package_dict()\n with open(out_file, 'w') as fp:\n json.dump(params, fp)", "def python_to_json(run_dir='.', in_py='in.py', out_json='out.json'):\n import sirepo.importer\n with pkio.save_chdir(run_dir):\n out = sirepo.importer.python_to_json(in_py)\n with open(out_json, 'w') as f:\n f.write(out)\n return 'Created: {}'.format(out_json)", "def dump_project_py():\n\n # list all project.* attributes\n for key in dir(project):\n if not key.startswith('__'):\n try:\n log.info('project.%s=%s' % (key, eval('project.%s' % key)))\n except AttributeError:\n pass", "def projectToJSON(projectPath):\n summary = {}\n if not projectPath.endswith('zip') and not projectPath.endswith('.aia'):\n raise Exception(\"project is not .aia or .zip\")\n with zipfile.ZipFile(projectPath, 'r') as myZip:\n summary['**Project Name'] = findName(myZip)\n summary['**created'], summary['**modified'] = findCreatedModifiedTimes(myZip)\n listOfScreens = findScreenNames(myZip)\n summary['*Number of Screens'] = len(listOfScreens)\n media = []\n for screen in listOfScreens:\n screenInfo = screenToJSON(myZip, screen, projectPath)\n summary[str(screen)] = screenInfo[0]\n media += screenInfo[1]\n summary['*Media Assets'] = list(set(media))\n return summary", "def pyproject_toml():\n root_path = HERE.joinpath(\"../..\").resolve()\n return f\"\"\"\n[build-system]\nrequires = [\"jupyter_packaging@file://{root_path.as_posix()}\"]\nbuild-backend = \"setuptools.build_meta\"\n\"\"\"", "def cc_json():\n return sh(\"intercept-build ./build.py compile:\\\\* -R; ./build.py -c compile:\\\\*\")", "def main():\n parser = argparse.ArgumentParser(description=\"Script for generating an index template out of a document\")\n parser.add_argument(\"INDEX_NAME\", help=\"Name of index\")\n parser.add_argument(\"--output_file\", help=\"File to write schema to\")\n args = parser.parse_args()\n\n output = generate_template(args.INDEX_NAME)\n if args.output_file:\n with open(args.output_file, \"w\") as file:\n json.dump(output, file, ensure_ascii=False, indent=4, sort_keys=True)\n else:\n print(json.dumps(output, ensure_ascii=False, indent=4, sort_keys=True))", "def generate(random, pid, autogen_tools, n):\n\n generator_path = autogen_tools.get_directory(__file__)\n\n template_path = path.join(generator_path, \"code.txt.template\")\n rendered_template_path = path.join(generator_path, \"code.txt\")\n\n autogen_tools.replace_source_tokens(\n template_path,\n {\"flag\": gen_code(n, \"Aviation House\")},\n rendered_template_path\n )\n\n code_link = autogen_tools.generate_resource_link(pid, \"code.txt\", title=\"Encrypted file\")\n\n return {\n \"resource_files\": {\n \"public\": [\n (rendered_template_path, \"code.txt\"),\n ],\n },\n \"static_files\": {\n },\n \"problem_updates\": {\n \"description\": \"<p>We've updated the 
system to AES. We heard that this is military grade encryption so that should fix everything</p><p>The team have stored the password in %s. Bet you can't get into it</p>\" % code_link\n }\n }", "def main():\n global GOLIVE # If False, it's a dry run only\n global PROJECT_ROOT\n global CAD_SOURCE\n global REVIT_SOURCE\n global GENERIC_SOURCE\n global FOLDER_LIST\n global logger\n\n logger = logging.getLogger('__name__')\n stream_handler = logging.StreamHandler()\n logger.addHandler(stream_handler)\n logger.setLevel(logging.INFO)\n\n logger.debug(sys.argv)\n parser = argparse.ArgumentParser(description='Create a project')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-i', action='store_true', help=\"Show INFO messages\")\n group.add_argument('-d', action='store_true', help=\"Show DEBUG messages\")\n parser.add_argument('-t', action='store_true', help='Test: dry run only')\n parser.add_argument('-r', help=\"Set root directory\")\n parser.add_argument('project_data', nargs='+', help=\"<num>%,<name>%<type>\")\n\n args = parser.parse_args(sys.argv[1:])\n logger.debug(args)\n if args.i:\n logger.info('Setting logging level to INFO')\n logger.setLevel(logging.INFO)\n elif args.d:\n logger.info('Setting logging level to DEBUG')\n logger.setLevel(logging.DEBUG)\n if args.t:\n GOLIVE = False\n logger.info('Dry run...')\n if args.r:\n PROJECT_ROOT = args.r\n logger.info(f'Setting PROJECT_ROOT to {args.r}')\n\n CAD_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'CAD_Template')\n REVIT_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'Revit_Template')\n GENERIC_SOURCE = os.path.join(PROJECT_ROOT,\n 'Templates', 'Generic_Template')\n FOLDER_LIST = os.listdir(PROJECT_ROOT)\n project_info = ' '.join(args.project_data) # The parser split at spaces\n logger.debug(f'Project info: {project_info}')\n project_info = project_info.split('%') # Divide it into our 3 fields\n project_number, project_name, project_type = project_info\n assert project_type in ['Revit', 'CAD', 'Generic']\n\n if checkNewProject(project_number, project_name): # Sanity checks\n success = createProject(project_number, project_name, project_type)\n if success:\n logger.info(f'Created project {project_number} {project_name}')\n else:\n logger.error('Project creation failed.')", "def to_analyzer_dict(self):\n return {\"directory\": self.directory,\n \"command\": self.__command_str(),\n \"file\": self.source}", "def generate_project_files(specs_path, dst_path):\n hm_generator = HookManGenerator(hook_spec_file_path=specs_path)\n hm_generator.generate_project_files(Path(dst_path))\n return 0", "def Write(self):\n template_mappings = {\n 'pypi_token': self._project_definition.pypi_token or ''}\n\n file_content = []\n\n template_data = self._GenerateFromTemplate('environment', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'pypi_token', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('matrix', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('install', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name != 'l2tdevtools':\n template_data = self._GenerateFromTemplate(\n 'install_l2tdevtools', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name in 
self._PROJECTS_WITHOUT_BUILD:\n template_filename = 'build_off'\n else:\n template_filename = 'build'\n\n template_data = self._GenerateFromTemplate(\n template_filename, template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('test_script', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n template_data = self._GenerateFromTemplate('artifacts', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'deploy_script', template_mappings)\n file_content.append(template_data)\n\n file_content = ''.join(file_content)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n file_object.write(file_content)", "def mkpy(project_name, mode, pkg):\n\n MAIN_FOLDER = data.get_base_path(data.PYTHON)\n\n if mode != 'MAIN':\n MAIN_FOLDER += f'{mode}/'\n\n pyproject = folders.PyProject(project_name, MAIN_FOLDER)\n\n pyproject.create_project(pkg)\n click.echo(f'Project created succesfull in {pyproject.project_path}')\n cli_commands.start_git(pyproject.project_path)\n cli_commands.show_dir_path(pyproject.project_path)\n # cli_commands.start_vscode(pyproject.project_path)\n\n click.echo('Project Path copied to clipboard...')", "def _build_pre_project_template(self, output_filename=\"{}_pr_p.json\"):\n template = actions.ActionsTemplate()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_pre_project_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_pre_project_template(template)\n\n if template:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json(indent=4))", "def main():\n \n root = Folder(name=os.getcwd(), file='meta.json',\n collection='.github/jekyll')\n root.update()\n root.export_folders(True)", "def generate_cpp():\n cpp_file = AUTOGEN_WARNING\n cpp_file += \"// Implements basic nuclear data functions.\\n\"\n cpp_file += \"#ifndef PYNE_IS_AMALGAMATED\\n\"\n cpp_file += '#include \"atomic_data.h\"\\n'\n cpp_file += '#include \"nucname.h\"\\n'\n cpp_file += \"#endif\\n\"\n cpp_file += \" \\n\"\n cpp_file += \"void pyne::_load_atomic_mass_map_memory() { \\n\"\n cpp_file += \" // header version of atomic weight table data \\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!atomic_mass_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_atomic_mass_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!natural_abund_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_abund_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // calculate the atomic_masses of the elements \\n\"\n cpp_file += \" std::map<int,double> :: iterator it;\\n\"\n cpp_file += \" \\n\"\n cpp_file += \" for (int z = 1; z <= 92 ; z++) {\\n\"\n cpp_file += \" // loop through the natural abundance map\\n\"\n cpp_file += \" double element_atomic_weight = 0.0;\\n\"\n cpp_file += \" for (it = natural_abund_map.begin(); it != natural_abund_map.end() ; ++it){\\n\"\n cpp_file += \" // if the atomic number of the abudance matches the\\n\"\n cpp_file += \" 
// that of index\\n\"\n cpp_file += \" if(pyne::nucname::znum(it->first) == z) {\\n\"\n cpp_file += \" // take atomic abundance and multiply by mass\\n\"\n cpp_file += (\n \" // to get the mass of that nuclide / 100 since abundance is in %\\n\"\n )\n cpp_file += \" element_atomic_weight += (it->second*atomic_mass_map[it->first]/100.0);\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // insert the abundance of the element into the list\\n\"\n cpp_file += \" atomic_mass_map[z*10000000] = element_atomic_weight;\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_atomic_mass_map() { \\n\"\n cpp_file += generate_atomic_mass()\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_abund_map() { \\n\"\n cpp_file += generate_abundances()\n cpp_file += \"}\\n\"\n return cpp_file", "def main():\n os.makedirs(\"../json-data\", exist_ok=True)\n # num_docs = 1005\n num_docs = int(sys.argv[1])\n for answerno in range(num_docs):\n print('Creating document', answerno, 'of', num_docs)\n basename = \"../json-data/chunck_%s\" % uuid.uuid4()\n tempname = basename + '.temp.gz'\n longtermname = basename + '.json.gz'\n\n # We compress with gzip.\n # It's relatively fast compression.\n # We could compress with bzip2 or zlib instead if we have the CPU time available.\n # We could do bits and bytes, but that's harder to debug, and only worth it if there's a LOT of data to store.\n # We could eliminate all unanswered responses, but that is a little prone to surprises.\n # We also have the option of using bson instead of json.\n with gzip.open(tempname, \"w\") as answerfile:\n row = {\"pk\": \"%d\" % answerno}\n for carvar in constants.carvars:\n row[carvar] = random.choice(constants.carbrands)\n for carvar in constants.mrcarvars:\n for carbrand in constants.carbrands:\n row[\"%s.%s\" % (carvar, carbrand)] = random.choice(constants.answers)\n for singvar in constants.singervars:\n row[singvar] = random.choice(constants.singers)\n for singvar in constants.mrsingervars:\n for singer in constants.singers:\n row[\"%s.%s\" % (singvar, singer)] = random.choice(constants.answers)\n string = json.dumps(row)\n answerfile.write(string.encode('UTF-8'))\n os.rename(tempname, longtermname)", "def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)", "def generate(env, daos_prefix, comp_prefix, args):\n analyzer = Analyzer(env, daos_prefix, comp_prefix, args)\n analyzer.analyze_on_exit()" ]
[ "0.5765005", "0.5750292", "0.56758595", "0.5569898", "0.55584913", "0.547835", "0.547835", "0.547835", "0.5442536", "0.5390132", "0.5291652", "0.52869755", "0.5279458", "0.52660453", "0.525721", "0.52367043", "0.5209489", "0.52066916", "0.5184295", "0.51825064", "0.5129931", "0.5077997", "0.5077105", "0.5072831", "0.5065653", "0.5062996", "0.50301856", "0.49904397", "0.4971849", "0.49377498" ]
0.69861084
0
Splits the target's sources into .vala, .gs, .vapi, and other sources. Handles both preexisting and generated sources. Returns a tuple (vala, vapi, others) each of which is a dictionary with the keys being the path to the file (relative to the build directory) and the value being the object that generated or represents the file.
def split_vala_sources(self, t: build.BuildTarget) -> \ T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.Tuple[T.MutableMapping[str, File], T.MutableMapping]]: vala: T.MutableMapping[str, File] = OrderedDict() vapi: T.MutableMapping[str, File] = OrderedDict() others: T.MutableMapping[str, File] = OrderedDict() othersgen: T.MutableMapping[str, File] = OrderedDict() # Split preexisting sources for s in t.get_sources(): # BuildTarget sources are always mesonlib.File files which are # either in the source root, or generated with configure_file and # in the build root if not isinstance(s, File): raise InvalidArguments(f'All sources in target {t!r} must be of type mesonlib.File, not {s!r}') f = s.rel_to_builddir(self.build_to_src) if s.endswith(('.vala', '.gs')): srctype = vala elif s.endswith('.vapi'): srctype = vapi else: srctype = others srctype[f] = s # Split generated sources for gensrc in t.get_generated_sources(): for s in gensrc.get_outputs(): f = self.get_target_generated_dir(t, gensrc, s) if s.endswith(('.vala', '.gs')): srctype = vala elif s.endswith('.vapi'): srctype = vapi # Generated non-Vala (C/C++) sources. Won't be used for # generating the Vala compile rule below. else: srctype = othersgen # Duplicate outputs are disastrous if f in srctype and srctype[f] is not gensrc: msg = 'Duplicate output {0!r} from {1!r} {2!r}; ' \ 'conflicts with {0!r} from {4!r} {3!r}' \ ''.format(f, type(gensrc).__name__, gensrc.name, srctype[f].name, type(srctype[f]).__name__) raise InvalidArguments(msg) # Store 'somefile.vala': GeneratedList (or CustomTarget) srctype[f] = gensrc return vala, vapi, (others, othersgen)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_vala_compile(self, target: build.BuildTarget) -> \\\n T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]:\n (vala_src, vapi_src, other_src) = self.split_vala_sources(target)\n extra_dep_files = []\n if not vala_src:\n raise InvalidArguments(f'Vala library {target.name!r} has no Vala or Genie source files.')\n\n valac = target.compilers['vala']\n c_out_dir = self.get_target_private_dir(target)\n # C files generated by valac\n vala_c_src: T.List[str] = []\n # Files generated by valac\n valac_outputs: T.List = []\n # All sources that are passed to valac on the commandline\n all_files = list(vapi_src)\n # Passed as --basedir\n srcbasedir = os.path.join(self.build_to_src, target.get_subdir())\n for (vala_file, gensrc) in vala_src.items():\n all_files.append(vala_file)\n # Figure out where the Vala compiler will write the compiled C file\n #\n # If the Vala file is in a subdir of the build dir (in our case\n # because it was generated/built by something else), and is also\n # a subdir of --basedir (because the builddir is in the source\n # tree, and the target subdir is the source root), the subdir\n # components from the source root till the private builddir will be\n # duplicated inside the private builddir. Otherwise, just the\n # basename will be used.\n #\n # If the Vala file is outside the build directory, the paths from\n # the --basedir till the subdir will be duplicated inside the\n # private builddir.\n if isinstance(gensrc, (build.CustomTarget, build.GeneratedList)) or gensrc.is_built:\n vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'\n # Check if the vala file is in a subdir of --basedir\n abs_srcbasedir = os.path.join(self.environment.get_source_dir(), target.get_subdir())\n abs_vala_file = os.path.join(self.environment.get_build_dir(), vala_file)\n if PurePath(os.path.commonpath((abs_srcbasedir, abs_vala_file))) == PurePath(abs_srcbasedir):\n vala_c_subdir = PurePath(abs_vala_file).parent.relative_to(abs_srcbasedir)\n vala_c_file = os.path.join(str(vala_c_subdir), vala_c_file)\n else:\n path_to_target = os.path.join(self.build_to_src, target.get_subdir())\n if vala_file.startswith(path_to_target):\n vala_c_file = os.path.splitext(os.path.relpath(vala_file, path_to_target))[0] + '.c'\n else:\n vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'\n # All this will be placed inside the c_out_dir\n vala_c_file = os.path.join(c_out_dir, vala_c_file)\n vala_c_src.append(vala_c_file)\n valac_outputs.append(vala_c_file)\n\n args = self.generate_basic_compiler_args(target, valac)\n args += valac.get_colorout_args(target.get_option(OptionKey('b_colorout')))\n # Tell Valac to output everything in our private directory. 
Sadly this\n # means it will also preserve the directory components of Vala sources\n # found inside the build tree (generated sources).\n args += ['--directory', c_out_dir]\n args += ['--basedir', srcbasedir]\n if target.is_linkable_target():\n # Library name\n args += ['--library', target.name]\n # Outputted header\n hname = os.path.join(self.get_target_dir(target), target.vala_header)\n args += ['--header', hname]\n if target.is_unity:\n # Without this the declarations will get duplicated in the .c\n # files and cause a build failure when all of them are\n # #include-d in one .c file.\n # https://github.com/mesonbuild/meson/issues/1969\n args += ['--use-header']\n valac_outputs.append(hname)\n # Outputted vapi file\n vapiname = os.path.join(self.get_target_dir(target), target.vala_vapi)\n # Force valac to write the vapi and gir files in the target build dir.\n # Without this, it will write it inside c_out_dir\n args += ['--vapi', os.path.join('..', target.vala_vapi)]\n valac_outputs.append(vapiname)\n target.outputs += [target.vala_header, target.vala_vapi]\n target.install_tag += ['devel', 'devel']\n # Install header and vapi to default locations if user requests this\n if len(target.install_dir) > 1 and target.install_dir[1] is True:\n target.install_dir[1] = self.environment.get_includedir()\n if len(target.install_dir) > 2 and target.install_dir[2] is True:\n target.install_dir[2] = os.path.join(self.environment.get_datadir(), 'vala', 'vapi')\n # Generate GIR if requested\n if isinstance(target.vala_gir, str):\n girname = os.path.join(self.get_target_dir(target), target.vala_gir)\n args += ['--gir', os.path.join('..', target.vala_gir)]\n valac_outputs.append(girname)\n target.outputs.append(target.vala_gir)\n target.install_tag.append('devel')\n # Install GIR to default location if requested by user\n if len(target.install_dir) > 3 and target.install_dir[3] is True:\n target.install_dir[3] = os.path.join(self.environment.get_datadir(), 'gir-1.0')\n # Detect gresources and add --gresources arguments for each\n for gensrc in other_src[1].values():\n if isinstance(gensrc, modules.GResourceTarget):\n gres_xml, = self.get_custom_target_sources(gensrc)\n args += ['--gresources=' + gres_xml]\n extra_args = []\n\n for a in target.extra_args.get('vala', []):\n if isinstance(a, File):\n relname = a.rel_to_builddir(self.build_to_src)\n extra_dep_files.append(relname)\n extra_args.append(relname)\n else:\n extra_args.append(a)\n dependency_vapis = self.determine_dep_vapis(target)\n extra_dep_files += dependency_vapis\n args += extra_args\n element = NinjaBuildElement(self.all_outputs, valac_outputs,\n self.compiler_to_rule_name(valac),\n all_files + dependency_vapis)\n element.add_item('ARGS', args)\n element.add_dep(extra_dep_files)\n self.add_build(element)\n self.create_target_source_introspection(target, valac, args, all_files, [])\n return other_src[0], other_src[1], vala_c_src", "def calculate_gen(source):\r\n\r\n with open(source, 'r') as thrift:\r\n lines = thrift.readlines()\r\n namespaces = {}\r\n types = defaultdict(set)\r\n for line in lines:\r\n match = NAMESPACE_PARSER.match(line)\r\n if match:\r\n lang = match.group(1)\r\n namespace = match.group(2)\r\n namespaces[lang] = namespace\r\n else:\r\n match = TYPE_PARSER.match(line)\r\n if match:\r\n typename = match.group(1)\r\n name = match.group(2)\r\n types[typename].add(name)\r\n\r\n genfiles = defaultdict(set)\r\n\r\n namespace = namespaces.get('py')\r\n if namespace:\r\n genfiles['py'].update(calculate_python_genfiles(namespace, 
types))\r\n\r\n namespace = namespaces.get('java')\r\n if namespace:\r\n genfiles['java'].update(calculate_java_genfiles(namespace, types))\r\n\r\n return types['service'], genfiles", "def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')", "def source_files(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___DebuggedSourceFile]:", "def get_target_generated_sources(self, target: build.BuildTarget) -> T.MutableMapping[str, File]:\n srcs: T.MutableMapping[str, File] = OrderedDict()\n for gensrc in target.get_generated_sources():\n for s in gensrc.get_outputs():\n rel_src = self.get_target_generated_dir(target, gensrc, s)\n srcs[rel_src] = File.from_built_relative(rel_src)\n return srcs", "def collect_source_hpp_files(self):\n for pattern in self.package_info.source_hpp_patterns:\n for filename in fnmatch.filter(self.source_hpp_files, pattern):\n self.package_info.source_hpp_files.append(os.path.basename(filename))\n self.source_dirs.add(os.path.abspath(os.path.dirname(filename)))\n\n for root, _, filenames in os.walk(self.source_root, followlinks=True):\n for pattern in 
self.package_info.source_hpp_patterns:\n for filename in fnmatch.filter(filenames, pattern):\n if \"pybindx\" not in filename:\n self.package_info.source_hpp_files.append(os.path.join(root, filename))\n self.package_info.source_hpp_files = [path for path in self.package_info.source_hpp_files\n if self.wrapper_root not in path]", "def _compute_sources_by_target(self, targets):\r\n def calculate_sources(target):\r\n sources = [s for s in target.sources_relative_to_buildroot() if s.endswith(self._file_suffix)]\r\n # TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets.\r\n if hasattr(target, 'java_sources') and target.java_sources:\r\n sources.extend(self._resolve_target_sources(target.java_sources, '.java'))\r\n return sources\r\n return dict([(t, calculate_sources(t)) for t in targets])", "def sources(source):\n\n source2 = models.Source(name=u\"Bob's Funerals.com\", url=u\"http://www.bobsfunerals.com\")\n source3 = models.Source(name=u\"Jim's Funerals.com\", url=u\"http://www.jimsfunerals.com\")\n return (source, source2, source3)", "def separateSource(self,compInfo):\n sourceInfo = {}\n source = []\n for eachline in compInfo:\n words = eachline.split() ##This line need to be confirmed with Manas\n if eachline[0] in ['f', 'h']:\n source.append(words[3])\n if len(source) > 0:\n for eachline in compInfo:\n words_s = eachline.split()\n if words_s[0] in source:\n sourceInfo[words_s[0]] = words_s[1:3]\n return sourceInfo", "def Get_Source(self):\r\n source_directory = filedialog.askdirectory(initialdir = initial_dir, title=\"Select source folder\")\r\n global source\r\n source = source_directory \r\n self.txt_srcPath.delete(0, 'end')\r\n self.txt_srcPath.insert(0, str(source_directory))\r\n file_type = self.txt_fileType.get()\r\n hrs= int(self.txt_fileAge.get())\r\n file_age = datetime.timedelta(hours=hrs)\r\n global filenames_list\r\n filenames_list = []\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for f in glob.iglob(os.path.join(source, file_type)):\r\n files_list = [os.path.splitext(f)[0]]\r\n filenames_list.append(f) \r\n return(filenames_list, source, files_list)", "def pyrex_sources(self, sources, extension):\n new_sources = []\n ext_name = extension.name.split('.')[-1]\n for source in sources:\n (base, ext) = os.path.splitext(source)\n if ext == '.pyx':\n target_file = self.generate_a_pyrex_source(base, ext_name,\n source,\n extension)\n new_sources.append(target_file)\n else:\n new_sources.append(source)\n return new_sources", "def gather() -> None:\n # pylint: disable=too-many-locals\n\n # First off, clear out any existing output.\n existing_dirs = [\n os.path.join('src/external', d) for d in os.listdir('src/external')\n if d.startswith('python-') and d != 'python-notes.txt'\n ]\n existing_dirs += [\n os.path.join('assets/src', d) for d in os.listdir('assets/src')\n if d.startswith('pylib-')\n ]\n for existing_dir in existing_dirs:\n efrotools.run('rm -rf \"' + existing_dir + '\"')\n\n for buildtype in ['debug', 'release']:\n debug = buildtype == 'debug'\n bsuffix = '_debug' if buildtype == 'debug' else ''\n bsuffix2 = '-debug' if buildtype == 'debug' else ''\n\n libname = 'python' + PYTHON_VERSION_MAJOR + ('dm' if debug else 'm')\n\n bases = {\n 'mac':\n f'build/python_apple_mac{bsuffix}/build/macOS',\n 'ios':\n f'build/python_apple_ios{bsuffix}/build/iOS',\n 'tvos':\n f'build/python_apple_tvos{bsuffix}/build/tvOS',\n 'android_arm':\n f'build/python_android_arm{bsuffix}/build/sysroot',\n 'android_arm64':\n 
f'build/python_android_arm64{bsuffix}/build/sysroot',\n 'android_x86':\n f'build/python_android_x86{bsuffix}/build/sysroot',\n 'android_x86_64':\n f'build/python_android_x86_64{bsuffix}/build/sysroot'\n }\n\n # Note: only need pylib for the first in each group.\n builds: List[Dict[str, Any]] = [{\n 'name':\n 'macos',\n 'group':\n 'apple',\n 'headers':\n bases['mac'] + '/Support/Python/Headers',\n 'libs': [\n bases['mac'] + '/Support/Python/libPython.a',\n bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['mac'] + '/Support/XZ/libxz.a'\n ],\n 'pylib':\n (bases['mac'] + '/python/lib/python' + PYTHON_VERSION_MAJOR),\n }, {\n 'name':\n 'ios',\n 'group':\n 'apple',\n 'headers':\n bases['ios'] + '/Support/Python/Headers',\n 'libs': [\n bases['ios'] + '/Support/Python/libPython.a',\n bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['ios'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'tvos',\n 'group':\n 'apple',\n 'headers':\n bases['tvos'] + '/Support/Python/Headers',\n 'libs': [\n bases['tvos'] + '/Support/Python/libPython.a',\n bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',\n bases['tvos'] + '/Support/XZ/libxz.a'\n ],\n }, {\n 'name':\n 'android_arm',\n 'group':\n 'android',\n 'headers':\n bases['android_arm'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm'] + '/usr/lib/libssl.a',\n bases['android_arm'] + '/usr/lib/libcrypto.a',\n bases['android_arm'] + '/usr/lib/liblzma.a',\n bases['android_arm'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst':\n 'android_armeabi-v7a',\n 'pylib': (bases['android_arm'] + '/usr/lib/python' +\n PYTHON_VERSION_MAJOR),\n }, {\n 'name': 'android_arm64',\n 'group': 'android',\n 'headers': bases['android_arm64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_arm64'] + f'/usr/lib/lib{libname}.a',\n bases['android_arm64'] + '/usr/lib/libssl.a',\n bases['android_arm64'] + '/usr/lib/libcrypto.a',\n bases['android_arm64'] + '/usr/lib/liblzma.a',\n bases['android_arm64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_arm64-v8a',\n }, {\n 'name': 'android_x86',\n 'group': 'android',\n 'headers': bases['android_x86'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86'] + '/usr/lib/libssl.a',\n bases['android_x86'] + '/usr/lib/libcrypto.a',\n bases['android_x86'] + '/usr/lib/liblzma.a',\n bases['android_x86'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86',\n }, {\n 'name': 'android_x86_64',\n 'group': 'android',\n 'headers': bases['android_x86_64'] + f'/usr/include/{libname}',\n 'libs': [\n bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',\n bases['android_x86_64'] + '/usr/lib/libssl.a',\n bases['android_x86_64'] + '/usr/lib/libcrypto.a',\n bases['android_x86_64'] + '/usr/lib/liblzma.a',\n bases['android_x86_64'] + '/usr/lib/libsqlite3.a'\n ],\n 'libinst': 'android_x86_64',\n }]\n\n for build in builds:\n\n grp = build['group']\n builddir = f'src/external/python-{grp}{bsuffix2}'\n header_dst = os.path.join(builddir, 'include')\n lib_dst = os.path.join(builddir, 'lib')\n assets_src_dst = f'assets/src/pylib-{grp}'\n\n # Do some setup only once per group.\n if not os.path.exists(builddir):\n efrotools.run('mkdir -p \"' + builddir + '\"')\n efrotools.run('mkdir -p \"' + lib_dst + '\"')\n\n # Only pull modules into game assets on release pass.\n if not debug:\n # Copy system modules into the src assets\n # dir for this group.\n efrotools.run('mkdir -p \"' + assets_src_dst + '\"')\n efrotools.run(\n 'rsync 
--recursive --include \"*.py\"'\n ' --exclude __pycache__ --include \"*/\" --exclude \"*\" \"'\n + build['pylib'] + '/\" \"' + assets_src_dst + '\"')\n\n # Prune a bunch of modules we don't need to cut\n # down on size.\n prune = [\n 'config-*', 'idlelib', 'lib-dynload', 'lib2to3',\n 'multiprocessing', 'pydoc_data', 'site-packages',\n 'ensurepip', 'tkinter', 'wsgiref', 'distutils',\n 'turtle.py', 'turtledemo', 'test', 'sqlite3/test',\n 'unittest', 'dbm', 'venv', 'ctypes/test', 'imaplib.py',\n '_sysconfigdata_*'\n ]\n efrotools.run('cd \"' + assets_src_dst + '\" && rm -rf ' +\n ' '.join(prune))\n\n # Some minor filtering to system scripts:\n # on iOS/tvOS, addusersitepackages() leads to a crash\n # due to _sysconfigdata_dm_ios_darwin module not existing,\n # so let's skip that.\n fname = f'{assets_src_dst}/site.py'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt,\n ' known_paths = addusersitepackages(known_paths)',\n ' # efro tweak: this craps out on ios/tvos.\\n'\n ' # (and we don\\'t use it anyway)\\n'\n ' # known_paths = addusersitepackages(known_paths)')\n efrotools.writefile(fname, txt)\n\n # Copy in a base set of headers (everything in a group should\n # be using the same headers)\n efrotools.run(f'cp -r \"{build[\"headers\"]}\" \"{header_dst}\"')\n\n # Clear whatever pyconfigs came across; we'll build our own\n # universal one below.\n efrotools.run('rm ' + header_dst + '/pyconfig*')\n\n # Write a master pyconfig header that reroutes to each\n # platform's actual header.\n with open(header_dst + '/pyconfig.h', 'w') as hfile:\n hfile.write(\n '#if BA_OSTYPE_MACOS\\n'\n '#include \"pyconfig-macos.h\"\\n\\n'\n '#elif BA_OSTYPE_IOS\\n'\n '#include \"pyconfig-ios.h\"\\n\\n'\n '#elif BA_OSTYPE_TVOS\\n'\n '#include \"pyconfig-tvos.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__arm__)\\n'\n '#include \"pyconfig-android_arm.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\\n'\n '#include \"pyconfig-android_arm64.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__i386__)\\n'\n '#include \"pyconfig-android_x86.h\"\\n\\n'\n '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\\n'\n '#include \"pyconfig-android_x86_64.h\"\\n\\n'\n '#else\\n'\n '#error unknown platform\\n\\n'\n '#endif\\n')\n\n # Now copy each build's config headers in with unique names.\n cfgs = [\n f for f in os.listdir(build['headers'])\n if f.startswith('pyconfig')\n ]\n\n # Copy config headers to their filtered names.\n for cfg in cfgs:\n out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])\n if cfg == 'pyconfig.h':\n\n # For platform's root pyconfig.h we need to filter\n # contents too (those headers can themselves include\n # others; ios for instance points to a arm64 and a\n # x86_64 variant).\n contents = efrotools.readfile(build['headers'] + '/' + cfg)\n contents = contents.replace('pyconfig',\n 'pyconfig-' + build['name'])\n efrotools.writefile(header_dst + '/' + out, contents)\n else:\n # other configs we just rename\n efrotools.run('cp \"' + build['headers'] + '/' + cfg +\n '\" \"' + header_dst + '/' + out + '\"')\n\n # Copy in libs. 
If the lib gave a specific install name,\n # use that; otherwise use name.\n targetdir = lib_dst + '/' + build.get('libinst', build['name'])\n efrotools.run('rm -rf \"' + targetdir + '\"')\n efrotools.run('mkdir -p \"' + targetdir + '\"')\n for lib in build['libs']:\n efrotools.run('cp \"' + lib + '\" \"' + targetdir + '\"')\n\n print('Great success!')", "def _compute_classes_by_source(self, analysis_file=None):\r\n if analysis_file is None:\r\n analysis_file = self._analysis_file\r\n\r\n if not os.path.exists(analysis_file):\r\n return {}\r\n buildroot = get_buildroot()\r\n products = self._analysis_parser.parse_products_from_path(analysis_file)\r\n classes_by_src = {}\r\n for src, classes in products.items():\r\n relsrc = os.path.relpath(src, buildroot)\r\n classes_by_src[relsrc] = classes\r\n return classes_by_src", "def swig_sources(self, srcs, ext):\n srcs = build_ext.build_ext.swig_sources(self, srcs, ext)\n newsrcs = []\n for src in srcs:\n if src.endswith('_gen.cpp'):\n newsrcs.append(join(self.build_temp, src))\n else:\n newsrcs.append(src)\n return newsrcs", "def get_sources(self, sources=None):\n\n if sources is None:\n with open(self.path, 'r') as infile:\n keys = list(json.loads(next(infile)).keys())\n sources = [\n k for k in keys\n ] + [\n 'raw_' + k for k in keys\n ] + [\n k + '_length' for k in keys\n ]\n\n elif not isinstance(sources, (list, tuple)):\n sources = [sources]\n\n for source in sources:\n if source not in self.sources:\n raise KeyError(\n 'Invalid data key: {}. Valid keys are: {}'.format(\n source, ', '.join(str(k) for k in self.sources.keys())\n ))\n\n return {k : self.sources[k] for k in sources}", "def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def generate_files(self):\n import re\n for year, url in self.metadata.build.sources.items():\n zf = self.filesystem.download(url)\n for fn in self.filesystem.unzip_dir(zf, re.compile(r'.*all.*', re.IGNORECASE)):\n yield year, fn", "def _compile_sources(sources: Union[Sequence[_SourceTypes], _SourceTypes]) -> Dict[str, Any]:\n if not isinstance(sources, Sequence):\n sources = [sources]\n\n compiled: Dict[str, Any] = {}\n for source in sources:\n if callable(source):\n source = source(compiled) # Pass what we have parsed so far to the function\n compiled = _merge_sources(dest=compiled, source=source)\n return compiled", "def sources(self):\n for source_name, source in self._sources.items():\n yield source_name, source", "def calculate_compile_sources_HACK_FOR_SCROOGE_LEGACY(targets, is_thrift_target):\r\n\r\n dirs = set()\r\n sources = set()\r\n\r\n def collect_sources(target):\r\n for source in target.sources:\r\n dirs.add(os.path.normpath(os.path.join(target.target_base, os.path.dirname(source))))\r\n sources.add(os.path.join(target.target_base, source))\r\n for target in targets:\r\n target.walk(collect_sources, predicate=is_thrift_target)\r\n\r\n return dirs, sources", "def get_sources(self, target):\n return sorted(list({t[0].split('.')[0]\n for t in self.mapping.items()\n if target in [c.split('.')[0]\n for c in type(t[1]) is dict and t[1].keys() or ()]}))", "def lexEmitter(target, source, env) -> tuple:\n\n sourceBase, sourceExt = os.path.splitext(to_String(source[0]))\n if sourceExt == \".lm\": # If using Objective-C\n target = [sourceBase + \".m\"] # the extension is 
\".m\".\n\n # With --header-file and ----tables-file, the file to write is defined\n # by the option argument. Extract this and include in the list of targets.\n # NOTE: a filename passed to the command this way is not modified by SCons,\n # and so will be interpreted relative to the project top directory at\n # execution time, while the name added to the target list will be\n # interpreted relative to the SConscript directory - a possible mismatch.\n #\n # These are GNU flex-only options.\n # TODO: recognize --outfile also?\n file_gen_options = [\"--header-file=\", \"--tables-file=\"]\n lexflags = env.subst_list(\"$LEXFLAGS\", target=target, source=source)\n for option in lexflags[0]:\n for fileGenOption in file_gen_options:\n l = len(fileGenOption)\n if option[:l] == fileGenOption:\n # A file generating option is present, so add the\n # file name to the target list.\n file_name = option[l:].strip()\n target.append(file_name)\n\n lexheaderfile = env.subst(\"$LEX_HEADER_FILE\", target=target, source=source)\n if lexheaderfile:\n target.append(lexheaderfile)\n # rewrite user-supplied file string with a node, we need later\n env.Replace(LEX_HEADER_FILE=env.File(lexheaderfile))\n\n lextablesfile = env.subst(\"$LEX_TABLES_FILE\", target=target, source=source)\n if lextablesfile:\n target.append(lextablesfile)\n # rewrite user-supplied file string with a node, we need later\n env.Replace(LEX_TABLES_FILE=env.File(lextablesfile))\n\n return target, source", "def preprocess_emitter(source, target, env):\n target = []\n for src in source:\n basename = os.path.basename(src.abspath)\n (base, _ext) = os.path.splitext(basename)\n prefix = \"\"\n for var in [\"OBJPREFIX\", \"OBJSUFFIX\", \"SHOBJPREFIX\", \"SHOBJSUFFIX\"]:\n mod = env.subst(\"$%s\" % var)\n if var == \"OBJSUFFIX\" and mod == \".o\":\n continue\n if var == \"SHOBJSUFFIX\" and mod == \".os\":\n continue\n if mod != \"\":\n prefix = prefix + \"_\" + mod\n target.append(prefix + base + \"_pp.c\")\n return target, source", "def generate_a_pyrex_source(self, base, ext_name, source, extension):\n return []", "def process_source(pk_type, path, file_flag, out_path, skip_lines=None, encoding=\"utf-8\"):\n if file_flag:\n process_singles(pk_type, path, out_path, skip_lines, encoding)\n else:\n process_dirs(pk_type, path, out_path, skip_lines, encoding)", "def Sources():\n return _sources", "def determine_dep_vapis(self, target):\n result = OrderedSet()\n for dep in itertools.chain(target.link_targets, target.link_whole_targets):\n if not dep.is_linkable_target():\n continue\n for i in dep.sources:\n if hasattr(i, 'fname'):\n i = i.fname\n if i.split('.')[-1] in compilers.lang_suffixes['vala']:\n vapiname = dep.vala_vapi\n fullname = os.path.join(self.get_target_dir(dep), vapiname)\n result.add(fullname)\n break\n return list(result)", "def _GetFilePairs(config):\n\n ret = []\n\n has_bazel_genfiles = os.path.exists(\"bazel-bin\")\n\n for filename in config.file_list:\n target = os.path.join(config.package_name, filename)\n generated = os.path.join(config.package_name, config.pattern % filename)\n if has_bazel_genfiles:\n generated = os.path.join(\"bazel-bin\", generated)\n\n # Generated files should always exist. 
Blaze should guarantee this before\n # we are run.\n if not os.path.isfile(generated):\n print(\"Generated file '%s' does not exist.\" % generated)\n print(\"Please run this command to generate it:\")\n print(\" bazel build %s:%s\" % (config.package_name, config.target_name))\n sys.exit(1)\n ret.append(_FilePair(target, generated))\n\n return ret", "def sources(self):\n res = set()\n for elem in chain(settings.PIPELINE_CSS.values(), settings.PIPELINE_JS.values()):\n # TODO: add support for glob\n res.update(elem.get('source_filenames', []))\n return tuple(res)" ]
[ "0.6056835", "0.58320844", "0.56975776", "0.5695355", "0.56941354", "0.56379557", "0.55843604", "0.5572055", "0.5410893", "0.535213", "0.5313574", "0.53130484", "0.530888", "0.52893823", "0.5281984", "0.5279749", "0.5272755", "0.5268782", "0.5266625", "0.52504265", "0.5248615", "0.52459764", "0.52051634", "0.52004004", "0.51987547", "0.519749", "0.5192867", "0.5164819", "0.51389956", "0.5137592" ]
0.785854
0
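The query for the row above describes suffix-based partitioning of a target's sources into Vala/Genie, VAPI, and other files. A minimal, self-contained sketch of that idea is given below; it uses plain path strings in place of Meson's File/BuildTarget/GeneratedList objects (those names are assumed from the row's code and are not exercised here), so it is an illustration of the splitting rule only, not of the original method.

from collections import OrderedDict

def split_by_suffix(paths):
    # Partition file paths into Vala/Genie sources, VAPI files, and everything else.
    # The real code above maps build-relative paths to the objects that produced them;
    # this sketch just keeps the path itself as the value.
    vala = OrderedDict()
    vapi = OrderedDict()
    others = OrderedDict()
    for p in paths:
        if p.endswith(('.vala', '.gs')):
            vala[p] = p
        elif p.endswith('.vapi'):
            vapi[p] = p
        else:
            others[p] = p
    return vala, vapi, others

# Usage: three kinds of input end up in three buckets (file names are made up for the example).
srcs = ['app.vala', 'widget.gs', 'gtk+-3.0.vapi', 'resources.c']
v, va, o = split_by_suffix(srcs)
assert list(v) == ['app.vala', 'widget.gs']
assert list(va) == ['gtk+-3.0.vapi']
assert list(o) == ['resources.c']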
Vala is compiled into C. Set up all necessary build steps here.
def generate_vala_compile(self, target: build.BuildTarget) -> \ T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]: (vala_src, vapi_src, other_src) = self.split_vala_sources(target) extra_dep_files = [] if not vala_src: raise InvalidArguments(f'Vala library {target.name!r} has no Vala or Genie source files.') valac = target.compilers['vala'] c_out_dir = self.get_target_private_dir(target) # C files generated by valac vala_c_src: T.List[str] = [] # Files generated by valac valac_outputs: T.List = [] # All sources that are passed to valac on the commandline all_files = list(vapi_src) # Passed as --basedir srcbasedir = os.path.join(self.build_to_src, target.get_subdir()) for (vala_file, gensrc) in vala_src.items(): all_files.append(vala_file) # Figure out where the Vala compiler will write the compiled C file # # If the Vala file is in a subdir of the build dir (in our case # because it was generated/built by something else), and is also # a subdir of --basedir (because the builddir is in the source # tree, and the target subdir is the source root), the subdir # components from the source root till the private builddir will be # duplicated inside the private builddir. Otherwise, just the # basename will be used. # # If the Vala file is outside the build directory, the paths from # the --basedir till the subdir will be duplicated inside the # private builddir. if isinstance(gensrc, (build.CustomTarget, build.GeneratedList)) or gensrc.is_built: vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c' # Check if the vala file is in a subdir of --basedir abs_srcbasedir = os.path.join(self.environment.get_source_dir(), target.get_subdir()) abs_vala_file = os.path.join(self.environment.get_build_dir(), vala_file) if PurePath(os.path.commonpath((abs_srcbasedir, abs_vala_file))) == PurePath(abs_srcbasedir): vala_c_subdir = PurePath(abs_vala_file).parent.relative_to(abs_srcbasedir) vala_c_file = os.path.join(str(vala_c_subdir), vala_c_file) else: path_to_target = os.path.join(self.build_to_src, target.get_subdir()) if vala_file.startswith(path_to_target): vala_c_file = os.path.splitext(os.path.relpath(vala_file, path_to_target))[0] + '.c' else: vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c' # All this will be placed inside the c_out_dir vala_c_file = os.path.join(c_out_dir, vala_c_file) vala_c_src.append(vala_c_file) valac_outputs.append(vala_c_file) args = self.generate_basic_compiler_args(target, valac) args += valac.get_colorout_args(target.get_option(OptionKey('b_colorout'))) # Tell Valac to output everything in our private directory. Sadly this # means it will also preserve the directory components of Vala sources # found inside the build tree (generated sources). args += ['--directory', c_out_dir] args += ['--basedir', srcbasedir] if target.is_linkable_target(): # Library name args += ['--library', target.name] # Outputted header hname = os.path.join(self.get_target_dir(target), target.vala_header) args += ['--header', hname] if target.is_unity: # Without this the declarations will get duplicated in the .c # files and cause a build failure when all of them are # #include-d in one .c file. # https://github.com/mesonbuild/meson/issues/1969 args += ['--use-header'] valac_outputs.append(hname) # Outputted vapi file vapiname = os.path.join(self.get_target_dir(target), target.vala_vapi) # Force valac to write the vapi and gir files in the target build dir. 
# Without this, it will write it inside c_out_dir args += ['--vapi', os.path.join('..', target.vala_vapi)] valac_outputs.append(vapiname) target.outputs += [target.vala_header, target.vala_vapi] target.install_tag += ['devel', 'devel'] # Install header and vapi to default locations if user requests this if len(target.install_dir) > 1 and target.install_dir[1] is True: target.install_dir[1] = self.environment.get_includedir() if len(target.install_dir) > 2 and target.install_dir[2] is True: target.install_dir[2] = os.path.join(self.environment.get_datadir(), 'vala', 'vapi') # Generate GIR if requested if isinstance(target.vala_gir, str): girname = os.path.join(self.get_target_dir(target), target.vala_gir) args += ['--gir', os.path.join('..', target.vala_gir)] valac_outputs.append(girname) target.outputs.append(target.vala_gir) target.install_tag.append('devel') # Install GIR to default location if requested by user if len(target.install_dir) > 3 and target.install_dir[3] is True: target.install_dir[3] = os.path.join(self.environment.get_datadir(), 'gir-1.0') # Detect gresources and add --gresources arguments for each for gensrc in other_src[1].values(): if isinstance(gensrc, modules.GResourceTarget): gres_xml, = self.get_custom_target_sources(gensrc) args += ['--gresources=' + gres_xml] extra_args = [] for a in target.extra_args.get('vala', []): if isinstance(a, File): relname = a.rel_to_builddir(self.build_to_src) extra_dep_files.append(relname) extra_args.append(relname) else: extra_args.append(a) dependency_vapis = self.determine_dep_vapis(target) extra_dep_files += dependency_vapis args += extra_args element = NinjaBuildElement(self.all_outputs, valac_outputs, self.compiler_to_rule_name(valac), all_files + dependency_vapis) element.add_item('ARGS', args) element.add_dep(extra_dep_files) self.add_build(element) self.create_target_source_introspection(target, valac, args, all_files, []) return other_src[0], other_src[1], vala_c_src
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_glibc():\n if not os.path.exists(glibc_build_dir):\n docmd(\"mkdir %s\" % glibc_build_dir)\n glibc_subdir = \"glibc-%s\" % glibc_version\n if not os.path.exists(glibc_subdir):\n docmd(\"wget http://ftpmirror.gnu.org/glibc/\"\n \"%s.tar.bz2\" % glibc_subdir)\n docmd(\"tar jxf %s.tar.bz2\" % glibc_subdir)\n ta = flag_target_arch\n dochdir(glibc_build_dir)\n doscmd(\"../%s/configure --prefix=%s/%s \"\n \"--build=%s \"\n \"--host=%s \"\n \"--target=%s \"\n \"--with-headers=%s/%s/include \"\n \"%s \"\n \"libc_cv_forced_unwind=yes\" % (glibc_subdir,\n cross_prefix, ta,\n \"x86_64-pc-linux-gnu\",\n ta, ta,\n cross_prefix, ta,\n flag_use_multilib))\n doscmd(\"make install-bootstrap-headers=yes install-headers\")\n doscmd(\"touch %s/%s/include/gnu/stubs.h\" % (cross_prefix, flag_target_arch))\n dochdir(\"..\")", "def generate(env) -> None:\n c_file, cxx_file = SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n if sys.platform == 'win32':\n # ignore the return, all we need is for the path to be added\n _ = get_lex_path(env, append_paths=True)\n\n env.SetDefault(\n LEX=env.Detect(BINS),\n LEXFLAGS=CLVar(\"\"),\n LEX_HEADER_FILE=\"\",\n LEX_TABLES_FILE=\"\",\n )\n\n if sys.platform == 'win32':\n env.SetDefault(LEXUNISTD=CLVar(\"\"))\n env[\"LEXCOM\"] = \"$LEX $LEXUNISTD $LEXFLAGS $_LEX_HEADER $_LEX_TABLES -t $SOURCES > $TARGET\"\n else:\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS $_LEX_HEADER $_LEX_TABLES -t $SOURCES > $TARGET\"\n\n env['_LEX_HEADER'] = '${LEX_HEADER_FILE and \"--header-file=\" + str(LEX_HEADER_FILE)}'\n env['_LEX_TABLES'] = '${LEX_TABLES_FILE and \"--tables-file=\" + str(LEX_TABLES_FILE)}'", "def generate(env):\n c_file, cxx_file = SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n env[\"LEX\"] = env.Detect(\"flex\") or \"lex\"\n env[\"LEXFLAGS\"] = SCons.Util.CLVar(\"\")\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS -t $SOURCES > $TARGET\"", "def main():\n\n # Parse the command line.\n parser = ArgumentParser(\n \"Generate Python extension modules for C/C++ libraries.\",\n fromfile_prefix_chars='@')\n\n parser.add_argument('specification',\n help=\"the name of the specification file [default stdin]\",\n metavar=\"FILE\", nargs='?')\n\n parser.add_argument('-a', dest='api_extract',\n help=\"the name of the QScintilla API file [default not generated]\",\n metavar=\"FILE\")\n\n parser.add_argument('--abi-version', dest='abi_version',\n help=\"the ABI version\", metavar=\"VERSION\")\n\n parser.add_argument('-B', dest='backstops', action='append',\n help=\"add <TAG> to the list of timeline backstops\",\n metavar=\"TAG\")\n\n parser.add_argument('-c', dest='sources_dir',\n help=\"the name of the code directory [default not generated]\",\n metavar=\"DIR\")\n\n parser.add_argument('-D', dest='py_debug', action='store_true',\n default=False,\n help=\"generate code for a debug build 
of Python\")\n\n parser.add_argument('-e', dest='exceptions', action='store_true',\n default=False,\n help=\"enable support for exceptions [default disabled]\")\n\n parser.add_argument('-f', dest='warnings_are_errors', action='store_true',\n default=False,\n help=\"warnings are handled as errors\")\n\n parser.add_argument('-g', dest='release_gil', action='store_true',\n default=False,\n help=\"always release and reacquire the GIL [default only when \"\n \"specified]\")\n\n parser.add_argument('-I', dest='include_dirs', action='append',\n help=\"add <DIR> to the list of directories to search when \"\n \"importing or including .sip files\",\n metavar=\"DIR\")\n\n parser.add_argument('-j', dest='parts', type=int, default=0,\n help=\"split the generated code into <FILES> files [default 1 per \"\n \"class]\",\n metavar=\"FILES\")\n\n parser.add_argument('-m', dest='xml_extract', help=SUPPRESS)\n\n parser.add_argument('-n', dest='sip_module',\n help=\"the fully qualified name of the sip module\",\n metavar=\"NAME\")\n\n parser.add_argument('-o', dest='docstrings', action='store_true',\n default=False,\n help=\"enable the automatic generation of docstrings [default \"\n \"disabled]\")\n\n parser.add_argument('-P', dest='protected_is_public', action='store_true',\n default=False,\n help=\"enable the protected/public hack [default disabled]\")\n\n parser.add_argument('-r', dest='tracing', action='store_true',\n default=False,\n help=\"generate code with tracing enabled [default disabled]\")\n\n parser.add_argument('-s', dest='source_suffix',\n help=\"the suffix to use for C or C++ source files [default \\\".c\\\" \"\n \"or \\\".cpp\\\"]\",\n metavar=\"SUFFIX\")\n\n parser.add_argument('-t', dest='tags', action='append',\n help=\"add <TAG> to the list of versions/platforms to generate \"\n \"code for\",\n metavar=\"TAG\")\n\n parser.add_argument('-w', dest='warnings', action='store_true',\n default=False, help=\"enable warning messages [default disabled]\")\n\n parser.add_argument('-x', dest='disabled_features', action='append',\n help=\"add <FEATURE> to the list of disabled features\",\n metavar=\"FEATURE\")\n\n parser.add_argument('-X', dest='extracts', action='append',\n help=\"add <ID:FILE> to the list of extracts to generate\",\n metavar=\"ID:FILE\")\n\n parser.add_argument('-y', dest='pyi_extract',\n help=\"the name of the .pyi stub file [default not generated]\",\n metavar=\"FILE\")\n\n args = parser.parse_args()\n\n # Configure the handling of warnings.\n if args.warnings:\n if args.warnings_are_errors:\n simplefilter('error', FutureWarning)\n simplefilter('error', UserWarning)\n else:\n # Note that we don't suppress FutureWarnings.\n simplefilter('ignore', UserWarning)\n\n try:\n sip5(args.specification, sip_module=args.sip_module,\n abi_version=args.abi_version, sources_dir=args.sources_dir,\n include_dirs=args.include_dirs, tags=args.tags,\n backstops=args.backstops,\n disabled_features=args.disabled_features,\n exceptions=args.exceptions, parts=args.parts,\n source_suffix=args.source_suffix, docstrings=args.docstrings,\n protected_is_public=args.protected_is_public,\n py_debug=args.py_debug, release_gil=args.release_gil,\n tracing=args.tracing, extracts=args.extracts,\n pyi_extract=args.pyi_extract, api_extract=args.api_extract,\n xml_extract=args.xml_extract)\n except Exception as e:\n handle_exception(e)\n\n return 0", "def run_cpp(self):", "def load_glib(finder, module):\n module.AddGlobalName(\"GError\")\n module.AddGlobalName(\"IOChannel\")\n module.AddGlobalName(\"IO_ERR\")\n 
module.AddGlobalName(\"IO_FLAG_APPEND\")\n module.AddGlobalName(\"IO_FLAG_GET_MASK\")\n module.AddGlobalName(\"IO_FLAG_IS_READABLE\")\n module.AddGlobalName(\"IO_FLAG_IS_SEEKABLE\")\n module.AddGlobalName(\"IO_FLAG_IS_WRITEABLE\")\n module.AddGlobalName(\"IO_FLAG_MASK\")\n module.AddGlobalName(\"IO_FLAG_NONBLOCK\")\n module.AddGlobalName(\"IO_FLAG_SET_MASK\")\n module.AddGlobalName(\"IO_HUP\")\n module.AddGlobalName(\"IO_IN\")\n module.AddGlobalName(\"IO_NVAL\")\n module.AddGlobalName(\"IO_OUT\")\n module.AddGlobalName(\"IO_PRI\")\n module.AddGlobalName(\"IO_STATUS_AGAIN\")\n module.AddGlobalName(\"IO_STATUS_EOF\")\n module.AddGlobalName(\"IO_STATUS_ERROR\")\n module.AddGlobalName(\"IO_STATUS_NORMAL\")\n module.AddGlobalName(\"Idle\")\n module.AddGlobalName(\"MainContext\")\n module.AddGlobalName(\"MainLoop\")\n module.AddGlobalName(\"OPTION_ERROR\")\n module.AddGlobalName(\"OPTION_ERROR_BAD_VALUE\")\n module.AddGlobalName(\"OPTION_ERROR_FAILED\")\n module.AddGlobalName(\"OPTION_ERROR_UNKNOWN_OPTION\")\n module.AddGlobalName(\"OPTION_FLAG_FILENAME\")\n module.AddGlobalName(\"OPTION_FLAG_HIDDEN\")\n module.AddGlobalName(\"OPTION_FLAG_IN_MAIN\")\n module.AddGlobalName(\"OPTION_FLAG_NOALIAS\")\n module.AddGlobalName(\"OPTION_FLAG_NO_ARG\")\n module.AddGlobalName(\"OPTION_FLAG_OPTIONAL_ARG\")\n module.AddGlobalName(\"OPTION_FLAG_REVERSE\")\n module.AddGlobalName(\"OPTION_REMAINING\")\n module.AddGlobalName(\"OptionContext\")\n module.AddGlobalName(\"OptionGroup\")\n module.AddGlobalName(\"PRIORITY_DEFAULT\")\n module.AddGlobalName(\"PRIORITY_DEFAULT_IDLE\")\n module.AddGlobalName(\"PRIORITY_HIGH\")\n module.AddGlobalName(\"PRIORITY_HIGH_IDLE\")\n module.AddGlobalName(\"PRIORITY_LOW\")\n module.AddGlobalName(\"Pid\")\n module.AddGlobalName(\"PollFD\")\n module.AddGlobalName(\"SPAWN_CHILD_INHERITS_STDIN\")\n module.AddGlobalName(\"SPAWN_DO_NOT_REAP_CHILD\")\n module.AddGlobalName(\"SPAWN_FILE_AND_ARGV_ZERO\")\n module.AddGlobalName(\"SPAWN_LEAVE_DESCRIPTORS_OPEN\")\n module.AddGlobalName(\"SPAWN_SEARCH_PATH\")\n module.AddGlobalName(\"SPAWN_STDERR_TO_DEV_NULL\")\n module.AddGlobalName(\"SPAWN_STDOUT_TO_DEV_NULL\")\n module.AddGlobalName(\"Source\")\n module.AddGlobalName(\"Timeout\")\n module.AddGlobalName(\"child_watch_add\")\n module.AddGlobalName(\"filename_display_basename\")\n module.AddGlobalName(\"filename_display_name\")\n module.AddGlobalName(\"filename_from_utf8\")\n module.AddGlobalName(\"get_application_name\")\n module.AddGlobalName(\"get_current_time\")\n module.AddGlobalName(\"get_prgname\")\n module.AddGlobalName(\"glib_version\")\n module.AddGlobalName(\"idle_add\")\n module.AddGlobalName(\"io_add_watch\")\n module.AddGlobalName(\"main_context_default\")\n module.AddGlobalName(\"main_depth\")\n module.AddGlobalName(\"markup_escape_text\")\n module.AddGlobalName(\"set_application_name\")\n module.AddGlobalName(\"set_prgname\")\n module.AddGlobalName(\"source_remove\")\n module.AddGlobalName(\"spawn_async\")\n module.AddGlobalName(\"timeout_add\")\n module.AddGlobalName(\"timeout_add_seconds\")", "def Cpp_test():\n pass", "def setup_libgcc(self):\n libgccpath = os.path.dirname(os.popen(\"%s --print-libgcc-file-name\" % self.subst(\"$CCCOM_NOTARGET\")).read())\n self.libs[\"gcc\"] = ([], 1, libgccpath, None, [])", "def build_cffi():\r\n print_banner(\"Building CFFI Module\")\r\n ffi = cffi.FFI()\r\n\r\n this_dir = pathlib.Path().resolve()\r\n h_file_name = this_dir / \"cmult.h\"\r\n with open(h_file_name) as h_file:\r\n # cffi does not like our preprocessor directives, so we remove 
them\r\n lns = h_file.read().splitlines()\r\n flt = filter(lambda ln: not re.match(r\" *#\", ln), lns)\r\n flt = map(lambda ln: ln.replace(\"EXPORT_SYMBOL \", \"\"), flt)\r\n ffi.cdef(str(\"\\n\").join(flt))\r\n\r\n ffi.set_source(\r\n \"cffi_example\",\r\n # Since we are calling a fully built library directly no custom source\r\n # is necessary. We need to include the .h files, though, because behind\r\n # the scenes cffi generates a .c file which contains a Python-friendly\r\n # wrapper around each of the functions.\r\n '#include \"cmult.h\"',\r\n # The important thing is to include the pre-built lib in the list of\r\n # libraries we are linking against:\r\n libraries=[\"cmult\"],\r\n library_dirs=[this_dir.as_posix()],\r\n extra_link_args=[\"-Wl,-rpath,.\"],\r\n )\r\n\r\n ffi.compile()\r\n print(\"* Complete\")", "def __init__(self, path_prefix=\"\"):\n self.gcc_path = path_prefix + \"gcc\"", "def test_clang_cxx(self):\n self.assertEqual(\n self.ndk.clang_cxx,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/clang++\",\n )", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def build(self):\n env = ConfigureEnvironment(self.deps_cpp_info, self.settings)\n\n set_path_command = \"\"\n # Download nasm as build tool. This should go to source()\n if self.options.SSE == True:\n if self.settings.os == \"Linux\":\n # TODO: We should build nasm from source then.\n self.options.SSE = False # Or is removing here better? 
I'm not familiar with python..\n else:\n nasm_version = \"2.12.02\"\n nasm_os_url_id = \"\" #nasm url identifier\n if self.settings.os == \"Windows\":\n if self.settings.arch == \"x86\":\n nasm_os_url_id = \"win32\"\n else:\n nasm_os_url_id = \"win64\" \n elif self.settings.os == \"Macos\":\n nasm_os_url_id = \"macosx\"\n nasm_folder_name = \"nasm-%s-%s\" % (nasm_version, nasm_os_url_id)\n nasm_zip_name = \"%s.zip\" % nasm_folder_name\n download(\"http://www.nasm.us/pub/nasm/releasebuilds/%s/%s/%s\" % (nasm_version, nasm_os_url_id, nasm_zip_name), nasm_zip_name)\n self.output.warn(\"Downloading nasm: http://www.nasm.us/pub/nasm/releasebuilds/%s/%s/%s\" % (nasm_version, nasm_os_url_id, nasm_zip_name))\n unzip(nasm_zip_name)\n os.unlink(nasm_zip_name)\n nasm_path = os.path.join(os.getcwd(), nasm_folder_name)\n\n #env.environ[\"PATH\"] += os.pathsep + nasm_path #its probably as easy as this, but i cant append to the path self.run operates in.\n if self.settings.os == \"Windows\":\n set_path_command = \"set \\\"PATH=%s\\\" &&\" % os.environ[\"PATH\"]\n else:\n set_path_command = \"PATH=\\\"%s\\\" &&\" % os.environ[\"PATH\"]\n\n if self.settings.os == \"Linux\" or self.settings.os == \"Macos\":\n if self.options.fPIC:\n env_line = env.command_line.replace('CFLAGS=\"', 'CFLAGS=\"-fPIC ')\n else:\n env_line = env.command_line\n self.run(\"cd %s && autoreconf -fiv\" % self.ZIP_FOLDER_NAME)\n config_options = \"\"\n if self.settings.arch == \"x86\":\n if self.settings.os == \"Linux\":\n config_options = \"--host i686-pc-linux-gnu CFLAGS='-O3 -m32' LDFLAGS=-m32\"\n else:\n config_options = \"--host i686-apple-darwin CFLAGS='-O3 -m32' LDFLAGS=-m32\"\n\n if self.settings.os == \"Macos\":\n old_str = '-install_name \\$rpath/\\$soname'\n new_str = '-install_name \\$soname'\n replace_in_file(\"./%s/configure\" % self.ZIP_FOLDER_NAME, old_str, new_str)\n\n self.run(\"cd %s && %s ./configure %s\" % (self.ZIP_FOLDER_NAME, env_line, config_options))\n self.run(\"cd %s && %s make\" % (self.ZIP_FOLDER_NAME, env_line))\n else: # We should (for simplicity) always use cmake shouldnt we?\n conan_magic_lines = '''project(libjpeg-turbo)\n cmake_minimum_required(VERSION 3.0)\n include(../conanbuildinfo.cmake)\n CONAN_BASIC_SETUP()\n '''\n replace_in_file(\"%s/CMakeLists.txt\" % self.ZIP_FOLDER_NAME, \"cmake_minimum_required(VERSION 2.8.8)\", conan_magic_lines)\n replace_in_file(\"%s/CMakeLists.txt\" % self.ZIP_FOLDER_NAME, \"project(libjpeg-turbo C)\", \"\")\n \n cmake = CMake(self.settings)\n builddir = os.path.join(self.ZIP_FOLDER_NAME, \"_build\")\n\n if os.path.exists(builddir):\n shutil.rmtree(builddir) # We need to remove this folder first for windows\n os.makedirs(builddir)\n\n cmake_options = []\n if self.options.shared == True:\n cmake_options += [\"-DENABLE_STATIC=0\"]\n else:\n cmake_options = [\"-DENABLE_SHARED=0\"]\n cmake_options += [\"-DWITH_SIMD=%s\" % \"1\" if self.options.SSE else \"0\"]\n\n # why this comment: \"Don't change runtime, conan will take care of\"? conan_basic_setup() runs before this cmake option replaces MT with MD again\n cmake_options += [\"-DWITH_CRT_DLL=%s\" % \"1\" if self.settings.compiler.runtime == \"MD\" or self.settings.compiler.runtime == \"MDd\" else \"0\"]\n\n self.run('%s cd %s && cmake .. %s %s' % (set_path_command, builddir, cmake.command_line, \" \".join(cmake_options)))\n self.run(\"%s cd %s && cmake --build . 
%s\" % (set_path_command, builddir, cmake.build_config))", "def generate(env):\n # NOTE: SCons requires the use of this name, which fails gpylint.\n\n # Set target platform bits\n env.SetBits('debug')\n\n env['TARGET_DEBUG'] = True\n\n env.Append(\n CPPDEFINES=['_DEBUG'] + env.get('CPPDEFINES_DEBUG', []),\n CCFLAGS=env.get('CCFLAGS_DEBUG', []),\n LINKFLAGS=env.get('LINKFLAGS_DEBUG', []),\n )", "def pyo():\n local('python -O -m compileall .')", "def pyo():\n local('python -O -m compileall .')", "def compile_c(self):\n if(self.input == \"\"):\n stderr = subprocess.run(\n [\"gcc\", self.id+\".c\", \"-o\", self.id+\"_c\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n if(len(stderr) == 0):\n self.status = 1\n stdout = subprocess.run(\n [\"./\"+self.id+\"_c\"], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n else:\n self.status = 0\n self.output = stderr\n else:\n pass", "def setup_gcc():\n if not os.path.exists(gcc_build_dir):\n docmd(\"mkdir %s\" % gcc_build_dir)\n dochdir(gcc_build_dir)\n dopt = \"\"\n if flag_debug_gcc:\n dopt = mk_debug_configopts()\n sropt = \"\"\n if sysroot:\n sropt = \"--with-sysroot=%s\" % sysroot\n else:\n if flag_do_only_gcc_build:\n sropt = \"\"\n else:\n sropt = \"--with-glibc-version=2.20\"\n doscmd(\"../%s/configure %s --prefix=%s --target=%s %s \"\n \"--enable-languages=%s --enable-libgo \"\n \"%s %s \" % (flag_gcc_subdir,\n dopt, cross_prefix,\n flag_target_arch,\n sropt, flag_langs,\n flag_use_multilib, flag_use_bootstrap))\n doscmd(\"make %s all-gcc\" % flag_parfactor)\n doscmd(\"make %s install-gcc\" % flag_parfactor)\n dochdir(\"..\")", "def setup_ctypes():\n lib.createComplex.argtypes = [ctypes.c_double, ctypes.c_double]\n lib.createComplex.restype = ctypes.c_void_p\n\n lib.deleteComplex.argypes = [ctypes.c_void_p]\n lib.deleteComplex.restype = None\n\n lib.getRealPart.argypes = [ctypes.c_void_p]\n lib.getRealPart.restype = ctypes.c_double\n\n lib.getImaginaryPart.argypes = [ctypes.c_void_p]\n lib.getImaginaryPart.restype = ctypes.c_double\n\n lib.add.argypes = [ctypes.c_void_p, ctypes.c_void_p]\n lib.add.restype = ctypes.c_void_p\n\n lib.equals.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\n lib.equals.restype = ctypes.c_bool", "def main ( ) :\r\n\r\n # checks the command line\r\n\r\n libraryPath = checkArguments()\r\n\r\n # checks whether the library path is defined in a file pythonLibrary.txt\r\n\r\n if libraryPath is None : libraryPath = readLink()\r\n\r\n # checks for current directory\r\n\r\n if libraryPath is None : libraryPath = findDirectory( os.getcwd() )\r\n\r\n # checks whether the current execution prefix ( the directory where we are running python ) contains libraryPython\r\n\r\n if libraryPath is None : libraryPath = findDirectory( sys.exec_prefix )\r\n\r\n # not found\r\n\r\n if libraryPath is None :\r\n\r\n raw_input( sys.argv[ 0 ] + \" - fatal error : library not found. 
Press any key\" )\r\n\r\n sys.exit( 1 )\r\n\r\n # adds final / if required\r\n\r\n libraryPath = libraryPath.rstrip( \"\\\\/\" ) + os.sep\r\n\r\n # writes the link file (contains library path, speeds up next execution)\r\n\r\n writeLink( libraryPath )\r\n\r\n # appends library and root to search path\r\n\r\n if not libraryPath in sys.path : sys.path = [ libraryPath ] + sys.path\r\n\r\n # appends library and the local copy of python to the system search path so that local python modules have priority\r\n\r\n if os.path.exists( libraryPath + os.sep + sys.platform ) : localPath = libraryPath + os.sep + sys.platform + os.sep\r\n\r\n elif os.path.exists( libraryPath + os.sep + \"python\" ) : localPath = libraryPath + os.sep + \"python\" + os.sep\r\n\r\n else : localPath = None\r\n\r\n if not localPath is None : localPython( localPath )\r\n\r\n # writes the python library directly in sys, for next modules\r\n\r\n sys.libraryPath = libraryPath\r\n\r\n # writes the root of python library in sys, for next modules\r\n\r\n rootPath, dummy = os.path.split( libraryPath.rstrip( \"\\\\/\" ) )\r\n\r\n rootPath = rootPath + os.sep\r\n\r\n sys.rootPath = rootPath\r\n\r\n if not rootPath in sys.path : sys.path = [ rootPath ] + sys.path\r\n\r\n # sets the environment variables (session) for GUIs\r\n\r\n if not localPath is None : setEnvironment( localPath )", "def generate(env):\n\n gcc.generate(env)\n\n # Set up standard folder locations\n env.SetDefault(SDK_TOOLS = env['TOOLS_ROOT'] + '/tools')\n env.SetDefault(KCC_DIR = env['SDK_TOOLS'] + '/kcc/bin')\n\n env['KCC'] = _detect(env)\n env['AS'] = '$KCC'\n env['CC'] = '$KCC'\n env['OBJSUFFIX'] = '.o'\n env['BUILDERS']['AsmObject'] = _kccAsmBuilder", "def generate(env):\n SCons.Tool.gcc.generate(env)\n\n detector = DetectCompiler()\n if detector['icx'] is None:\n raise SCons.Errors.InternalError(\"No oneapi compiler found\")\n\n env['INTEL_C_COMPILER_TOP'] = detector['root']\n paths = {'INCLUDE': 'include',\n 'LIB': 'libarch',\n 'PATH': 'binarch',\n 'LD_LIBRARY_PATH': 'libarch'}\n for (key, value) in paths.items():\n env.PrependENVPath(key, detector[value])\n env.PrependENVPath(\"PATH\", detector[\"bin\"])\n env.PrependENVPath(\"LIB\", detector[\"lib\"])\n env.PrependENVPath(\"LD_LIBRARY_PATH\", detector[\"lib\"])\n env['CC'] = 'icx'\n env['CXX'] = 'icpx'\n env['AR'] = 'ar'\n env['LD'] = 'xild' # not used by default", "def configure_and_build_llvm(args: str) -> None:\n ninja = get_cmd_or_die(\"ninja\")\n # Possible values are Release, Debug, RelWithDebInfo and MinSizeRel\n build_type = \"Debug\" if args.debug else \"RelWithDebInfo\"\n ninja_build_file = os.path.join(c.LLVM_BLD, \"build.ninja\")\n with pb.local.cwd(c.LLVM_BLD):\n if os.path.isfile(ninja_build_file):\n prev_build_type = get_ninja_build_type(ninja_build_file)\n run_cmake = prev_build_type != build_type\n else:\n run_cmake = True\n\n if run_cmake:\n cmake = get_cmd_or_die(\"cmake\")\n max_link_jobs = est_parallel_link_jobs()\n assertions = \"1\" if args.assertions else \"0\"\n cargs = [\"-G\", \"Ninja\", c.LLVM_SRC,\n \"-Wno-dev\",\n \"-DCMAKE_C_COMPILER=clang\",\n \"-DCMAKE_CXX_COMPILER=clang++\",\n \"-DCMAKE_C_FLAGS=-I{}/include\".format(c.CBOR_PREFIX),\n \"-DCMAKE_CXX_FLAGS=-I{}/include\".format(c.CBOR_PREFIX),\n \"-DCMAKE_EXE_LINKER_FLAGS=-L{}/lib\".format(c.CBOR_PREFIX),\n \"-DCMAKE_BUILD_TYPE=\" + build_type,\n \"-DLLVM_ENABLE_ASSERTIONS=\" + assertions,\n \"-DLLVM_TARGETS_TO_BUILD=X86\",\n \"-DLLVM_INCLUDE_UTILS=1\",\n \"-DLLVM_BUILD_UTILS=1\",\n \"-DBUILD_SHARED_LIBS=1\",\n 
\"-DLLVM_PARALLEL_LINK_JOBS={}\".format(max_link_jobs)]\n invoke(cmake[cargs])\n else:\n logging.debug(\"found existing ninja.build, not running cmake\")\n\n ninja_args = ['ast-exporter']\n ninja_args += ['FileCheck', 'count', 'not']\n if args.with_clang:\n ninja_args.append('clang')\n invoke(ninja, *ninja_args)", "def build(c):", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def _init_libcxx(self, version):\n self.libcxx_version = version", "def main(options):\n # Setup generated dependency file options.\n if options.generate_dependency_file:\n dependency_file = os.path.normcase(os.path.join(\n options.config_build_dir, \"LLDBWrapPython.cpp.d\"))\n else:\n dependency_file = None\n\n # Keep track of all the swig-related settings.\n settings = SwigSettings()\n\n # Determine the final binding file path.\n settings.output_file = os.path.normcase(\n os.path.join(options.config_build_dir, \"LLDBWrapPython.cpp\"))\n\n # Touch the output file (but don't really generate it) if python\n # is disabled.\n disable_python = os.getenv(\"LLDB_DISABLE_PYTHON\", None)\n if disable_python is not None and disable_python == \"1\":\n remove_ignore_enoent(settings.output_file)\n # Touch the file.\n open(settings.output_file, 'w').close()\n logging.info(\n \"Created empty python binding file due to LLDB_DISABLE_PYTHON \"\n \"being set\")\n return\n\n # We also check the GCC_PREPROCESSOR_DEFINITIONS to see if it\n # contains LLDB_DISABLE_PYTHON. If so, we skip generating\n # the binding.\n gcc_preprocessor_defs = os.getenv(\"GCC_PREPROCESSOR_DEFINITIONS\", None)\n if gcc_preprocessor_defs is not None:\n if re.search(r\"LLDB_DISABLE_PYTHON\", gcc_preprocessor_defs):\n remove_ignore_enoent(settings.output_file)\n # Touch the file\n open(settings.output_file, 'w').close()\n logging.info(\n \"Created empty python binding file due to \"\n \"finding LLDB_DISABLE_PYTHON in GCC_PREPROCESSOR_DEFINITIONS\")\n return\n\n # Setup paths used during swig invocation.\n settings.input_file = os.path.normcase(\n os.path.join(options.src_root, \"scripts\", \"lldb.swig\"))\n scripts_python_dir = os.path.dirname(os.path.realpath(__file__))\n settings.extensions_file = os.path.normcase(\n os.path.join(scripts_python_dir, \"python-extensions.swig\"))\n settings.wrapper_file = os.path.normcase(\n os.path.join(scripts_python_dir, \"python-wrapper.swig\"))\n settings.typemaps_file = os.path.normcase(\n os.path.join(scripts_python_dir, \"python-typemaps.swig\"))\n settings.safecast_file = os.path.normcase(\n os.path.join(scripts_python_dir, \"python-swigsafecast.swig\"))\n\n settings.header_files = get_header_files(options)\n settings.interface_files = get_interface_files(options)\n\n generate_output = settings.output_out_of_date()\n\n # Determine where to put the module.\n python_module_path = get_python_module_path(options)\n logging.info(\"python module path: %s\", python_module_path)\n\n # Handle the configuration build dir.\n if options.config_build_dir is not None:\n config_build_dir = options.config_build_dir\n else:\n config_build_dir = python_module_path\n\n # Allow missing/non-link _lldb.so to force regeneration.\n if not generate_output:\n # Ensure the _lldb.so file exists.\n so_path = os.path.join(python_module_path, \"_lldb.so\")\n if not os.path.exists(so_path) or not os.path.islink(so_path):\n logging.info(\"_lldb.so doesn't exist or isn't a symlink\")\n generate_output = True\n\n # Allow missing __init__.py to force regeneration.\n if not generate_output:\n # Ensure the __init__.py for the lldb module can be found.\n 
init_path = os.path.join(python_module_path, \"__init__.py\")\n if not os.path.exists(init_path):\n logging.info(\"__init__.py doesn't exist\")\n generate_output = True\n\n # Figure out if we would be using static bindings\n use_static_bindings = (\n not options.swig_executable or\n not os.path.exists(options.swig_executable))\n if use_static_bindings and not generate_output:\n # If the contents of the VCS static binding are different from what\n # we have in the build dir, we should copy them regardless.\n if static_bindings_require_refresh(\n options, config_build_dir, settings):\n # Force the static bindings to be copied later on, thus preventing\n # early exit from this method.\n logging.info(\"updating static binding due to VCS binding changes\")\n generate_output = True\n\n if not generate_output:\n logging.info(\n \"Skipping Python binding generation: everything is up to date\")\n return\n\n # Generate the Python binding with swig, or use the static bindings if\n # no swig.\n if use_static_bindings:\n # Copy over the static bindings. We capture the the modified (\n # i.e. post-processed) binding, so we don't do the modify step\n # here - the modifications have already been applied.\n copy_static_bindings(options, config_build_dir, settings)\n else:\n # Generate the Python binding with swig.\n logging.info(\"Python binding is out of date, regenerating\")\n do_swig_rebuild(options, dependency_file, config_build_dir, settings)", "def configure_step(self):\n\n binutils_root = get_software_root('binutils')\n gcc_root = get_software_root('GCCcore') or get_software_root('GCC')\n gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')\n\n # only patch Bazel scripts if binutils & GCC installation prefix could be determined\n if binutils_root and gcc_root:\n\n res = glob.glob(os.path.join(gcc_root, 'lib', 'gcc', '*', gcc_ver, 'include'))\n if res and len(res) == 1:\n gcc_lib_inc = res[0]\n else:\n raise EasyBuildError(\"Failed to pinpoint location of GCC include files: %s\", res)\n\n gcc_lib_inc_fixed = os.path.join(os.path.dirname(gcc_lib_inc), 'include-fixed')\n if not os.path.exists(gcc_lib_inc_fixed):\n raise EasyBuildError(\"Derived directory %s does not exist\", gcc_lib_inc_fixed)\n\n gcc_cplusplus_inc = os.path.join(gcc_root, 'include', 'c++', gcc_ver)\n if not os.path.exists(gcc_cplusplus_inc):\n raise EasyBuildError(\"Derived directory %s does not exist\", gcc_cplusplus_inc)\n\n # replace hardcoded paths in CROSSTOOL\n regex_subs = [\n (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')),\n (r'(cxx_builtin_include_directory:.*)/usr/lib/gcc', r'\\1%s' % gcc_lib_inc),\n (r'(cxx_builtin_include_directory:.*)/usr/local/include', r'\\1%s' % gcc_lib_inc_fixed),\n (r'(cxx_builtin_include_directory:.*)/usr/include', r'\\1%s' % gcc_cplusplus_inc),\n ]\n for tool in ['ar', 'cpp', 'dwp', 'gcc', 'ld']:\n path = which(tool)\n if path:\n regex_subs.append((os.path.join('/usr', 'bin', tool), path))\n else:\n raise EasyBuildError(\"Failed to determine path to '%s'\", tool)\n\n apply_regex_substitutions(os.path.join('tools', 'cpp', 'CROSSTOOL'), regex_subs)\n\n # replace hardcoded paths in (unix_)cc_configure.bzl\n regex_subs = [\n (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')),\n (r'\"/usr/bin', '\"' + os.path.join(binutils_root, 'bin')),\n ]\n for conf_bzl in ['cc_configure.bzl', 'unix_cc_configure.bzl']:\n filepath = os.path.join('tools', 'cpp', conf_bzl)\n if os.path.exists(filepath):\n apply_regex_substitutions(filepath, regex_subs)\n else:\n 
self.log.info(\"Not patching Bazel build scripts, installation prefix for binutils/GCC not found\")\n\n # enable building in parallel\n env.setvar('EXTRA_BAZEL_ARGS', '--jobs=%d' % self.cfg['parallel'])", "def build_cmult(path=\"C:\\\\Program Files (x86)\\\\Microsoft Visual Studio\\\\2019\\\\Professional\\\\VC\\\\Auxiliary\\\\Build\\\\\"):\r\n # Moving this type hint into signature causes an error (???)\r\n c = invoke.Context()\r\n\r\n if on_win:\r\n if not path:\r\n print(\"Path is missing\")\r\n else:\r\n # Using c.cd didn't work with paths that have spaces :/\r\n path = f'\"{path}vcvars32.bat\" x64' # Enter the VS venv\r\n path += f'&& cd \"{os.getcwd()}\"' # Change to current dir\r\n path += \"&& cl /LD cmult.c\" # Compile\r\n # Uncomment line below, to suppress stdout\r\n # path = path.replace(\"&&\", \" >nul &&\") + \" >nul\"\r\n c.run(path)\r\n else:\r\n print_banner(\"Building C Library\")\r\n cmd = \"gcc -c -Wall -Werror -fpic cmult.c -I /usr/include/python3.7\"\r\n invoke.run(cmd)\r\n invoke.run(\"gcc -shared -o libcmult.so cmult.o\")\r\n print(\"* Complete\")", "def fiddle_with_flags():\n flags['c++'] += '-arch x86_64 -bundle'\n flags['c'] += '-arch x86_64'" ]
[ "0.5602567", "0.543335", "0.53301674", "0.5304108", "0.5286191", "0.52635735", "0.5227829", "0.51943076", "0.5164394", "0.51239663", "0.5060787", "0.50462466", "0.5021612", "0.4998351", "0.4991233", "0.4991233", "0.49836853", "0.4957573", "0.494618", "0.49404785", "0.49330255", "0.48705605", "0.4854161", "0.4849502", "0.4844048", "0.48410258", "0.4815899", "0.47906512", "0.47855598", "0.47829434" ]
0.64493704
0
Generate rules for transpiling Cython files to C or C++
def generate_cython_transpile(self, target: build.BuildTarget) -> \ T.Tuple[T.MutableMapping[str, File], T.MutableMapping[str, File], T.List[str]]: static_sources: T.MutableMapping[str, File] = OrderedDict() generated_sources: T.MutableMapping[str, File] = OrderedDict() cython_sources: T.List[str] = [] cython = target.compilers['cython'] args: T.List[str] = [] args += cython.get_always_args() args += cython.get_buildtype_args(target.get_option(OptionKey('buildtype'))) args += cython.get_debug_args(target.get_option(OptionKey('debug'))) args += cython.get_optimization_args(target.get_option(OptionKey('optimization'))) args += cython.get_option_compile_args(target.get_options()) args += self.build.get_global_args(cython, target.for_machine) args += self.build.get_project_args(cython, target.subproject, target.for_machine) args += target.get_extra_args('cython') ext = target.get_option(OptionKey('language', machine=target.for_machine, lang='cython')) pyx_sources = [] # Keep track of sources we're adding to build for src in target.get_sources(): if src.endswith('.pyx'): output = os.path.join(self.get_target_private_dir(target), f'{src}.{ext}') element = NinjaBuildElement( self.all_outputs, [output], self.compiler_to_rule_name(cython), [src.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())]) element.add_item('ARGS', args) self.add_build(element) # TODO: introspection? cython_sources.append(output) pyx_sources.append(element) else: static_sources[src.rel_to_builddir(self.build_to_src)] = src header_deps = [] # Keep track of generated headers for those sources for gen in target.get_generated_sources(): for ssrc in gen.get_outputs(): if isinstance(gen, GeneratedList): ssrc = os.path.join(self.get_target_private_dir(target), ssrc) else: ssrc = os.path.join(gen.get_subdir(), ssrc) if ssrc.endswith('.pyx'): output = os.path.join(self.get_target_private_dir(target), f'{ssrc}.{ext}') element = NinjaBuildElement( self.all_outputs, [output], self.compiler_to_rule_name(cython), [ssrc]) element.add_item('ARGS', args) self.add_build(element) pyx_sources.append(element) # TODO: introspection? cython_sources.append(output) else: generated_sources[ssrc] = mesonlib.File.from_built_file(gen.get_subdir(), ssrc) # Following logic in L883-900 where we determine whether to add generated source # as a header(order-only) dep to the .so compilation rule if not self.environment.is_source(ssrc) and \ not self.environment.is_object(ssrc) and \ not self.environment.is_library(ssrc) and \ not modules.is_module_library(ssrc): header_deps.append(ssrc) for source in pyx_sources: source.add_orderdep(header_deps) return static_sources, generated_sources, cython_sources
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pyo():\n local('python -O -m compileall .')", "def pyo():\n local('python -O -m compileall .')", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n 
Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def build_from_c_and_cpp_files(extensions):\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx', '.py'):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources = sources", "def generate_cpp():\n cpp_file = AUTOGEN_WARNING\n cpp_file += \"// Implements basic nuclear data functions.\\n\"\n cpp_file += \"#ifndef PYNE_IS_AMALGAMATED\\n\"\n cpp_file += '#include \"atomic_data.h\"\\n'\n cpp_file += 
'#include \"nucname.h\"\\n'\n cpp_file += \"#endif\\n\"\n cpp_file += \" \\n\"\n cpp_file += \"void pyne::_load_atomic_mass_map_memory() { \\n\"\n cpp_file += \" // header version of atomic weight table data \\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!atomic_mass_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_atomic_mass_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!natural_abund_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_abund_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // calculate the atomic_masses of the elements \\n\"\n cpp_file += \" std::map<int,double> :: iterator it;\\n\"\n cpp_file += \" \\n\"\n cpp_file += \" for (int z = 1; z <= 92 ; z++) {\\n\"\n cpp_file += \" // loop through the natural abundance map\\n\"\n cpp_file += \" double element_atomic_weight = 0.0;\\n\"\n cpp_file += \" for (it = natural_abund_map.begin(); it != natural_abund_map.end() ; ++it){\\n\"\n cpp_file += \" // if the atomic number of the abudance matches the\\n\"\n cpp_file += \" // that of index\\n\"\n cpp_file += \" if(pyne::nucname::znum(it->first) == z) {\\n\"\n cpp_file += \" // take atomic abundance and multiply by mass\\n\"\n cpp_file += (\n \" // to get the mass of that nuclide / 100 since abundance is in %\\n\"\n )\n cpp_file += \" element_atomic_weight += (it->second*atomic_mass_map[it->first]/100.0);\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // insert the abundance of the element into the list\\n\"\n cpp_file += \" atomic_mass_map[z*10000000] = element_atomic_weight;\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_atomic_mass_map() { \\n\"\n cpp_file += generate_atomic_mass()\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_abund_map() { \\n\"\n cpp_file += generate_abundances()\n cpp_file += \"}\\n\"\n return cpp_file", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def generate(env):\n c_file, cxx_file = 
SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n env[\"LEX\"] = env.Detect(\"flex\") or \"lex\"\n env[\"LEXFLAGS\"] = SCons.Util.CLVar(\"\")\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS -t $SOURCES > $TARGET\"", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. 
#\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for 
Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = 
re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} 
{1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def cross_compile(*args, **kwargs):\n return compile(*args, **kwargs)", "def cython_py2c(self, name, t, inst_name=None, proxy_name=None):\n t = self.canon(t)\n if isinstance(t, basestring) or 0 == t[-1] or self.isrefinement(t[-1]):\n last = ''\n elif isinstance(t[-1], int):\n last = ' [{0}]'.format(t[-1])\n else:\n last = ' ' + t[-1]\n tkey = t\n tinst = None\n while tkey not in self.cython_py2c_conv and not isinstance(tkey, basestring):\n tinst = tkey\n tkey = tkey[1] if (0 < len(tkey) and self.isrefinement(tkey[1])) else tkey[0]\n if tkey not in self.cython_py2c_conv:\n tkey = t\n while tkey not in self.cython_py2c_conv and \\\n not isinstance(tkey, basestring):\n tkey = tkey[0]\n py2ct = self.cython_py2c_conv[tkey]\n if callable(py2ct):\n self.cython_py2c_conv[t] = py2ct(t, self)\n py2ct = self.cython_py2c_conv[t]\n if py2ct is NotImplemented or py2ct is None:\n raise NotImplementedError('conversion from Python to C/C++ for ' + \\\n str(t) + ' has not been implemented.')\n body_template, rtn_template = py2ct\n var = name if inst_name is None else \"{0}.{1}\".format(inst_name, name)\n proxy_name = \"{0}_proxy\".format(name) if proxy_name is None else proxy_name\n tstr = self.typestr(t, self)\n template_kw = dict(var=var, proxy_name=proxy_name, last=last, t=tstr)\n nested = False\n if self.isdependent(tkey):\n tsig = [ts for ts in self.refined_types if ts[0] == tkey][0]\n for ts, ti in zip(tsig[1:], tinst[1:]):\n if isinstance(ts, basestring):\n template_kw[ts] = self.cython_ctype(ti)\n else:\n template_kw[ti[0]] = ti[2]\n vartype = self.refined_types[tsig]\n if vartype in tsig[1:]:\n vartype = tinst[tsig.index(vartype)][1]\n if self.isrefinement(vartype):\n nested = True\n vdecl, vbody, vrtn = self.cython_py2c(var, vartype)\n template_kw['var'] = vrtn\n body_filled = body_template.format(**template_kw)\n if rtn_template:\n if '{t.cython_ctype}'in body_template:\n deft = tstr.cython_ctype\n elif '{t.cython_ctype_nopred}'in body_template:\n deft = tstr.cython_ctype_nopred\n elif '{t.cython_cytype_nopred}'in body_template:\n deft = tstr.cython_cytype_nopred\n else:\n deft = tstr.cython_cytype\n decl = \"cdef {0} {1}\".format(deft, proxy_name)\n body = body_filled\n rtn = rtn_template.format(**template_kw)\n decl += '\\n'+\"\\n\".join([l for l in body.splitlines() \\\n if l.startswith('cdef')])\n body = \"\\n\".join([l for l in body.splitlines() \\\n if not l.startswith('cdef')])\n else:\n decl = body = None\n rtn = body_filled\n if nested:\n decl = '' if decl is None else decl\n vdecl = '' if vdecl is None else vdecl\n decl = (vdecl + '\\n' + decl).strip()\n decl = None if 0 == len(decl) else decl\n body = '' if 
body is None else body\n vbody = '' if vbody is None else vbody\n body = (vbody + '\\n' + body).strip()\n body = None if 0 == len(body) else body\n return decl, body, rtn", "def define_extensions(use_cython, use_openmp):\n if sys.platform.startswith('win'):\n # compile args from\n # https://msdn.microsoft.com/en-us/library/fwkeyyhe.aspx\n link_args = []\n compile_args = ['/O2', '/openmp']\n else:\n link_args = []\n compile_args = ['-Wno-unused-function', '-Wno-maybe-uninitialized', '-O3', '-ffast-math']\n if use_openmp:\n compile_args.append('-fopenmp')\n link_args.append('-fopenmp')\n\n if 'anaconda' not in sys.version.lower():\n compile_args.append('-march=native')\n\n # recommended approach is that the user can choose not to\n # compile the code using cython, they can instead just use\n # the .c file that's also distributed\n # http://cython.readthedocs.io/en/latest/src/reference/compilation.html#distributing-cython-modules\n src_ext = '.pyx' if use_cython else '.c'\n names = ['pairwise3']\n modules = [Extension(name,\n [os.path.join(name + src_ext)],\n extra_compile_args = compile_args,\n extra_link_args = link_args) for name in names]\n\n if use_cython:\n return cythonize(modules)\n else:\n return modules", "def finalize_cython(program_info, s):\n if verbose:\n util.print_header('finalize_cython received:', s)\n \n if macros.workaround_cython_546:\n s = macros.fix_range_3_args(s)\n \n T_replace_node = 0.0\n T_rewrite_var = 0.0\n \n T0 = time.time()\n rootnode = py_ast.get_ast(s)\n py_ast.add_parent_info(rootnode)\n \n rewrite_vars = {}\n \n all_nodes = py_ast.find_all(rootnode, (ast.Str, ast.Subscript, ast.Call))\n comment_nodes = [tnode for tnode in all_nodes if isinstance(tnode, ast.Str)]\n \n T1 = time.time()\n lines = s.split('\\n')\n \n for commentnode in comment_nodes:\n comment = py_ast.dump_ast(commentnode)\n if comment.startswith(cython_preallocate_intermediate):\n (prefix, varname, hexcode_ordinary, hexcode_float32) = comment.split()[:4]\n try:\n defnode = get_previous_node_func(rootnode, commentnode, ast.FunctionDef)\n except TransformError:\n warnings.warn('Could not extract defnode for preallocate intermediate node:', comment)\n continue\n \n if id(defnode) not in rewrite_vars:\n rewrite_vars.setdefault(id(defnode), get_all_rewrite_vars_py_ast(defnode))\n \n is_rewrite = varname in rewrite_vars[id(defnode)]\n \n if verbose:\n print('commentnode:', commentnode, 'prefix:', prefix, 'varname:', varname, 'hexcode_ordinary:', hexcode_ordinary, 'hexcode_float32:', hexcode_float32, 'is_rewrite:', is_rewrite)\n \n local_types = None\n try:\n local_types = chosen_typespec_loads_py_ast(program_info, defnode)\n except TransformError:\n pass\n \n if local_types is None: # In the non-type-specialized function, do nothing\n continue\n \n if is_rewrite:\n commentnode.s = ''\n if verbose:\n print(' => commentnode after rewrite:', commentnode)\n else:\n try:\n var_type = local_types[varname]\n except NameError:\n continue\n \n if var_type.primitive_type() == 'float':\n hexcode = hexcode_float32\n else:\n hexcode = hexcode_ordinary\n start_time_replace_node = time.time()\n py_ast.replace_node(rootnode, commentnode, \n py_ast.get_ast(cython_preallocate_intermediate + ' ' + varname + ' ' + hexcode))\n T_replace_node += time.time() - start_time_replace_node\n T2 = time.time()\n\n # Rewrite a[y,x] to a[y][x] for the variables that were rewritten to C array type\n subscript_nodes = [tnode for tnode in all_nodes if isinstance(tnode, ast.Subscript)]\n for node in subscript_nodes:\n try:\n varname 
= node.value.id\n except:\n continue\n try:\n defnode = get_previous_node_func(rootnode, node, ast.FunctionDef)\n except TransformError:\n continue\n\n if id(defnode) not in rewrite_vars:\n rewrite_vars.setdefault(id(defnode), get_all_rewrite_vars_py_ast(defnode)) \n is_rewrite = varname in rewrite_vars[id(defnode)]\n\n if is_rewrite:\n try:\n #not sure what it means here, is it used to check dictionary?\n do_continue = True\n if hasattr(node.slice, 'value'):\n if isinstance(node.slice.value, ast.Str):\n do_continue = False\n elif isinstance(node.slice.value, ast.Tuple):\n if isinstance(node.slice.value.elts[0], ast.Str):\n do_continue = False\n elif hasattr(node.slice, 'dims'):\n if isinstance(node.slice.dims[0], ast.Str):\n do_continue = False\n except:\n do_continue = False\n if do_continue:\n if hasattr(node.slice, 'value'):\n if not hasattr(node.slice.value, 'elts'):\n args = py_ast.dump_ast(node.slice.value)\n node_new_str = varname + ''.join('[' + args + ']')\n else:\n args = [py_ast.dump_ast(subnode) for subnode in node.slice.value.elts]\n node_new_str = varname + ''.join('[' + arg + ']' for arg in args)\n else:\n args = [py_ast.dump_ast(subnode for subnode in node.slice.dims)]\n node_new_str = varname + ''.join('[' + arg + ']' for arg in args)\n if verbose:\n print('node before replacement: ', py_ast.dump_ast(node), 'after replacement:', node_new_str)\n start_time_replace_node = time.time()\n py_ast.replace_node(rootnode, node, py_ast.get_ast(node_new_str).body[0].value)\n T_replace_node += time.time() - start_time_replace_node\n\n T3 = time.time()\n\n # Replace type_cast and pointer_cast with unique IDs that later get replaced with the Cython casting operation\n cast_d = {}\n \n atom_nodes = [tnode for tnode in all_nodes if isinstance(tnode, ast.Call) or isinstance(tnode, ast.Subscript)]\n for node in atom_nodes:\n #don't know what if hasattr(node, 'name') and isinstance(node.name, redbaron.NameNode): is used to check\n name = None\n try:\n if isinstance(node, ast.Call):\n if isinstance(node.func, ast.Name):\n name = node.func.id\n elif isinstance(node.func, ast.Attribute):\n name = node.func.value.id\n elif isinstance(node, ast.Subscript):\n if isinstance(node.value, ast.Name) and isinstance(node.slice, ast.Index) and isinstance(node.slice.value, ast.Tuple) and len(node.slice.value.elts) == 2:\n name = node.value.id\n except:\n name = None\n \n if name is not None:\n if verbose:\n print('finalize_cython, name:', name)\n if name in [pointer_cast, type_cast]:\n current_id = get_unique_id('cast')\n is_ptr = (name == pointer_cast)\n \n if isinstance(node, ast.Call):\n v0 = py_ast.dump_ast(node.args[0])\n v1 = py_ast.dump_ast(node.args[1])\n elif isinstance(node, ast.Subscript):\n v0 = py_ast.dump_ast(node.slice.value.elts[0])\n v1 = py_ast.dump_ast(node.slice.value.elts[1])\n else:\n raise ValueError\n \n #rest = ','.join([py_ast.dump_ast(tnode) for tnode in node.args])\n rest = ''\n ptr = '*' if is_ptr else ''\n while current_id in cast_d:\n current_id = get_unique_id('cast')\n cast_d[current_id] = '(<{} {}>({})){}'.format(v0, ptr, v1, rest)\n start_time_replace_node = time.time()\n py_ast.replace_node(rootnode, node, py_ast.get_ast(current_id).body[0].value)\n T_replace_node += time.time() - start_time_replace_node\n\n T4 = time.time()\n\n if verbose:\n util.print_header('finalize_cython, cast_d:', cast_d)\n \n T5 = time.time()\n s = py_ast.dump_ast(rootnode)\n T6 = time.time()\n\n # Replace cython_preallocate_intermediate with actual preallocation code\n lines = s.split('\\n')\n 
i=0\n while i < len(lines):\n line = lines[i]\n line_strip = lines[i].lstrip()\n if line_strip.startswith(cython_preallocate_intermediate):\n (j, indentation_j) = get_next_indentation(lines, i)\n if verbose:\n print('get_next_indentation: {} => {}, {!r}'.format(i, j, indentation_j))\n (prefix, varname, hexcode) = line_strip.strip().split()[:3]\n \n #if varname doesn't exist in any other part of the code, ignore this comment\n namenodes = py_ast.find_all(rootnode, ast.Name)\n names = [node.id for node in namenodes if node.id == varname]\n if len(names):\n code_block = binascii.unhexlify(hexcode.encode('ascii')).decode('ascii')\n code_blockL = code_block.split('\\n')\n lines[i:i+1] = [indentation_j + code_block_line for code_block_line in code_blockL]\n else:\n lines[i] = ''\n \n i += 1\n s = '\\n'.join(lines)\n T7 = time.time()\n\n # Replace var.shape with (<object> var).shape. Works around Cython bug 302 (http://trac.cython.org/ticket/302)\n rootnode = py_ast.get_ast(s)\n py_ast.add_parent_info(rootnode)\n strip_id = get_unique_id('finalize_cython')\n dot_nodes = py_ast.find_all(rootnode, ast.Attribute)\n for dot_node in dot_nodes:\n if isinstance(dot_node.value, ast.Name) and dot_node.attr == 'shape' and (not isinstance(dot_node.parent, ast.Subscript)):\n dot_node.value.id = '(' + strip_id + dot_node.value.id + ')'\n s = py_ast.dump_ast(rootnode)\n s = s.replace(strip_id, '<object>')\n\n T8 = time.time()\n\n for (current_id, current_s) in cast_d.items():\n s = s.replace(current_id, current_s)\n \n lines = s.split('\\n')\n i = 0\n while i < len(lines):\n line = lines[i]\n line_strip = lines[i].lstrip()\n (nindent, indentation) = get_indentation(lines[i])\n if line_strip.startswith(cython_replace_str):\n lines[i] = ''\n (j, indentation_j) = get_next_indentation(lines, i)\n lines[j] = indentation_j + line[nindent + len(cython_replace_str)+1:].rstrip()[:-1]\n #for j in range(i + 1, len(lines)):\n # if lines[j].strip() != '':\n # (nindent_j, indentation_j) = get_indentation(lines[j])\n # lines[j] = indentation_j + line[nindent + len(cython_replace_str)+1:].rstrip()[:-1]\n # break\n i = j\n elif line_strip.startswith(cython_str):\n (j, indentation_j) = get_next_indentation(lines, i)\n lines[i] = indentation_j + lines[i][nindent + len(cython_str)+1:].rstrip()[:-1]\n \n i += 1\n s = '\\n'.join(lines)\n s = cython_headers + '\\n'.join(list(macro_funcs_templated.templated_func.values())) + '\\n' + s\n\n T9 = time.time()\n\n s = macros.fix_cpointer_in_final_cython(s)\n T10 = time.time()\n if do_profile:\n profile['transforms: finalize_cython_part0'] += T1-T0\n profile['transforms: finalize_cython_part1'] += T2-T1\n profile['transforms: finalize_cython_part2'] += T3-T2\n profile['transforms: finalize_cython_part3'] += T4-T3\n profile['transforms: finalize_cython_part4'] += T5-T4\n profile['transforms: finalize_cython_part5'] += T6-T5\n profile['transforms: finalize_cython_part6'] += T7-T6\n profile['transforms: finalize_cython_part7'] += T8-T7\n profile['transforms: finalize_cython_part8'] += T9-T8\n profile['transforms: finalize_cython_part9'] += T10-T9\n profile['transforms: finalize_cython: replace_node'] += T_replace_node\n if verbose:\n util.print_header('finalize_cython returned:', s)\n return s", "def test_py_compile_basic(self):\n self._test_py_compile('basic')", "def _add_compiler_args(parser):\n ap = parser\n ap.add_argument('--full-event-pattern',\n help=\"If set, use the 'full' format \"\n \"(TYPE, (CLK, DST, SRC), MSG) for event patterns;\"\n \"otherwise, use 'short' format (MSG, 
SRC)\",\n action='store_true')\n ap.add_argument('--enable-object-pattern',\n help=\"Enable the use of object-style tuple pattern syntax:\"\n \" Object(ARGS...); which is equivalent to \"\n \"('Object', ARGS...)\",\n action='store_true')\n ap.add_argument('--enable-membertest-pattern',\n help=\"Overloads the Python 'in' operator to support using \"\n \"tuple patterns, e.g.: '(_a, 1, _) in S', which is \"\n \"equivalent to 'some((_a, 1, _) in S)'\",\n action='store_true')\n ap.add_argument('--enable-iterator-pattern',\n help=\"Overloads the Python 'for ... in ...' keywords to \"\n \"support using tuple patterns in the target, \"\n \"e.g.: '[b for (_a, 1, b) in S]', which is equivalent to \"\n \"'[b for (var1, var2, b) in S if var1 == a if var2 == b]'\",\n action='store_true')\n ap.add_argument('--use-top-semantic',\n help=\"Use 'top' semantics for query variable and \"\n \"parameter resolution. Under 'top' semantics, only \"\n \"parameters to the top-level query are marked.\",\n action='store_true')\n ap.add_argument('--no-table1',\n help=\"Disable table 1 quantification transformations. \"\n \"Only used when '-i' is enabled.\",\n action='store_true')\n ap.add_argument('--no-table2',\n help=\"Disable table 2 quantification transformations. \"\n \"Only used when '-i' is enabled.\",\n action='store_true')\n ap.add_argument('--no-table3',\n help=\"Disable table 3 quantification transformations. \"\n \"Only used when '-i' is enabled.\",\n action='store_true')\n ap.add_argument('--no-table4',\n help=\"Disable table 4 quantification transformations. \"\n \"Only used when '-i' is enabled.\",\n action='store_true')\n ap.add_argument('--jb-style',\n help=\"Generate Jon-friendly quantification transformations. \"\n \"Only useful with '-i'.\",\n action='store_true')\n ap.add_argument('--no-all-tables',\n help=\"Disable all quantification transformations. 
\"\n \"Only useful with '-i'.\",\n action='store_true')\n ap.add_argument('--module-name', type=str, default='__main__',\n help=\"Name of the module to be compiled.\")", "def generate(env) -> None:\n c_file, cxx_file = SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n if sys.platform == 'win32':\n # ignore the return, all we need is for the path to be added\n _ = get_lex_path(env, append_paths=True)\n\n env.SetDefault(\n LEX=env.Detect(BINS),\n LEXFLAGS=CLVar(\"\"),\n LEX_HEADER_FILE=\"\",\n LEX_TABLES_FILE=\"\",\n )\n\n if sys.platform == 'win32':\n env.SetDefault(LEXUNISTD=CLVar(\"\"))\n env[\"LEXCOM\"] = \"$LEX $LEXUNISTD $LEXFLAGS $_LEX_HEADER $_LEX_TABLES -t $SOURCES > $TARGET\"\n else:\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS $_LEX_HEADER $_LEX_TABLES -t $SOURCES > $TARGET\"\n\n env['_LEX_HEADER'] = '${LEX_HEADER_FILE and \"--header-file=\" + str(LEX_HEADER_FILE)}'\n env['_LEX_TABLES'] = '${LEX_TABLES_FILE and \"--tables-file=\" + str(LEX_TABLES_FILE)}'", "def preprocess_emitter(source, target, env):\n target = []\n for src in source:\n basename = os.path.basename(src.abspath)\n (base, _ext) = os.path.splitext(basename)\n prefix = \"\"\n for var in [\"OBJPREFIX\", \"OBJSUFFIX\", \"SHOBJPREFIX\", \"SHOBJSUFFIX\"]:\n mod = env.subst(\"$%s\" % var)\n if var == \"OBJSUFFIX\" and mod == \".o\":\n continue\n if var == \"SHOBJSUFFIX\" and mod == \".os\":\n continue\n if mod != \"\":\n prefix = prefix + \"_\" + mod\n target.append(prefix + base + \"_pp.c\")\n return target, source", "def build_extensions(self):\n c = self.compiler.compiler_type\n CF = [] ; LF=[]\n if \"CFLAGS\" in os.environ:\n CF = os.environ.get(\"CFLAGS\").split(\" \")\n if \"LDFLAGS\" in os.environ:\n LF = os.environ.get(\"LDFLAGS\").split(\" \")\n for e in self.extensions:\n if c in copt:\n e.extra_compile_args = copt[ c ] + CF\n e.extra_link_args = lopt[ c ] + LF\n print(\"Customised compiler\",c,e.extra_compile_args,\n e.extra_link_args)\n build_ext.build_ext.build_extensions(self)", "def dump_to_pyc(co, python_version, output_dir):\n # assume Windows path information from the .exe\n pyc_basename = ntpath.basename(co.co_filename)\n pyc_name = f'{pyc_basename}.pyc'\n\n if pyc_name not in IGNORE:\n logging.info(\"Extracting %s\", pyc_name)\n pyc_header = _generate_pyc_header(python_version, len(co.co_code))\n destination = os.path.join(output_dir, pyc_name)\n with open(destination, 'wb') as pyc:\n pyc.write(pyc_header)\n marshaled_code = marshal.dumps(co)\n pyc.write(marshaled_code)\n else:\n logging.info(\"Skipping %s\", pyc_name)", "def preprocessor(output_directory, filepath, stats, hip_clang_launch, is_pytorch_extension, clean_ctx):\n fin_path = os.path.join(output_directory, filepath)\n with open(fin_path, 'r', encoding='utf-8') as fin:\n output_source = fin.read()\n\n fout_path = os.path.join(output_directory, get_hip_file_path(filepath))\n if not os.path.exists(os.path.dirname(fout_path)):\n clean_ctx.makedirs(os.path.dirname(fout_path))\n\n # unsupported_calls statistics reporting is broken atm\n def pt_repl(m):\n return PYTORCH_MAP[m.group(0)]\n\n if is_pytorch_extension:\n output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)\n else:\n if 
is_pytorch_file(filepath):\n output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)\n else:\n def c2_repl(m):\n return CAFFE2_MAP[m.group(0)]\n output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)\n\n # Header rewrites\n def mk_repl(templ):\n def repl(m):\n f = m.group(1)\n if (\n f.startswith(\"ATen/cuda\")\n or f.startswith(\"ATen/native/cuda\")\n or f.startswith(\"ATen/native/quantized/cuda\")\n or f.startswith(\"ATen/native/sparse/cuda\")\n or f.startswith(\"THC/\")\n or f.startswith(\"THCUNN/\")\n or (f.startswith(\"THC\") and not f.startswith(\"THCP\"))\n ):\n return templ.format(get_hip_file_path(m.group(1)))\n return m.group(0)\n return repl\n output_source = RE_QUOTE_HEADER.sub(mk_repl('#include \"{0}\"'), output_source)\n output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>'), output_source)\n output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE \"{0}\"'), output_source)\n\n # CMakeLists.txt rewrites\n if filepath.endswith('CMakeLists.txt'):\n output_source = output_source.replace('CUDA', 'HIP')\n output_source = output_source.replace('THC', 'THH')\n output_source = RE_CU_SUFFIX.sub('.hip', output_source)\n\n # Perform Kernel Launch Replacements\n if not hip_clang_launch:\n output_source = processKernelLaunches(output_source, stats)\n\n # Replace std:: with non-std:: versions\n if (filepath.endswith(\".cu\") or filepath.endswith(\".cuh\")) and \"PowKernel\" not in filepath:\n output_source = replace_math_functions(output_source)\n\n # Include header if device code is contained.\n output_source = hip_header_magic(output_source)\n\n # Replace the extern __shared__\n output_source = replace_extern_shared(output_source)\n\n do_write = True\n if os.path.exists(fout_path):\n with open(fout_path, 'r', encoding='utf-8') as fout_old:\n do_write = fout_old.read() != output_source\n if do_write:\n with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:\n fout.write(output_source)\n return \"ok\"\n else:\n return \"skipped\"", "def generate(env):\n\n indent = find_indent()\n\n generator = lambda source, target, env, for_signature: pp_gen(source,\n target,\n env, indent)\n\n # Only handle C for now\n preprocess = Builder(generator=generator, suffix=\"_pp.c\",\n emitter=preprocess_emitter, src_suffix=\".c\")\n\n env.Append(BUILDERS={\"Preprocess\":preprocess})", "def compile(self,**attrs):\n\n\n\t\tpath_to_txt = os.path.join(self.path_raw,'COCA Text')\n\t\tpath_to_sources = os.path.join(self.path_raw,'coca-sources_2017_12.txt')\n\n\t\tif not os.path.exists(path_to_txt) or not os.path.exists(path_to_sources):\n\t\t\tprint(f'Place in {self.path_raw} the following files:\\n * COCA Text\\n * coca-sources_2017_12.txt')\n\t\t\treturn\n\n\t\t#txt\n\t\tself.compile_txt()\n\t\t# metadata\n\t\tself.compile_metadata()", "def run_cython(args):\n args = magic.arg_split(args, posix=True)\n filename = args.pop()\n if '--force' not in args:\n args.append('--force')\n ip = get_ipython()\n ip.extension_manager.load_extension('cython')\n with io.open(filename, 'r', encoding='utf-8') as f:\n ip.run_cell_magic('cython', ' '.join(args), f.read())", "def generate_outputs(input_dir, output_dir, clang_args):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n output_src_dir = os.path.join(output_dir, 'src/')\n web_dir = os.path.join(output_dir, 'web/')\n copy_web_resources(web_dir)\n\n input_files = get_source_file_list(input_dir)\n\n index = cindex.Index.create()\n tus = {}\n for src_filename in input_files:\n rel_src = 
os.path.relpath(src_filename, input_dir)\n print('Parsing ' + rel_src)\n\n if not is_header(src_filename):\n tus[src_filename] = index.parse(src_filename, args=clang_args)\n\n print('Performing cross-translation-unit analysis...')\n all_nodes = find_all_usrs(tus, input_files)\n\n annotation_sets = {src: HTMLAnnotationSet() for src in input_files}\n anchored_nodes = {}\n diagnostics = get_line_diagnostics(tus)\n src_to_output = {\n src: os.path.join(output_src_dir,\n os.path.relpath(src, input_dir)+'.html')\n for src in input_files\n }\n for src_filename in input_files:\n rel_src = os.path.relpath(src_filename, input_dir)\n print('Analyzing ' + rel_src)\n\n annotation_set = annotation_sets[src_filename]\n\n if src_filename in diagnostics:\n highlight_diagnostics(diagnostics[src_filename],\n annotation_set)\n\n if src_filename not in tus:\n continue\n\n tu = tus[src_filename]\n output_filename = src_to_output[src_filename]\n output_path = os.path.dirname(output_filename)\n rel_src_to_output = {src: os.path.relpath(src_to_output[src],\n output_path)\n for src in src_to_output}\n link_function_calls(tu,\n all_nodes,\n annotation_set,\n rel_src_to_output,\n anchored_nodes)\n\n add_anchors(annotation_sets, anchored_nodes)\n\n with open('templates/source.html', 'r') as tpl_file:\n tpl = Template(tpl_file.read())\n\n index_filename = os.path.join(output_dir, 'index.html')\n\n for src_filename in input_files:\n rel_src = os.path.relpath(src_filename, input_dir)\n print('Outputting ' + rel_src)\n\n with open(src_filename, 'r') as src_file:\n src = src_file.read()\n\n output_filename = src_to_output[src_filename]\n output_path = os.path.dirname(output_filename)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n web_path = os.path.relpath(web_dir, output_path)\n index_path = os.path.relpath(index_filename, output_path)\n\n with open(output_filename, 'w') as html_file:\n html_file.write(format_source(rel_src,\n src,\n annotation_sets[src_filename],\n tpl,\n web_path,\n index_path))\n\n web_path = os.path.relpath(web_dir, output_dir)\n with open(index_filename, 'w') as index_file:\n index_file.write(generate_source_index(src_to_output, input_dir,\n output_dir,\n output_src_dir, web_path,\n 'templates/index.html'))", "def cython(self, line, cell):\n from sage.misc.cython_c import cython_compile\n return cython_compile(cell)", "def drive_compile(args):\n tvmc_model = frontends.load_model(args.FILE, args.model_format, args.input_shapes)\n\n dump_code = [x.strip() for x in args.dump_code.split(\",\")] if args.dump_code else None\n\n compile_model(\n tvmc_model,\n args.target,\n opt_level=args.opt_level,\n executor=reconstruct_registry_entity(args, Executor),\n runtime=reconstruct_registry_entity(args, Runtime),\n tuning_records=args.tuning_records,\n package_path=args.output,\n cross=args.cross_compiler,\n cross_options=args.cross_compiler_options,\n output_format=args.output_format,\n dump_code=dump_code,\n target_host=None,\n desired_layout=args.desired_layout,\n disabled_pass=args.disabled_pass,\n pass_context_configs=args.pass_config,\n additional_target_options=reconstruct_target_args(args),\n )\n\n return 0", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First 
definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def compile_c(self):\n if(self.input == \"\"):\n stderr = subprocess.run(\n [\"gcc\", self.id+\".c\", \"-o\", self.id+\"_c\"], stderr=subprocess.PIPE).stderr.decode('utf-8')\n if(len(stderr) == 0):\n self.status = 1\n stdout = subprocess.run(\n [\"./\"+self.id+\"_c\"], stdout=subprocess.PIPE).stdout.decode('utf-8')\n self.output = stdout\n else:\n self.status = 0\n self.output = stderr\n else:\n pass", "def write_c_source(self, dst):\n wfd = open(dst, \"wt\")\n wfd.write(self.generate_c_source())\n wfd.close()" ]
[ "0.644899", "0.644899", "0.63634294", "0.63512045", "0.6192792", "0.6134807", "0.5765677", "0.5760449", "0.5747454", "0.5743692", "0.56498545", "0.5609859", "0.55956745", "0.5582601", "0.5581401", "0.5578906", "0.5548663", "0.5526328", "0.55054164", "0.5475244", "0.54675543", "0.54403216", "0.5439205", "0.54365754", "0.54218704", "0.5395951", "0.5390189", "0.5365238", "0.5363834", "0.5363394" ]
0.65219265
0
Helper method to get rsp options. rsp_file_syntax() is only guaranteed to be implemented if can_linker_accept_rsp() returns True.
def _rsp_options(self, tool: T.Union['Compiler', 'StaticLinker', 'DynamicLinker']) -> T.Dict[str, T.Union[bool, RSPFileSyntax]]:
    options = {'rspable': tool.can_linker_accept_rsp()}
    if options['rspable']:
        options['rspfile_quote_style'] = tool.rsp_file_syntax()
    return options
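The helper above only composes a small dict from two linker queries. A minimal sketch of that behaviour follows, using a hypothetical stub linker and a stand-in RSPFileSyntax enum (both are assumptions for illustration, not Meson's real classes); it shows the two shapes the returned dict can take.

import enum
import typing as T


class RSPFileSyntax(enum.Enum):
    # Stand-in for Meson's enum; member names/values here are illustrative only.
    MSVC = 'msvc'
    GCC = 'gcc'


class StubLinker:
    """Hypothetical linker used only for this sketch."""

    def __init__(self, rspable: bool) -> None:
        self._rspable = rspable

    def can_linker_accept_rsp(self) -> bool:
        return self._rspable

    def rsp_file_syntax(self) -> RSPFileSyntax:
        # Only meaningful when can_linker_accept_rsp() returns True.
        return RSPFileSyntax.GCC


def rsp_options(tool) -> T.Dict[str, T.Union[bool, RSPFileSyntax]]:
    # Same logic as _rsp_options above, written as a free function for the demo.
    options = {'rspable': tool.can_linker_accept_rsp()}
    if options['rspable']:
        options['rspfile_quote_style'] = tool.rsp_file_syntax()
    return options


print(rsp_options(StubLinker(rspable=True)))   # {'rspable': True, 'rspfile_quote_style': <RSPFileSyntax.GCC: 'gcc'>}
print(rsp_options(StubLinker(rspable=False)))  # {'rspable': False}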
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options", "def compilation_options(self):\n #'-target','i386-pc-linux-gnu','-m32','-O2',\n opts = ['-Wno-implicit-function-declaration','-Wno-incompatible-library-redeclaration','-fno-vectorize',\n '-fno-slp-vectorize','-gline-tables-only','-Xclang','-disable-lifetime-markers','-Rpass=.*','-Rpass-missed=.*',\n '-Rpass-analysis=.*','-mllvm','-inline-threshold=15000','-Dassert=__VERIFIER_assert']\n if self._options.property.undefinedness():\n opts.append('-fsanitize=undefined')\n opts.append('-fno-sanitize=unsigned-integer-overflow')\n elif self._options.property.signedoverflow():\n opts.append('-fsanitize=signed-integer-overflow')\n opts.append('-fsanitize=shift')\n\n return opts", "def source_flags(self):\n return self.ast_node.source_flags", "def rsp_process(rsp_signal, sampling_rate=1000, method=\"khodadad2018\"):\n # Clean signal\n rsp_cleaned = rsp_clean(rsp_signal, sampling_rate=sampling_rate, method=method)\n\n # Extract, fix and format peaks\n peak_signal, info = rsp_peaks(rsp_cleaned, sampling_rate=sampling_rate, method=method, amplitude_min=0.3)\n\n # Get additional parameters\n phase = rsp_phase(peak_signal, desired_length=len(rsp_signal))\n amplitude = rsp_amplitude(rsp_cleaned, peak_signal)\n rate = signal_rate(peak_signal, sampling_rate=sampling_rate, desired_length=len(rsp_signal))\n\n # Prepare output\n signals = pd.DataFrame(\n {\"RSP_Raw\": rsp_signal, \"RSP_Clean\": rsp_cleaned, \"RSP_Amplitude\": amplitude, \"RSP_Rate\": rate}\n )\n signals = pd.concat([signals, phase, peak_signal], axis=1)\n\n return signals, info", "def get_linux_opts(command_name):\n\n opts = None\n description = None\n\n soup, page_found = get_soup(LINUX_MAN_PAGES, command_name)\n\n if not page_found:\n return opts, description\n\n # The description is always the second pre on the page\n description = soup.find_all('pre')[1].text.strip()\n\n search_sections = [\n 'OPTIONS',\n 'EXPRESSION', # The GNU find man page has more options under this heading\n ]\n\n opts = set()\n for section in search_sections:\n opts.update(find_opts_linux(soup, section))\n\n return opts, description", "def _gtDiffrsp(self):\n if not os.path.isfile(self.outmktime):\n self._gtMktime()\n os.popen(\"gtdiffrsp evfile={} scfile={} srcmdl={} irfs={} convert=Y chatter=4\\\n evtype={}\".format(self.outmktime, self.ft2, self.diffModel, self.irf, self.evtype))\n return", "def linkOptions(self):\n linker_options = [x.text if x.text is not None else \"\" for x in\n self.subdoc.find(\"link-options\").findall(\"option\")]\n if not ld_option_verifier.verify(linker_options):\n env.error(u\"Linker option verification \"\n \"failed for bundle {} ({})\".format(\n self.input,\n ld_option_verifier.error_msg))\n if linker_options.count(\"-execute\") != 0:\n self.is_executable = True\n\n # make sure linker has a none zero version min for watchos.\n try:\n # check watchos version.\n version_min = linker_options.index(\"-watchos_version_min\")\n # if valid version min location, check if it is 0.0\n if version_min < (len(linker_options) - 1) and linker_options[version_min + 1] == \"0.0.0\":\n # write a default watchos version.\n if self.is_translate_watchos:\n linker_options[version_min + 1] = \"5.0.0\"\n else:\n linker_options[version_min + 1] = \"2.0.0\"\n self.deployment_target = 
linker_options[version_min + 1]\n except ValueError:\n # if watchos is not specified during translate, add default deployment target.\n if self.is_translate_watchos:\n linker_options.extend([\"-watchos_version_min\", \"5.0.0\"])\n\n if self.platform is not None and self.platform != \"Unknown\":\n linker_options.extend([\"-syslibroot\", env.getSDK()])\n if self.sdk_version is not None and self.sdk_version != \"NA\":\n linker_options.extend([\"-sdk_version\", self.sdk_version])\n return linker_options", "def get_options(self):\n options = dict()\n while True:\n line = self.rfile.readline().decode(\"utf8\").strip()\n if not line:\n break\n self.log.debug(\"Got line: %s\", line)\n if \":\" not in line:\n self.log.debug(\"Invalid option: %s\", line)\n error_msg = \"header not in 'Name: value' format\"\n raise oa.errors.InvalidOption(error_msg)\n name, value = line.split(\":\", 1)\n options[name.lower()] = value.strip()\n return options", "def _get_run_options(self, cmdp, exec_engine=None):\n cmdp.declare_options(\"-v= -e= -w= -u= -p= -i -t -a -P\")\n cmd_options = {\n \"netcoop\": {\n \"fl\": (\"-P\", \"--publish-all\", \"--netcoop\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"portsmap\": {\n \"fl\": (\"-p=\", \"--publish=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"novol\": {\n \"fl\": (\"--novol=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"vol\": {\n \"fl\": (\"-v=\", \"--volume=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"env\": {\n \"fl\": (\"-e=\", \"--env=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"envfile\": {\n \"fl\": (\"--env-file=\",), \"act\": 'E',\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"user\": {\n \"fl\": (\"-u=\", \"--user=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cwd\": {\n \"fl\": (\"-w=\", \"--workdir=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"entryp\": {\n \"fl\": (\"--entrypoint=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cpuset\": {\n \"fl\": (\"--cpuset-cpus=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostauth\": {\n \"fl\": (\"--hostauth\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"containerauth\": {\n \"fl\": (\"--containerauth\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nosysdirs\": {\n \"fl\": (\"--nosysdirs\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostenv\": {\n \"fl\": (\"--hostenv\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"bindhome\": {\n \"fl\": (\"--bindhome\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nometa\": {\n \"fl\": (\"--nometa\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dri\": {\n \"fl\": (\"--dri\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cmd\": {\n \"fl\": (\"P+\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"volfrom\": {\n \"fl\": (\"--volumes-from=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dns\": {\n \"fl\": (\"--dns=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dnssearch\": {\n \"fl\": (\"--dns-search=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"kernel\": {\n \"fl\": (\"--kernel=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"devices\": {\n \"fl\": (\"--device=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"nobanner\": {\n \"fl\": (\"--nobanner\",), 
\"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"platform\": {\n \"fl\": (\"--platform=\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"pull\": {\n \"fl\": (\"--pull=\"), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n }\n }\n for option, cmdp_args in list(cmd_options.items()):\n last_value = None\n for cmdp_fl in cmdp_args[\"fl\"]:\n option_value = cmdp.get(cmdp_fl, cmdp_args[\"p2\"],\n cmdp_args[\"p3\"])\n if not exec_engine:\n continue\n if cmdp_args[\"act\"] == \"R\": # action is replace\n if option_value or last_value is None:\n exec_engine.opt[option] = option_value\n elif cmdp_args[\"act\"] == \"E\": # action is extend\n # if option == \"env\":\n # print (type(option_value))\n # print (option_value)\n exec_engine.opt[option].extend(option_value)\n last_value = option_value", "def get_options(self) -> dict:\n assert self.task\n task_options = {\n **self.task.get_task_options(),\n **self.expr.task_expr_options,\n **self.task_options,\n }\n return task_options", "def cmdline(self, executable, options, task, rlimits):\n data_model_param = get_data_model_from_task(task, {ILP32: \"-m32\", LP64: \"-m64\"})\n print(options)\n if data_model_param and not any(\n option.startswith(\"--clang-options=\") for option in options\n ):\n options += [\"--clang-options=\" + data_model_param]\n\n if task.property_file:\n options += [\"--svcomp-property\", task.property_file]\n else:\n raise UnsupportedFeatureException(\n \"SMACK can't execute without a property file.\"\n )\n\n options += [task.single_input_file]\n\n return [executable] + options", "def __get_options(self):\n for sect in self.file_parser.sections():\n if self.file_parser.has_option(sect, 'implementation'):\n selected_imp = self.file_parser.get(sect, 'implementation')\n imptype = self.file_parser.get(sect, 'optype')\n # pylint: disable = E1103\n enabled = self.file_parser.get(sect, 'enabled').lower()\n # pylint: enable = E1103\n if enabled == 'always':\n stateval = True\n permanent = True\n elif enabled == 'true':\n stateval = True\n permanent = False\n else:\n stateval = False\n permanent = False\n\n if self.file_parser.has_option(sect, 'id'):\n _id = self.file_parser.get(sect, 'id')\n self.opt_dict[sect]['id'] = _id\n\n self.opt_dict[sect]['permanent'] = permanent\n self.opt_dict[sect]['imptype'] = imptype\n if stateval == True:\n imp_unavailable = (selected_imp in self.imp2opt_dict) and (\n self.imp2opt_dict[selected_imp] != 'none' )\n if selected_imp == 'none' or imp_unavailable:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'\n else:\n self.opt_dict[sect]['enabled'] = True\n self.set_imp(sect, selected_imp)\n# dbmsg = 'Add imp2opt_dict[{0}] = {1}'\n# print dbmsg.format(selected_imp, sect)\n else:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'", "def get_options(options, opt_path):\r\n options_in = open(opt_path, 'r')\r\n # get exceptions\r\n for line_in in options_in:\r\n line = line_in.strip()\r\n if len(line) == 0:\r\n continue\r\n if line.startswith(\"#\"):\r\n continue\r\n if line.startswith(\"[\") and \"pep8\" in line:\r\n continue\r\n option = line\r\n if not line.startswith(\"-\"):\r\n line = \"--\" + line\r\n options.append(line)\r\n\r\n options_in.close()", "def get_current_syntax(self, view):\n\n syntaxFile = view.settings().get('syntax')\n\n if syntaxFile not in syntaxInfos:\n syntaxInfos[syntaxFile] = {\n 'fileName' : os.path.splitext(os.path.basename(syntaxFile))[0],\n 'syntaxName' : 
self.find_syntax_name(syntaxFile),\n }\n\n return [\n v\n for v in syntaxInfos[syntaxFile].values()\n if isinstance(v, str)\n ]", "def get_gromacs_arch(self):\n # default: fall back on autodetection\n res = None\n\n optarch = build_option('optarch') or ''\n # take into account that optarch value is a dictionary if it is specified by compiler family\n if isinstance(optarch, dict):\n comp_fam = self.toolchain.comp_family()\n optarch = optarch.get(comp_fam, '')\n optarch = optarch.upper()\n\n # The list of GMX_SIMD options can be found\n # http://manual.gromacs.org/documentation/2018/install-guide/index.html#simd-support\n if 'MIC-AVX512' in optarch and LooseVersion(self.version) >= LooseVersion('2016'):\n res = 'AVX_512_KNL'\n elif 'AVX512' in optarch and LooseVersion(self.version) >= LooseVersion('2016'):\n res = 'AVX_512'\n elif 'AVX2' in optarch and LooseVersion(self.version) >= LooseVersion('5.0'):\n res = 'AVX2_256'\n elif 'AVX' in optarch:\n res = 'AVX_256'\n elif 'SSE3' in optarch or 'SSE2' in optarch or 'MARCH=NOCONA' in optarch:\n # Gromacs doesn't have any GMX_SIMD=SSE3 but only SSE2 and SSE4.1 [1].\n # According to [2] the performance difference between SSE2 and SSE4.1 is minor on x86\n # and SSE4.1 is not supported by AMD Magny-Cours[1].\n res = 'SSE2'\n elif optarch == OPTARCH_GENERIC:\n cpu_arch = get_cpu_architecture()\n if cpu_arch == X86_64:\n res = 'SSE2'\n else:\n res = 'None'\n elif optarch:\n warn_msg = \"--optarch configuration setting set to %s but not taken into account; \" % optarch\n warn_msg += \"compiling GROMACS for the current host architecture (i.e. the default behavior)\"\n self.log.warning(warn_msg)\n print_warning(warn_msg)\n\n if res:\n self.log.info(\"Target architecture based on optarch configuration option ('%s'): %s\", optarch, res)\n else:\n self.log.info(\"No target architecture specified based on optarch configuration option ('%s')\", optarch)\n\n return res", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n from pydft import base\n pdescr = \"Numerical DFT code.\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args # pragma: no cover", "def get(self):\n if self.parser_type == 'optparse':\n actions = self.parser.option_list\n elif self.parser_type == 'argparse':\n actions = self.parser._actions\n ret = []\n ret.append(\"#compdef %s\" % self.commandname)\n ret.append(\"#\\n# this is zsh completion function file.\")\n ret.append(\"# generated by genzshcomp(ver: %s)\\n#\\n\" % __version__)\n ret.append(\"typeset -A opt_args\")\n ret.append(\"local context state line\\n\")\n ret.append(\"_arguments -s -S \\\\\")\n for action in actions:\n if action.metavar:\n metavar = \"::%s:_files\" % action.metavar\n else:\n metavar = \"\"\n if self.parser_type == 'optparse':\n opts = [i for i in action._long_opts]\n opts += [i for i in action._short_opts]\n elif self.parser_type == 'argparse':\n opts = action.option_strings\n for opt in opts:\n directory_comp = self._get_dircomp(opt)\n tmp = \" \\\"%s[%s]%s%s\\\" \\\\\" % (opt,\n _escape_squarebracket(action.help),\n metavar, directory_comp)\n ret.append(tmp)\n ret.append(\" \\\"*::args:_files\\\"\")\n return \"\\n\".join(ret)", "def _get_build_options(cls, opt: Opt):\n query_model = 'bert'\n document_model = 'bert'\n query_path = opt['model_file']\n 
document_path = opt['model_file']\n try:\n # determine if loading a RAG model\n loaded_opt = Opt.load(f\"{query_path}.opt\")\n document_path = loaded_opt.get('dpr_model_file', document_path)\n if loaded_opt['model'] in ['rag', 'fid'] and loaded_opt['query_model'] in [\n 'bert',\n 'bert_from_parlai_rag',\n ]:\n query_model = 'bert_from_parlai_rag'\n if loaded_opt['model'] == 'fid':\n # document model is always frozen\n # but may be loading a FiD-RAG Model\n doc_loaded_opt = Opt.load(\n f\"{modelzoo_path(opt['datapath'], document_path)}.opt\"\n )\n document_path = doc_loaded_opt.get('dpr_model_file', document_path)\n\n except FileNotFoundError:\n pass\n\n return query_model, query_path, document_model, document_path", "def _GetOptionsParser():\n\n parser = optparse.OptionParser(__doc__)\n\n parser.add_option('--filePath',\n dest='filePath',\n action='store',\n help='js or css file path')\n\n return parser", "def _get_options(self):\n return self.options", "def options(self):\n if self._ast:\n for option in self._ast[1]:\n yield option", "def runoptions(self):\n # outstanding = self.missing_required()\n # if outstanding:\n # raise TypeError('Module missing required parameter: %s' % ', '.join(outstanding))\n return self._runopts", "def _getoptions():\n parser = OptionParser()\n parser.add_option(\"-f\", \"--dwca_file\", dest=\"dwca_file\",\n help=\"Darwin Core Archive file\",\n default=None)\n return parser.parse_args()[0]", "def _get_options(arguments):\n options = {'mods':[]}\n ex_conditions = [\n (arguments.regex, 'regex'),\n (arguments.silent, 'silent'),\n (arguments.dry, 'dry'),\n (arguments.confirm, 'confirm'),\n (arguments.loglvl, 'loglvl'),\n (arguments.force, 'force'),\n (arguments.nobin, 'nobin'),\n (arguments.userconfpath, 'userconfpath')\n ]\n\n conditions = [\n (arguments.binmove, ['binmove']),\n (arguments.bincopy, ['bincopy']),\n (arguments.bincreate, ['bincreate']),\n (arguments.binempty, ['binempty']),\n (arguments.binpath, ['binpath']),\n (arguments.binprint, ['binprint']),\n (arguments.restore, ['restore']),\n (not arguments.restore, ['remove'])\n ]\n\n for condition in conditions:\n if condition[0]:\n if type(condition[0]) != bool:\n options.update({'path': condition[0]})\n options['mods'] += condition[1]\n break\n\n for ex_condition in ex_conditions:\n if ex_condition[0]:\n options.update({ex_condition[1]: ex_condition[0]})\n return options", "async def get_options(self):", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def get_options():\n\n global args\n\n options = parser.add_argument_group(\"flags\")\n options.add_argument(\n \"-t\",\n \"--hash-type\",\n help=\"type of hash from the following: lm, ntlm, md4, md5, sha1, sha256, sha512\",\n metavar=\"\",\n required=True,\n choices=[\"lm\", \"ntlm\", \"md4\", \"md5\", \"sha1\", \"sha256\", \"sha512\"],\n )\n options.add_argument(\n \"-w\",\n \"--wordlist\",\n help=\"file path to wordlist\",\n metavar=\"\",\n type=argparse.FileType(\"r\"),\n required=True,\n )\n\n hash_group = options.add_mutually_exclusive_group(required=True)\n hash_group.add_argument(\n \"-s\", \"--hash-string\", help=\"hash 
string to crack\", metavar=\"\"\n )\n hash_group.add_argument(\n \"-l\",\n \"--hash-list\",\n help=\"file path to the list of hashes\",\n metavar=\"\",\n type=argparse.FileType(\"r\"),\n )\n\n args = parser.parse_args()", "def _get_lsp_config_path_select_mode_unconditional(self):\n return self.__lsp_config_path_select_mode_unconditional", "def get_parsed_flags():\n return Flags.parsed_args", "def options(self):\r\n return self._options" ]
[ "0.50831187", "0.4936194", "0.4710779", "0.4703603", "0.47000256", "0.46416172", "0.46384525", "0.46382034", "0.46292686", "0.45956933", "0.45816252", "0.4573522", "0.45415303", "0.45326465", "0.45186582", "0.44897", "0.44642767", "0.44639853", "0.4455904", "0.44428545", "0.4437825", "0.44121397", "0.43968147", "0.4396251", "0.4382855", "0.43708652", "0.43596554", "0.4349671", "0.43468475", "0.43130547" ]
0.7745526
0
scan a Fortran file for dependencies. Needs to be distinct from target to allow for recursion induced by `include` statements.

It makes a number of assumptions, including

* `use`, `module`, `submodule` name is not on a continuation line

Regex

* `incre` works for `include "foo.f90"` and `include "foo.f90"`
* `usere` works for legacy and Fortran 2003 `use` statements
* `submodre` is for Fortran >= 2008 `submodule`
def _scan_fortran_file_deps(src: Path, srcdir: Path, dirname: Path, tdeps, compiler) -> T.List[str]:
    incre = re.compile(FORTRAN_INCLUDE_PAT, re.IGNORECASE)
    usere = re.compile(FORTRAN_USE_PAT, re.IGNORECASE)
    submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)

    mod_files = []
    src = Path(src)
    with src.open(encoding='ascii', errors='ignore') as f:
        for line in f:
            # included files
            incmatch = incre.match(line)
            if incmatch is not None:
                incfile = src.parent / incmatch.group(1)
                # NOTE: src.parent is most general, in particular for CMake subproject with Fortran file
                # having an `include 'foo.f'` statement.
                if incfile.suffix.lower()[1:] in compiler.file_suffixes:
                    mod_files.extend(_scan_fortran_file_deps(incfile, srcdir, dirname, tdeps, compiler))
            # modules
            usematch = usere.match(line)
            if usematch is not None:
                usename = usematch.group(1).lower()
                if usename == 'intrinsic':  # this keeps the regex simpler
                    continue
                if usename not in tdeps:
                    # The module is not provided by any source file. This
                    # is due to:
                    #   a) missing file/typo/etc
                    #   b) using a module provided by the compiler, such as
                    #      OpenMP
                    # There's no easy way to tell which is which (that I
                    # know of) so just ignore this and go on. Ideally we
                    # would print a warning message to the user but this is
                    # a common occurrence, which would lead to lots of
                    # distracting noise.
                    continue
                srcfile = srcdir / tdeps[usename].fname
                if not srcfile.is_file():
                    if srcfile.name != src.name:  # generated source file
                        pass
                    else:  # subproject
                        continue
                elif srcfile.samefile(src):  # self-reference
                    continue
                mod_name = compiler.module_name_to_filename(usename)
                mod_files.append(str(dirname / mod_name))
            else:  # submodules
                submodmatch = submodre.match(line)
                if submodmatch is not None:
                    parents = submodmatch.group(1).lower().split(':')
                    assert len(parents) in {1, 2}, (
                        'submodule ancestry must be specified as'
                        f' ancestor:parent but Meson found {parents}')

                    ancestor_child = '_'.join(parents)
                    if ancestor_child not in tdeps:
                        raise MesonException("submodule {} relies on ancestor module {} that was not found.".format(submodmatch.group(2).lower(), ancestor_child.split('_', maxsplit=1)[0]))
                    submodsrcfile = srcdir / tdeps[ancestor_child].fname
                    if not submodsrcfile.is_file():
                        if submodsrcfile.name != src.name:  # generated source file
                            pass
                        else:  # subproject
                            continue
                    elif submodsrcfile.samefile(src):  # self-reference
                        continue
                    mod_name = compiler.module_name_to_filename(ancestor_child)
                    mod_files.append(str(dirname / mod_name))
    return mod_files
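A rough, self-contained illustration of the three regexes this scanner leans on follows. The patterns below are approximations written for this sketch (the real FORTRAN_INCLUDE_PAT, FORTRAN_USE_PAT and FORTRAN_SUBMOD_PAT constants live in Meson and may differ in detail), but they show the kind of names the function extracts from `include`, `use` and `submodule` lines.

import re

# Illustrative approximations of the patterns the scanner compiles.
INCLUDE_PAT = r"^\s*#?include\s*['\"](\w+\.\w+)['\"]"
USE_PAT = r"^\s*use,?\s*(?:non_intrinsic)?\s*(?:::)?\s*(\w+)"
SUBMOD_PAT = r"^\s*submodule\s*\((\w+:?\w+)\)\s*(\w+)"

incre = re.compile(INCLUDE_PAT, re.IGNORECASE)
usere = re.compile(USE_PAT, re.IGNORECASE)
submodre = re.compile(SUBMOD_PAT, re.IGNORECASE)

lines = [
    'include "constants.f90"',
    "  use mpi_f08, only: mpi_init",
    # `use, intrinsic :: ...` captures the word 'intrinsic', which the
    # scanner above then skips explicitly instead of complicating the regex.
    "  use, intrinsic :: iso_fortran_env",
    "submodule (parent:child) grandchild",
]

for line in lines:
    for name, pat in (("include", incre), ("use", usere), ("submodule", submodre)):
        m = pat.match(line)
        if m:
            print(f"{name:9s} -> {m.groups()}")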
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_fortran_module_outputs(self, target):\n if self.use_dyndeps_for_fortran():\n return\n compiler = None\n # TODO other compilers\n for lang, c in self.environment.coredata.compilers.host.items():\n if lang == 'fortran':\n compiler = c\n break\n if compiler is None:\n self.fortran_deps[target.get_basename()] = {}\n return\n\n modre = re.compile(FORTRAN_MODULE_PAT, re.IGNORECASE)\n submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)\n module_files = {}\n submodule_files = {}\n for s in target.get_sources():\n # FIXME, does not work for Fortran sources generated by\n # custom_target() and generator() as those are run after\n # the configuration (configure_file() is OK)\n if not compiler.can_compile(s):\n continue\n filename = s.absolute_path(self.environment.get_source_dir(),\n self.environment.get_build_dir())\n # Fortran keywords must be ASCII.\n with open(filename, encoding='ascii', errors='ignore') as f:\n for line in f:\n modmatch = modre.match(line)\n if modmatch is not None:\n modname = modmatch.group(1).lower()\n if modname in module_files:\n raise InvalidArguments(\n f'Namespace collision: module {modname} defined in '\n f'two files {module_files[modname]} and {s}.')\n module_files[modname] = s\n else:\n submodmatch = submodre.match(line)\n if submodmatch is not None:\n # '_' is arbitrarily used to distinguish submod from mod.\n parents = submodmatch.group(1).lower().split(':')\n submodname = parents[0] + '_' + submodmatch.group(2).lower()\n\n if submodname in submodule_files:\n raise InvalidArguments(\n f'Namespace collision: submodule {submodname} defined in '\n f'two files {submodule_files[submodname]} and {s}.')\n submodule_files[submodname] = s\n\n self.fortran_deps[target.get_basename()] = {**module_files, **submodule_files}", "def used_mods(ffile):\n import re\n import codecs\n\n # Go through line by line,\n # remove comments and strings because the latter can include ';'.\n # Then split at at ';', if given.\n # The stripped line should start with 'use '.\n # After use should be the \"module_name\", ', intrinsic :: module_name', or\n # ', non_intrinsic :: module_name'. We allow also to use \":: module_name\"\n # After module name should only be ', only: ...' 
or ', a ==> b'\n olist = list()\n of = codecs.open(ffile, 'r', encoding='ascii', errors='ignore')\n for line in of:\n ll = line.rstrip().lower() # everything lower case\n ll = re.sub('!.*$', '', ll) # remove F90 comment\n ll = re.sub('^c.*$', '', ll) # remove F77 comments\n ll = re.sub('\".*?\"', '', ll) # remove \"string\"\n ll = re.sub(\"'.*?'\", '', ll) # remove 'string'\n # check if several commands are on one line\n if ';' in ll:\n lll = ll.split(';')\n else:\n lll = [ll]\n for il in lll:\n iil = il.strip()\n # line should start with 'use '\n if iil.startswith('use '):\n iil = iil[4:].strip() # remove 'use '\n # skip intrinsic modules\n if 'intrinsic' in iil:\n if 'non_intrinsic' in iil:\n iil = re.sub(', *non_intrinsic', '', iil)\n iil = iil.strip()\n else:\n continue # skip to next in lll\n if iil.startswith('::'):\n iil = iil[2:].strip() # remove ':: '\n # remove after ',' if rename-list or only-list\n iil = re.sub(',.*$', '', iil)\n olist.append(iil.strip())\n of.close()\n\n return olist", "def parse_deps():\n Files = []\n Dependencies = []\n TimeBins = ['recover_parameters', 'startup', 'wragh', 'paramcheck',\n 'preregridinitial', 'postregridinitial', 'basegrid', \n 'initial', 'postinitial', 'postrestrictinitial', \n 'postpostinitial', 'recover_variables', \n 'post_recover_variables', 'cpinitial', 'checkpoint', \n 'preregrid', 'postregrid', 'prestep', 'evol', 'postrestrict', \n 'poststep', 'analysis', 'terminate', 'shutdown']\n\n implement_re = re.compile('implements:\\s*(\\w+)', re.I)\n inherit_re = re.compile('inherits:\\s*(.+)', re.I)\n provides_function_re = re.compile('PROVIDES\\s+FUNCTION\\s+(\\w+)', re.I)\n uses_function_re = re.compile('USES\\s+FUNCTION\\s+(\\w+)', re.I)\n requires_function_re = re.compile('REQUIRES\\s+FUNCTION\\s+(\\w+)', re.I)\n shares_re = re.compile('shares:\\s*(\\w+)', re.I)\n requires_thorn_re = re.compile('REQUIRES\\s+(?!FUNCTION\\s*)(\\w+)', re.I)\n schedules_function_re = re.compile('schedule\\s+(?:group\\s+)?(\\w+)\\s+(?:in|at)\\s+(\\w+)', re.I)\n\n # find all interface.ccl and param.ccl files in cwd\n Cactus_Path = os.path.expanduser('~/Cactus/')\n for dirpath, dirnames, filenames in os.walk(Cactus_Path + 'arrangements', followlinks=True):\n for file in filenames:\n if file == 'interface.ccl':\n Files.append(os.path.join(dirpath, file))\n\n for file in Files:\n # first parse interface.ccl\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines = fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then parse param.ccl\n file = re.sub('interface.ccl', 'param.ccl', file)\n\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines += fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then configuration.ccl\n file = re.sub('param.ccl', 'configuration.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # then schedule.ccl\n file = re.sub('configuration.ccl', 'schedule.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # get the thorn dir and its parent\n thornname = os.path.basename(os.path.dirname(file))\n parentdir = os.path.basename(os.path.dirname(os.path.dirname(file)))\n thornname = os.path.join(parentdir, thornname)\n file_dict = {'name' : thornname.lower()}\n for line in lines:\n line = line.strip()\n m = 
re.match(implement_re, line)\n if m:\n file_dict['implements'] = m.group(1).lower()\n\n m = re.match(inherit_re, line)\n if m:\n inheritance = re.split('\\W+', m.group(1).lower())\n file_dict['inherits'] = inheritance\n\n m = re.match(provides_function_re, line)\n if m:\n try:\n file_dict['provides_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['provides_function'] = [m.group(1).lower()]\n\n m = re.match(uses_function_re, line)\n if m:\n try:\n file_dict['uses_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['uses_function'] = [m.group(1).lower()]\n\n m = re.match(requires_function_re, line)\n if m:\n try:\n file_dict['requires_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['requires_function'] = [m.group(1).lower()]\n\n m = re.match(requires_thorn_re, line)\n if m:\n requires = re.split('\\W+', m.group(1).lower())\n # sometimes we have 'REQUIRES THORNS' instead of 'REQUIRES'\n if requires[0].lower() == 'thorns':\n del requires[0]\n file_dict['requires_thorn'] = requires\n\n m = re.match(shares_re, line)\n if m:\n try:\n file_dict['shares'].append(m.group(1).lower())\n except KeyError:\n file_dict['shares'] = [m.group(1).lower()]\n\n m = re.match(schedules_function_re, line)\n if m:\n bin, func = m.group(2).lower(), m.group(1).lower()\n if bin in TimeBins:\n bin = 'cctk_' + bin\n func_dict = {bin : func}\n try:\n file_dict['schedules_function'].append(func_dict)\n except KeyError:\n file_dict['schedules_function'] = [func_dict]\n\n\n Dependencies.append(file_dict)\n\n return Dependencies", "def parse_deps(self, infile, classpath_indexer):\r\n raise NotImplementedError()", "def get_fortran_deps(self, compiler: FortranCompiler, src: Path, target) -> T.List[str]:\n if self.use_dyndeps_for_fortran():\n return []\n\n dirname = Path(self.get_target_private_dir(target))\n tdeps = self.fortran_deps[target.get_basename()]\n srcdir = Path(self.source_dir)\n\n mod_files = _scan_fortran_file_deps(src, srcdir, dirname, tdeps, compiler)\n return mod_files", "def _parseDependDotMake( targetBuildDir, platformBuildDir ):\n Any.requireIsTextNonEmpty( targetBuildDir )\n Any.requireIsTextNonEmpty( platformBuildDir )\n\n dependDotMakePath = os.path.join( targetBuildDir, 'depend.make' )\n\n lines = FastScript.getFileContent( dependDotMakePath, splitLines=True )\n result = collections.defaultdict( set )\n\n languageNormalizationMap = {\n '.c' : 'c',\n '.C' : 'c++',\n '.CC' : 'c++',\n '.CPP': 'c++',\n '.CXX': 'c++',\n '.cc' : 'c++',\n '.cpp': 'c++',\n '.cxx': 'c++',\n }\n\n for l in lines:\n # skip comments and empty lines\n if Any.isTextNonEmpty( l ) and not l.startswith( '#' ):\n # lines are in the format\n # /path/to/obj/file.{c,cpp,cc,cxx}.o: /path/to/dependencyfile.{c,cpp,cc,cxx,h,hpp,hxx,hh}\n objFile, depFile = l.split( ':' )\n srcFile, objExt = os.path.splitext( objFile.strip( ) )\n srcName, srcExt = os.path.splitext( srcFile )\n depFile = depFile.strip( )\n _, depFileExt = os.path.splitext( depFile )\n language = languageNormalizationMap[ srcExt ]\n\n if depFileExt.lower( ) in ('.h', '.hxx', '.hpp', '.hh'):\n if not os.path.isabs( depFile ):\n relPath = os.path.join( platformBuildDir, depFile )\n absPath = os.path.abspath( relPath )\n else:\n absPath = depFile\n result[ absPath ].add( language )\n\n\n return result", "def rl_file_deps(file_deps, launch_file, verbose=False):\n parse_launch(launch_file, file_deps, verbose)", "def main(path):\n try:\n print(\"Finding imports in '\" + path + 
\"':----------------------------------------------------------------------\")\n\n file = open(path)\n contents = file.read()\n wordArray = re.split(\" |\\n\", contents)\n\n currentList = list()\n nextPaths = list()\n skipWord = -1\n\n for wordNumb in range(len(wordArray)):\n word = wordArray[wordNumb]\n\n if wordNumb == skipWord:\n continue\n\n elif word == \"from\":\n item = wordArray[wordNumb + 1]\n if 'vespa.' in item:\n currentList.append(item)\n skipWord = wordNumb + 2\n\n elif word == \"import\":\n item = wordArray[wordNumb + 1]\n if 'vespa.' in item:\n currentList.append(item)\n\n currentList = set(currentList)\n for i in currentList:\n print(i)\n\n # print(\"Found imports in '\" + path + \"'\")\n # print(\"Finding paths for imports in '\" + path + \"':\")\n\n currentList2 = currentList.copy()\n currentList = list()\n\n for i in currentList2:\n if i in dependenciesNames:\n # print(i, \"already found\")\n pass\n\n else:\n dependenciesNames.append(i)\n\n try:\n fileInfo = importlib.machinery.PathFinder().find_spec(i)\n if fileInfo is None:\n fileInfo = importlib.util.find_spec(i)\n if fileInfo is None:\n origin = 'None'\n else:\n origin = fileInfo.origin\n else:\n origin = fileInfo.origin\n\n print(origin)\n dependenciesPaths.append(origin)\n currentList.append(origin)\n\n except AttributeError as e:\n print(\"Hit Exception: AttributeError\")\n print(e)\n print(i)\n print(importlib.machinery.PathFinder().find_spec(i))\n # print(red, \"Odd noneType import called \", i, \" in path \", path, end, sep='')\n\n\n# print(\"Found paths for imports in '\" + path + \"'\")\n\n\n for fileInfo in currentList:\n main(fileInfo)\n\n except Exception as e:\n print(e)", "def scan_docs():\n\n\n def scan_file(fn):\n f = open(fn)\n\n for l in f:\n m = re.search(r\"\\.\\. 
(\\w+):: ([.\\w+]+)\", l)\n\n if not m:\n continue\n\n name_kind[m.group(2)] = m.group(1)\n\n for i in os.listdir(\"source\"):\n if i.endswith(\".rst\"):\n scan_file(os.path.join(\"source\", i))\n\n for i in os.listdir(\"source/inc\"):\n scan_file(os.path.join(\"source\", \"inc\", i))", "def read_deps():\n with open(\"./dependencies.txt\", 'r') as deps:\n return [d for d in re.split(r'\\s', ''.join(deps)) if d]", "def find_define_file_uses(self):\n # Executing git grep is substantially faster than using the define_re\n # directly on the contents of the file in Python.\n for define_file in self.get_checked_define_files():\n excluded_files = set([define_file])\n excluded_files.update(define_file.get_included_files(recursive=True))\n all_defines = define_file.get_declared_defines()\n args = ['git', 'grep', '-zwIF']\n for define in all_defines:\n args.extend(['-e', define])\n args.extend(['--', '*.cpp', '*.c', '*.cu', '*.h', '*.cuh'])\n define_re = r'\\b(?:' + '|'.join(all_defines)+ r')\\b'\n output = subprocess.check_output(args, cwd=self._source_root).decode()\n for line in output.splitlines():\n (filename, text) = line.split('\\0')\n fileobj = self._files.get(filename)\n if fileobj is not None and fileobj not in excluded_files:\n defines = re.findall(define_re, text)\n fileobj.add_used_defines(define_file, defines)", "def add_file_dependency(self, file_name=None):\n # self.logger.debug(\"add_file_dependency({0})\".format(file_name))\n\n if not isinstance(file_name, str):\n raise TypeError\n\n if not os.path.exists(file_name):\n raise FileNotFoundError(\n \"module_dependency: file({}) doesn't exist.\".format(file_name))\n\n if not os.path.isfile(file_name):\n raise FileNotFoundError(\n \"module_dependency: file({}) doesn't exist.\".format(file_name))\n\n value = os.path.splitext(file_name)\n # should be like ('network\\\\tcp_echo\\\\tcp_echo', '.py') or\n # ('network\\\\tcp_echo', '')\n # self.logger.debug(\"value({})\".format(value))\n if value[1] != \".py\":\n # self.logger.debug(\n # \"module_dependency: file({}) is not PYTHON (.py)normal file.\")\n return None\n\n # at this point, the file should be a .PY file at least\n file_han = open(file_name)\n for line in file_han:\n offset = line.find(\"import\")\n if offset >= 0:\n # then we found a line\n tokens = line.split()\n\n if len(tokens) >= 2 and tokens[0] == \"import\":\n # then like \"import os.path\" or \"import os, sys, socket\"\n\n if tokens[1][-1] != \",\":\n # then is like \"import os.path\"\n self.logger.debug(\"add_file_dep:{}\".format(tokens[1]))\n self.add_if_new(tokens[1])\n\n else: # like \"import os, sys, socket\"\n for name in tokens[1:]:\n self.logger.debug(\"token({})\".format(name))\n if name[-1] == ',':\n value = name[:-1]\n else:\n value = name\n self.add_if_new(value)\n\n elif len(tokens) >= 4 and tokens[0] == \"from\" and \\\n tokens[2] == \"import\":\n # then is like \"from cp_lib.cp_self.logger import\n # get_recommended_logger\"\n self.add_if_new(tokens[1])\n\n file_han.close()\n\n # self.logger.debug(\"module_dependency: {}\".format(self.dep_list))\n return self.dep_list", "def scan_files(self, only_files=None, keep_contents=False):\n if only_files:\n filelist = only_files\n else:\n filelist = self._files.values()\n define_files = list(self.get_checked_define_files())\n for define_file in list(define_files):\n if isinstance(define_file, GeneratedFile) and \\\n define_file.get_generator_source() is not None:\n define_files.append(define_file.get_generator_source())\n for fileobj in filelist:\n if not 
fileobj.is_external():\n detect_defines = fileobj in define_files\n fileobj.scan_contents(self, keep_contents, detect_defines)\n module = fileobj.get_module()\n if module:\n for includedfile in fileobj.get_includes():\n otherfile = includedfile.get_file()\n if otherfile:\n othermodule = otherfile.get_module()\n if othermodule and othermodule != module:\n module.add_dependency(othermodule, includedfile)", "def find_with_deps(self, package_names):", "def scan(\n cli_call_name: str, module: str, package: Optional[str], verbose: bool, help: Optional[str]\n) -> CommandTrie:\n root_module = importlib.import_module(module, package=package)\n root_path = root_module.__file__\n\n if verbose:\n print(\"Scanning module {} starting at file path: {}\".format(module, root_path))\n\n # Format the module name correctly if this is a relative import we're starting with\n target_module = module\n if _is_relative(target_module):\n if package is None:\n raise CommandError(\"Package was not specified but the module is relative.\")\n\n target_module = package\n\n # Search path changes if the __file__ entry is a python init file and not a directory\n search_path = root_path\n if search_path.endswith(_PYTHON_MODULE_INIT_FILE):\n search_path = os.path.dirname(root_path)\n\n # First identify all submodules\n submodule_names = list()\n\n # If our search path is not a directory, move on\n if os.path.isdir(search_path):\n for filename in os.listdir(search_path):\n if filename in _IGNORE_LIST:\n continue\n\n abs_path = os.path.join(search_path, filename)\n init_path = os.path.join(abs_path, _PYTHON_MODULE_INIT_FILE)\n\n module_name = \"\"\n if os.path.isdir(abs_path) and os.path.exists(init_path):\n # Figure out if we're dealing with a directory that has the init file\n module_name = \".\".join((target_module, filename))\n elif _is_python_src_file(filename):\n # Is it a python source file that's stand-alone?\n file_module_name = os.path.splitext(filename)[0]\n module_name = \".\".join((target_module, file_module_name))\n else:\n # I don't like this continue but avoiding the print statement twice is a nice consequence\n continue\n\n if verbose:\n print(\"Adding module {} to the scan list.\".format(module_name))\n\n # Add the module to our scan and import list\n submodule_names.append(module_name)\n\n # Load the modules\n submodules = [importlib.import_module(n) for n in submodule_names]\n\n # Add the root module since that's part of the scan\n submodules.append(root_module)\n\n # Load and scan the submodules for command components\n command_components = list()\n for submodule in submodules:\n for component_name in dir(submodule):\n component = getattr(submodule, component_name)\n if isinstance(component, CommandWrapper):\n if verbose:\n print(\"Found command component: {}\".format(component))\n\n command_components.append(component)\n\n # Build our command trie with collected components and perform rudimentary\n # dependency resolution for command paths\n command_trie = CommandTrie(cli_call_name, help=help)\n while len(command_components) > 0:\n delete_list = list()\n for idx in range(0, len(command_components)):\n command = command_components[idx]\n\n if command_trie.insert(command) is True:\n if verbose:\n print(\"Inserted {}\".format(command))\n\n delete_list.append(idx)\n break\n\n if len(delete_list) == 0:\n raise CommandDependencyError(\"Dependency resolution error!\")\n\n for idx in reversed(sorted(delete_list)):\n command_components.pop(idx)\n\n return command_trie", "def PrepareCompile(file):\n global 
oilcc_I,oilcc_o,oilcc_S,oilcc_target\n fp = open(file,'r')\n # some flags\n item = ''; #one item is minimum object such as TASK,ALARM ...\n barcenum = 0;\n flag = False; #has \" { \" encountered or not\n start = False #has match an obj start or not\n for line in fp.readlines():\n #firstly, filter out the comment on this line\n el = DropComment(line);\n if(start == False):\n #{\n item = ''; \n barcenum = 0;\n flag = False;\n if(IsIt('osekObj',el)):\n start = True;\n item += el;\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n if((flag == True) and (barcenum == 0)): #in one line\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n else: # special process for include\n inc = GetIt('include',el)\n if(inc != None): #include file\n flag_inc = False\n for I in oilcc_I:\n finc = I + '/' + inc[0]\n if(os.path.exists(finc)):\n print 'INFO:parse include file <%s> in the path <%s>'%(inc[0],I)\n PrepareCompile(finc);\n flag_inc = True;\n if(flag_inc == False):\n print 'ERROR:cann\\'t find out the file %s!'%(inc[0])\n sys.exit(-1)\n #}\n else:\n #{\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n item += el;\n if((flag == True) and (barcenum == 0)):\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n #}\n fp.close()", "def _get_include_files(self):\n for dirpath, dirnames, filenames in os.walk(self.IncludesDirectory):\n for f in filenames:\n rel_name = path.join(dirpath, f)\n if f.endswith('.pyx'):\n yield (rel_name, 'PyRex')\n elif f.endswith('.h'):\n yield (rel_name, 'Header')\n else:\n pass", "def _resolve_required_macros(file_content):\n call_commands = \"\"\n for line in file_content.split(\"\\n\"):\n match = re.search(\"^!@require\\s+([^\\s]+).*$\", line)\n if match is not None:\n required_macros = _add_macro_lib_ending(match.group(1))\n required_macros_file = abspath(join(LIB, required_macros))\n call_commands += _resolve_required_macros(_read_input_file(required_macros_file))\n call_commands += \"call, file = \\\"\" + required_macros_file + \"\\\";\\n\"\n return call_commands", "def Load():\n global items, libraries, _line_number, _groups_to_be_defined\n deps_file = open(\"dependencies.txt\")\n try:\n line = None\n current_type = None\n while True:\n while not line: line = _RemoveComment(deps_file.next())\n\n if line.startswith(\"library: \"):\n current_type = \"library\"\n name = line[9:].lstrip()\n _CheckLibraryName(name)\n if name in items:\n sys.exit(\"Error:%d: library definition using duplicate name %s\" % (_line_number, name))\n libraries.add(name)\n item = items[name] = {\"type\": \"library\", \"name\": name}\n line = _ReadFiles(deps_file, item, name)\n elif line.startswith(\"group: \"):\n current_type = \"group\"\n name = line[7:].lstrip()\n _CheckGroupName(name)\n if name not in items:\n sys.exit(\"Error:%d: group %s defined before mentioned as a dependency\" %\n (_line_number, name))\n if name not in _groups_to_be_defined:\n sys.exit(\"Error:%d: group definition using duplicate name %s\" % (_line_number, name))\n _groups_to_be_defined.remove(name)\n item = items[name]\n item[\"name\"] = name\n library_name = item.get(\"library\")\n if library_name:\n line = _ReadFiles(deps_file, item, library_name)\n else:\n line = _ReadSystemSymbols(deps_file, 
item)\n elif line == \" deps\":\n if current_type == \"library\":\n line = _ReadDeps(deps_file, items[name], name)\n elif current_type == \"group\":\n item = items[name]\n line = _ReadDeps(deps_file, item, item.get(\"library\"))\n elif current_type == \"system_symbols\":\n item = items[current_type]\n line = _ReadDeps(deps_file, item, None)\n else:\n sys.exit(\"Error:%d: deps before any library or group\" % _line_number)\n elif line == \"system_symbols:\":\n current_type = \"system_symbols\"\n if current_type in items:\n sys.exit(\"Error:%d: duplicate entry for system_symbols\" % _line_number)\n item = items[current_type] = {\"type\": current_type, \"name\": current_type}\n line = _ReadSystemSymbols(deps_file, item)\n else:\n sys.exit(\"Syntax error:%d: %s\" % (_line_number, line))\n except StopIteration:\n pass\n if _groups_to_be_defined:\n sys.exit(\"Error: some groups mentioned in dependencies are undefined: %s\" % _groups_to_be_defined)", "def read_dependencies(filename):\n\n dependencies = []\n with open(filename) as f:\n for line in f.readlines():\n if not line or line.startswith('#'):\n continue\n dependencies.append(line.strip())\n return dependencies", "def get_docs( mysource , basename ):\n import parser\n ast = parser.suite(mysource)\n return ModuleInfo(ast.totuple(1), basename)", "def pre_process(in_path):\n in_string = open(in_path, 'r').read()\n multi_line = '/\\\\*[^*]*\\\\*+(?:[^/*][^*]*\\\\*+)*/'\n\n # header\n description = re.search(multi_line, in_string).group(0)\n unit = re.search('\\\\n\\\\s*// unit .*', in_string).group(0)\n imports = re.findall('\\\\n\\\\s*// import .*', in_string)\n import_string = ''\n for i in imports:\n import_string += resolve_import(i.strip()[10:], in_path.parent)\n\n use_string = ''\n uses = re.findall('\\\\n\\\\s*// uses .*', in_string)\n for u in uses:\n use_string += 'uses ' + u.strip()[8:] + ';\\n'\n if use_string != '':\n use_string = '\\n\\n' + use_string\n\n header = '{' + description[2:-2] + '}\\n\\nunit ' + unit.strip()[8:] + ';' + use_string + '\\n\\n'\n\n # main part\n in_string_list, delphi_string_list = split(import_string + '\\n\\n' + in_string)\n\n return header, in_string_list, delphi_string_list", "def test_scan_and_find_dependencies_golang():\n manifests = [{\n \"filename\": \"golist.json\",\n \"filepath\": \"/bin/local\",\n \"content\": open(str(Path(__file__).parent / \"data/manifests/gograph.txt\")).read()\n }]\n res = DependencyFinder().scan_and_find_dependencies(\"golang\", manifests, \"true\")\n assert \"result\" in res\n assert res['result'][0]['details'][0]['_resolved'][0]['package'] == \\\n \"github.com/go-logr/logr\"", "def main():\n if len(sys.argv) == 1:\n print(\"No dependencies file to validate!\")\n return\n dependencies_file = sys.argv[1]\n try:\n dependencies = json.loads(open(dependencies_file, 'r').read())\n except json.decoder.JSONDecodeError:\n print(\"Invalid dependency file syntax! 
Make sure you don't have any commas at the end of your last dependency.\")\n return\n for dependency in dependencies:\n if 'target_path' in dependency and 'repository' in dependency:\n print(\"Validated {}\".format(dependency['target_path']))\n suggest_edits(dependency)\n elif 'target_path' not in dependency and 'repository' in dependency:\n print(\"Define target_path for dependency {}\".format(dependency['repository']))\n elif 'repository' not in dependency and 'target_path' in dependency:\n print(\"Define repository for dependency {}\".format(dependency['target_path']))\n else:\n print(\"Invalid format, missing repository and target_path for dependency {}\".format(dependencies.index(dependency)))", "def _scan_bytecode(\n self, module, module_code_object, is_scanning_imports):\n level = None\n fromlist = None\n\n # 'deque' is a list-like container with fast appends, pops on\n # either end, and automatically discarding elements too much.\n prev_insts = deque(maxlen=2)\n for inst in util.iterate_instructions(module_code_object):\n if not inst:\n continue\n # If this is an import statement originating from this module,\n # parse this import.\n #\n # Note that the related \"IMPORT_FROM\" opcode need *NOT* be parsed.\n # \"IMPORT_NAME\" suffices. For further details, see\n # http://probablyprogramming.com/2008/04/14/python-import_name\n if inst.opname == 'IMPORT_NAME':\n # If this method is ignoring import statements, skip to the\n # next opcode.\n if not is_scanning_imports:\n continue\n\n assert prev_insts[-2].opname == 'LOAD_CONST'\n assert prev_insts[-1].opname == 'LOAD_CONST'\n\n # Python >=2.5: LOAD_CONST flags, LOAD_CONST names, IMPORT_NAME name\n level = prev_insts[-2].argval\n fromlist = prev_insts[-1].argval\n\n assert fromlist is None or type(fromlist) is tuple\n target_module_partname = inst.argval\n\n #FIXME: The exact same logic appears in _collect_import(),\n #which isn't particularly helpful. Instead, defer this logic\n #until later by:\n #\n #* Refactor the \"_deferred_imports\" list to contain 2-tuples\n # \"(_safe_import_hook_args, _safe_import_hook_kwargs)\" rather\n # than 3-tuples \"(have_star, _safe_import_hook_args,\n # _safe_import_hook_kwargs)\".\n #* Stop prepending these tuples by a \"have_star\" boolean both\n # here, in _collect_import(), and in _process_imports().\n #* Shift the logic below to _process_imports().\n #* Remove the same logic from _collect_import().\n have_star = False\n if fromlist is not None:\n fromlist = uniq(fromlist)\n if '*' in fromlist:\n fromlist.remove('*')\n have_star = True\n\n # Record this import as originating from this module for\n # subsequent handling by the _process_imports() method.\n module._deferred_imports.append((\n have_star,\n (target_module_partname, module, fromlist, level),\n {}\n ))\n\n elif inst.opname in ('STORE_NAME', 'STORE_GLOBAL'):\n # If this is the declaration of a global attribute (e.g.,\n # class, variable) in this module, store this declaration for\n # subsequent lookup. 
See method docstring for further details.\n #\n # Global attributes are usually irrelevant to import parsing, but\n # remain the only means of distinguishing erroneous non-ignorable\n # attempts to import non-existent submodules of a package from\n # successful ignorable attempts to import existing global\n # attributes of a package's \"__init__\" submodule (e.g., the \"bar\"\n # in \"from foo import bar\", which is either a non-ignorable\n # submodule of \"foo\" or an ignorable global attribute of\n # \"foo.__init__\").\n name = inst.argval\n module.add_global_attr(name)\n\n elif inst.opname in ('DELETE_NAME', 'DELETE_GLOBAL'):\n # If this is the undeclaration of a previously declared global\n # attribute (e.g., class, variable) in this module, remove that\n # declaration to prevent subsequent lookup. See method docstring\n # for further details.\n name = inst.argval\n module.remove_global_attr_if_found(name)\n\n prev_insts.append(inst)", "def required_files(self, args):\n args_set = set(args)\n edge_list = self.__transform_pre(self.__include_deps_supply.get_file_include_deps())\n targets = chain((target for (source, target) in edge_list if source in args_set), args_set)\n return self.__transform_post(targets)", "def get_deps(cat, pkg, ns, rpkgs):\n with (settings.RAINBOARD_RPKG / cat / pkg / \"Makefile\").open() as file_handle:\n cont = file_handle.read()\n deps = [\n d_pkg\n for d_cat, d_pkg, _ in rpkgs\n if f\"\\ninclude ../../{d_cat}/{d_pkg}/depend.mk\\n\" in cont\n ]\n if pkg.startswith(\"py-\") and (cat, pkg[3:], ns) in rpkgs:\n deps.append(pkg[3:])\n deps_cache[pkg] = sorted(set(deps))\n return deps_cache[pkg]", "def gather_required_files(filename):\n # open the file, while ignoring encoding errors (usually comments)\n encoding = open_guess_encoding(filename)\n with open(filename, encoding=encoding, errors='surrogateescape') as fp:\n config = MugenParser()\n config.read_string(fp.read())\n\n # go through each section and store any options that look like filenames\n required = set()\n for section in config.sections():\n section = config[section]\n options = set(find_asset(normpath(v)) for k, v in section.items()\n if filename_regex.match(v))\n required.update(options)\n\n # check other def files, then search them and add the results\n root = dirname(filename)\n for child_file in required.copy():\n name, ext = os.path.splitext(child_file)\n if ext.lower() == '.def':\n path = join(root, child_file)\n required.update(gather_required_files(path))\n\n # TODO: this is not implemented\n # mugen does checking against many paths, so we need\n # to emulate that the if we want to check for missing files\n # finally, go through the potential files and verify they exist\n # for child_file in required.copy():\n # path = join(root, child_file)\n # if not os.path.exists(path):\n # required.remove(child_file)\n\n return required", "def generate_dependencies(data, mib_name):\r\n if mib_name not in mib_dependency_map:\r\n mib_dependency_map[mib_name] = []\r\n imports_section_search = re.search('IMPORTS(?P<imports_section>.*?);', data, re.DOTALL)\r\n if imports_section_search:\r\n imports_section = imports_section_search.group('imports_section')\r\n for dependency in re.finditer('FROM (?P<mib_name>[\\w-]+)', imports_section):\r\n dependency_name = dependency.group('mib_name')\r\n if dependency_name not in mib_dependency_map:\r\n mib_dependency_map[dependency_name] = []\r\n mib_dependency_map[mib_name].append(dependency_name)", "def find_extra_include(file_name):\r\n extra_includes = []\r\n with 
open(file_name) as f:\r\n for m in re.finditer(regex.extra_include, f.read()):\r\n extra_includes.append(m.groups(1))\r\n return extra_includes" ]
[ "0.66169536", "0.61312497", "0.58868647", "0.5637438", "0.55885524", "0.5586976", "0.5523092", "0.5467778", "0.5432288", "0.5417047", "0.5335516", "0.519833", "0.51949865", "0.51465106", "0.5142541", "0.5124167", "0.5095581", "0.50270045", "0.5003758", "0.49810615", "0.49445158", "0.49398267", "0.48794067", "0.4865043", "0.48591664", "0.48571917", "0.48447096", "0.483119", "0.48221153", "0.48210844" ]
0.7567411
0
Returns a histogram over all relationships in a graph
def count_relations(graph): return Counter( data[RELATION] for _, _, data in graph.edges_iter(data=True) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_unique_relations(graph):\n return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))", "def multiple_connections_histogram(synapses):\n count_of_synapses = synapses.groupby(['pre', 'post']).size()\n return count_of_synapses", "def count_pathologies(graph):\n return Counter(_pathology_iterator(graph))", "def get_all_relations(graph, u, v):\n return {\n data[RELATION]\n for data in graph.edge[u][v].values()\n }", "def PlotDirectedEdgeHistogram(self, g, edgeAttribute=\"weight\", useLogP1Space=True):\n\t\tseries = []\n\t\tfor e in g.es:\n\t\t\tsrc = g.vs[e.source][\"name\"]\n\t\t\tdest = g.vs[e.target][\"name\"]\n\t\t\tif useLogP1Space:\n\t\t\t\tedgeValue = np.log(e[edgeAttribute])+1 #add one, simply for plotting\n\t\t\telse:\n\t\t\t\tedgeValue = e[edgeAttribute]\n\t\t\tpair = (src+\"--\"+dest, edgeValue)\n\t\t\tseries.append(pair)\n\t\t\n\t\tprint(str(series))\n\t\tdf = pd.Series([pair[1] for pair in series], index=[pair[0] for pair in series])\n\t\tprint(str(df))\n\t\tprint(\"Plotting...\")\n\t\tdf.sort_values().plot(kind='bar',title=\"Log-Space Host-Host Flow Frequency\")\n\t\t#hist.plot()\n\t\tplt.tight_layout()\n\t\tplt.show()\n\t\tplt.clf()\n\t\t\n\t\t#plot outgoing flow distributions, only for vertices with more than one outgoing edge\n\t\tfor v in g.vs:\n\t\t\tedges = g.es.select(_source=v.index)\n\t\t\tif len(edges) > 1:\n\t\t\t\tprint(str(len(edges)))\n\t\t\t\tneighborFrequencies = [(g.vs[e.target][\"name\"], e[\"weight\"]) for e in edges]\n\t\t\t\tprint(\"NEIGHBORS: \"+str(neighborFrequencies))\n\t\t\t\tseries = pd.Series([pair[1] for pair in neighborFrequencies], index=[pair[0] for pair in neighborFrequencies])\n\t\t\t\tseries.sort_values().plot(kind='bar',title=v[\"name\"]+\" Neighbor Flow Frequency\")\n\t\t\t\tplt.tight_layout()\n\t\t\t\tplt.show()\n\t\t\t\tplt.clf()", "def relationships(self):", "def histogram_categorical_attribute(nodes: typ.Iterable[vtna.graph.TemporalNode], attribute_name: str) \\\n -> typ.Dict[str, int]:\n hist = collections.Counter()\n hist.update(node.get_global_attribute(attribute_name) for node in nodes)\n return hist", "def cassandra_histograms(mycluster=RING_1_dev__allnodes):\n cassandra_nodetool(mycluster,cmd=\"cfhistograms\")", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def Test_Histogram(Graph_MD):\n \n Edges = NX.edges(Graph_MD)\n KnotenNamen = NX.nodes(Graph_MD)\n \n KnotenNamenListe = M_Helfer.unique_String(KnotenNamen)\n NumKnotenListe = len(KnotenNamenListe)\n KnotenLeitung = arr.array('i', list(range(1, NumKnotenListe+1)))\n \n count = 0\n for Knoten in KnotenLeitung:\n KnotenLeitung[count] = 0\n count = count + 1\n \n \n for ii in list(range(NumKnotenListe)):\n KnotenName = KnotenNamenListe[ii]\n for edge in Edges:\n posS = edge[0] == KnotenName\n posE = edge[1] == KnotenName\n \n if posS :\n KnotenLeitung[ii] = KnotenLeitung[ii] + 1\n if posE:\n KnotenLeitung[ii] = KnotenLeitung[ii] + 1\n \n MaxKnotenLeitung = max(KnotenLeitung)\n HistSegmKnoten = M_MatLab.zeros('i', MaxKnotenLeitung+1)\n \n for ii in list(range(0, MaxKnotenLeitung + 1)):\n HistSegmKnoten[ii] = len(M_FindPos.find_pos_ValInVector(ii, KnotenLeitung, '=='))\n\n\n return HistSegmKnoten", "def graph(self) -> dict:\n return self.flat_graph()", "def bins(G):\n bins = {}\n for i in range(0, len(G.nodes(\"binary\")[0])+1):\n bins[i] = []\n\n for node in range(len(list(G.nodes()))):\n node_attr = G.node[node]\n # Calculate the level of each node\n level = 
node_attr[\"binary\"].count(\"1\")\n bins[level].append(node)\n\n return bins", "def read_graph(args):\n dataset = pd.read_csv(args.features_path).values.tolist()\n edges = {}\n edges[\"positive_edges\"] = [edge[0:2] for edge in dataset if edge[2] == 1]\n edges[\"negative_edges\"] = [edge[0:2] for edge in dataset if edge[2] == -1]\n edges[\"ecount\"] = len(dataset)\n edges[\"ncount\"] = len(set([edge[0] for edge in dataset]+[edge[1] for edge in dataset]))\n return edges", "def populate_graph(self):", "def adjList(self, relations):\n adj = {}\n for agent in self.agents:\n adj[agent] = {}\n for agent in self.agents:\n for relation in relations[agent]:\n if relation[0] not in adj[agent].keys():\n adj[agent][relation[0]] = []\n adj[agent][relation[0]].append(relation[1])\n return adj", "def graph(self):\n ...", "def hog_histograms(*args, **kwargs): # real signature unknown\n pass", "def _get_full_graph(self):", "def table_graph(self, i):\n return self.__graph_list[i]", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def generate_formula_histogram(self):\n\n histogram = dict()\n for element in self.atomelements:\n if element in histogram.keys():\n histogram[element] += 1\n else:\n histogram[element] = 1\n return histogram", "def get_all_edges(self):\n sum = 0\n for vertex in self:\n sum += vertex.get_edges()\n return sum", "def describe_graph(g):\n print(\"Order: {} nodes\".format(g.number_of_nodes()))\n print(\"Max node id: {}\".format(max([n for n in g.nodes()])))\n print(\"Size: {} edges (interactions)\".format(g.number_of_edges()))\n print(\"Density: {}\".format(nx.density(g)))\n ts = nx.get_edge_attributes(g, 'start')\n ds = nx.get_edge_attributes(g, 'duration')\n print(\"First timestamp is: {}\".format(min(ts.values())))\n print(\"Last timestamp is: {}\".format(max([ts[k] + ds[k] for k in ts.keys()])))", "def histogram_edges(edges: typ.List[typ.Tuple[int, int, int]], granularity: int=None) -> typ.List[int]:\n if len(edges) == 0:\n return list()\n if granularity is None:\n granularity = vtna.data_import.infer_update_delta(edges)\n histogram = [len(ls) for ls in vtna.data_import.group_edges_by_granularity(edges, granularity)]\n return histogram", "def adjacency(graph, directed=False, reverse=False, stochastic=False, heuristic=None):\n \n v = {}\n for n in graph.nodes:\n v[n] = {}\n \n for e in graph.edges:\n id1, id2 = e\n if reverse:\n id1, id2 = reversed(e)\n \n v[id1][id2] = 1.0 - graph.edges[e].get('weight', 1.0) * 0.5\n \n if heuristic:\n v[id1][id2] += heuristic(id1, id2)\n \n if not directed:\n v[id2][id1] = v[id1][id2]\n \n if stochastic:\n for id1 in v:\n d = sum(v[id1].values())\n for id2 in v[id1]:\n v[id1][id2] /= d\n \n return v", "def gen_graph(self):", "def in_degree_distribution(graph):\n in_degrees = collections.Counter()\n for node in graph.nodes(data=True):\n in_degrees[graph.in_degree(node[0])] += 1\n\n in_degrees = sorted(in_degrees.items(), key=lambda x: x[0])\n\n print(in_degrees)", "def get_indegrees(graph: Graph):\n transpose = get_transpose_graph(graph)\n return {node: len(target_nodes) for node, target_nodes in transpose.items()}", "def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2", "def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)" ]
[ "0.69387263", "0.6183445", "0.6046419", "0.6034515", "0.59751195", "0.57758623", "0.57072544", "0.5633479", "0.5611335", "0.56087476", "0.55993664", "0.5599142", "0.55365855", "0.5526105", "0.54780513", "0.547426", "0.54703754", "0.5457386", "0.54382086", "0.5435362", "0.5413176", "0.53789157", "0.53737825", "0.5373599", "0.5351694", "0.5347484", "0.532761", "0.53258425", "0.5315799", "0.53111726" ]
0.7300097
0
Makes a dict that accumulates the values for each key in an iterator of doubles
def group_dict_set(iterator): d = defaultdict(set) for key, value in iterator: d[key].add(value) return dict(d)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_densities(densities):\n\n return {spin: sum(np.array(dens[spin]) for dens in densities) \n for spin in densities[0].keys()}", "def build_histogram(iterator, key):\n buckets = defaultdict(int)\n values = {}\n\n num_objects = 0\n for obj in iterator:\n num_objects += 1\n\n try:\n val = obj[key]\n except (KeyError, TypeError):\n continue\n\n value_hash = hashlib.sha1()\n value_hash.update(syaml.dump_config(sort_yaml_obj(val)).encode())\n value_hash = value_hash.hexdigest()\n\n buckets[value_hash] += 1\n values[value_hash] = val\n\n return [\n (h, buckets[h], float(buckets[h]) / num_objects, values[h])\n for h in sorted(buckets.keys(), key=lambda k: -buckets[k])\n ]", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def probability(self):\r\n \r\n my_dict = dict()\r\n \r\n for i in self.__dtmc:\r\n \r\n sum_Pij = float(sum([self.__dtmc[i][j] for j in self.__dtmc[i]]))\r\n \r\n if sum_Pij == 0:\r\n \r\n my_dict[i] = dict()\r\n \r\n elif sum_Pij > 0:\r\n \r\n if i not in my_dict:\r\n \r\n my_dict[i] = dict()\r\n \r\n for j in self.__dtmc[i]:\r\n \r\n Pij = self.__dtmc[i][j] / sum_Pij\r\n \r\n my_dict[i][j] = Pij\r\n \r\n return my_dict", "def collect_by_key(pair_iter):\n out = {}\n for (k, v) in pair_iter:\n out[k] = out.get(k, [])\n out[k].append(v)\n return out", "def process_datapoints(datapoints):\n point_dict = {}\n\n ddata = [p for p in datapoints]\n for point in ddata:\n point_dict[hash_datapoint(point)] = {'results': [],\n 'time': [],\n 'features': point['features']}\n\n for point in ddata:\n point_dict[hash_datapoint(point)]['results'].append(point['result'])\n point_dict[hash_datapoint(point)]['time'].append(point['time'])\n\n for e in point_dict:\n result_array = np.array(point_dict[e]['results'])\n point_dict[e]['n'] = len(point_dict[e]['results'])\n point_dict[e]['mu'] = np.mean(result_array)\n point_dict[e]['sigma'] = np.std(result_array)\n del point_dict[e]['results']\n\n return point_dict", "def calcRs(distances):\n r = {}\n \n for key in distances.keys():\n summedDistances = 0\n for subkey in distances[key].keys():\n summedDistances += distances[key][subkey]\n r[key] = summedDistances/(len(distances.keys())-2)\n\n return r", "def daily_speed_sum_reduce(key, values):\n\tyield \"%s: %s, %s\\n\" % (key, sum([int(value) for value in values]), len(values))", "def dictionary_of_metrics(items):\n\n 'Initialize dict'\n d = {}\n\n # Add 'mean' key to the dict with the value of the mean calculate by using\n # np.mean rounded to 2 decimal places\n d['mean'] = round(np.mean(items), 2)\n\n # Add 'median' key to the dict with the value of the median calculate by\n # using np.median rounded to 2 decimal places\n d['median'] = round(np.median(items), 2)\n\n # Add 'var' key to the dict with the value of the varience calculate by\n # using np.var rounded to 2 decimal places\n d['var'] = round(np.var(items, ddof=1), 2)\n\n # Add 'std' key to the dict with the value of the standard deviation\n # calculate by using np.std to 2 decimal places\n d['std'] = round(np.std(items, ddof=1), 2)\n\n # Add 'min' key to the dict with the value of the minimum calculate by\n # using np.min to 2 decimal places\n d['min'] = round(np.min(items), 2)\n\n # Add 'max' key to the dict with the value of the maximum calculate by\n # using np.max to 2 decimal places\n d['max'] = round(np.max(items), 2)\n\n # returns dictionary, d\n return d", "def 
dict(self) -> Dict[str, List[NumericType]]:\n return {r.name: r.floats() for r in self._records}", "def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def dictionary_of_metrics(items):\n total = 0\n count = 0\n for value in items:\n total = total + value\n count = count + 1\n the_mean = round(total / count, 2)\n sorted_items = sorted(items)\n if count % 2 == 1:\n the_median = sorted_items[int(round(count+1)/2-1)]\n else:\n lower_median = sorted_items[int(round(count/2-1))]\n upper_median = sorted_items[int(round(count/2))]\n the_median = (lower_median + upper_median) / 2\n sum_of_sqz = 0 # Calculate Sum of squares for Varience\n for j in items:\n sqrz_calc = (j - the_mean)**2\n sum_of_sqz = sum_of_sqz + sqrz_calc\n the_varience = round(sum_of_sqz / (count - 1), 2)\n the_standard_dev = round((the_varience)**(1/2), 2)\n the_min = sorted_items[0]\n the_max = sorted_items[count - 1]\n dict = {\n 'mean': the_mean,\n 'median': the_median,\n 'var': the_varience,\n 'std': the_standard_dev,\n 'min': the_min,\n 'max': the_max\n }\n return dict", "def compute_empirical_distribution(values):\n distribution = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n for value in values:\n if value not in distribution:\n distribution[value] = 1\n else:\n distribution[value] += 1\n \n total = len(values)\n for v in distribution.keys():\n distribution[v] /= total\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return distribution", "def build_intermediate_dict(self) -> Dict[str, float]:\n intermediate = {}\n now = datetime.now()\n current_time = now.replace(hour=0, minute=0, second=0, microsecond=0)\n\n delta = timedelta(minutes=15)\n\n for i in range(0, 96):\n intermediate[self.date_to_intermediate_time_str(current_time)] = 0.0\n current_time = current_time + delta\n\n return intermediate", "def values_to_dict(keys: tuple, values: list) -> dict:\n out = {}\n for i, key in enumerate(keys):\n out[key] = np.array([x[i] for x in values])\n return out", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['lambda'] = self.lambdaVar\n retDict['k'] = self.k\n retDict['low'] = self.low\n return retDict", "def accumulate (cls, dict, inverse=False):\n total = 0\n output = {}\n keys = range(0, cls.max_key(dict) + 1)\n if inverse:\n keys = range(cls.max_key(dict), -1, -1) # Invert range if needed.\n for i in keys:\n if i in dict:\n total += dict[i] # Accumulate frequencies.\n output[i] = total\n return output", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n retDict['low'] = self.low\n return retDict", "def aggregator(index_keynames, value_keynames, ts_keyname, func, interval = 60 * 5):\n data = {}\n ts = None\n #print ts_keyname\n for parsts, parsdata in func():\n #print parsdata\n #print parsdata[\"log_timestamp\"]\n if ts is None:\n ts = parsts\n key = tuple((parsdata[key] for key in index_keynames))\n values = tuple((int(parsdata[key]) for key in value_keynames))\n 
if key not in data:\n data[key] = values\n else:\n data[key] = tuple((data[key][index] + int(values[index]) for index in range(len(values))))\n if parsts > (ts + interval):\n for keys, values in data.items():\n yield \"%s\\t%s\\t%s\" % (ts, \"\\t\".join((str(index_key) for index_key in keys)), \"\\t\".join((str(value_key) for value_key in values)))\n ts = None\n data = {}", "def associate(keys, values):\n result = {}\n val_it = iter(values)\n\n for key in keys:\n try:\n result[key] = next(val_it)\n except StopIteration:\n result[key] = None\n return result", "def calc_sim_collector(self, key, values):\r\n (rest1, rest2), common_ratings = key, values\r\n\t #your code here\r\n yield (rest1, rest2), (rho, n_common)", "def update_dict(self, total_dict: dict, iter_dict: dict) -> dict:\n for k in iter_dict:\n if k not in total_dict:\n total_dict[k] = iter_dict[k]\n else:\n total_dict[k] += iter_dict[k]\n \n return total_dict", "def _buildTotalsDict(self, fromdt, todt):\r\n pairs = [(t, t.effectForPeriod(fromdt, todt)) for t in self.transactions]\r\n return dict(pairs)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n return retDict", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['k'] = self.alpha\n retDict['theta'] = 1.0/self.beta\n retDict['low'] = self.low\n return retDict", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['alpha'] = self.alpha\n retDict['beta'] = self.beta\n retDict['scale'] = self.high-self.low\n retDict['low'] = self.low\n return retDict", "def group_by(data, key_func=identity, value_func=identity):\n\n def reduce(dic, ele):\n k = key_func(ele)\n if k in dic:\n dic[k].append(value_func(ele))\n else:\n dic[k] = [value_func(ele)]\n return dic\n\n return functools.reduce(reduce, data, {})", "def test_metric(self, qset: Iterator[Tuple[str, float]]) -> Dict[str, float]:\n res = dict(mks0=0.0, mks1=0.0, mks2=0.0, sum_weights=0.0, sum_wlen=0.0, n=0)\n hist = {k: {} for k in {\"mks0\", \"mks1\", \"mks2\", \"l\"}} # pylint: disable=C0208\n wei = {k: {} for k in hist}\n res[\"hist\"] = hist\n res[\"histnow\"] = wei\n\n for el, _ in self.enumerate_test_metric(qset):\n le = len(el.value)\n w = el.weight\n res[\"mks0\"] += w * el.mks0\n res[\"mks1\"] += w * el.mks1\n res[\"mks2\"] += w * el.mks2\n res[\"sum_weights\"] += w\n res[\"sum_wlen\"] += w * le\n res[\"n\"] += 1\n\n if el.mks0 not in hist[\"mks0\"]:\n hist[\"mks0\"][el.mks0] = w\n wei[\"mks0\"][el.mks0] = 1\n else:\n hist[\"mks0\"][el.mks0] += w\n wei[\"mks0\"][el.mks0] += 1\n if el.mks1 not in hist[\"mks1\"]:\n hist[\"mks1\"][el.mks1] = w\n wei[\"mks1\"][el.mks1] = 1\n else:\n hist[\"mks1\"][el.mks1] += w\n wei[\"mks1\"][el.mks1] += 1\n if el.mks2 not in hist[\"mks2\"]:\n hist[\"mks2\"][el.mks2] = w\n wei[\"mks2\"][el.mks2] = 1\n else:\n hist[\"mks2\"][el.mks2] += w\n wei[\"mks2\"][el.mks2] += 1\n if le not in hist[\"l\"]:\n hist[\"l\"][le] = w\n wei[\"l\"][le] = 1\n else:\n hist[\"l\"][le] += w\n wei[\"l\"][le] += 1\n return res", "def reduceWith(aggregation_fn, key, values ):\n yield (key, aggregation_fn(values))", "def gen_dict():\n lines = [line for line in csv.reader(open(__ppath__ + \"/data/occupations.csv\"))] # uses a csv.reader to parse the file, converts the generic iterable to a list\n lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and \"Total\" row, re-expresses as a list of tuples to enable dictionary conversion\n 
lines.append((\"Unemployed\",0.2)) # accounts for missing 0.2% of jobs\n return dict(lines) # converts to dictionary" ]
[ "0.6195448", "0.5842633", "0.5762505", "0.5756769", "0.5659886", "0.5645777", "0.5627008", "0.5534746", "0.5509884", "0.55032265", "0.54376537", "0.54350764", "0.54216063", "0.5409323", "0.540671", "0.5402735", "0.53859186", "0.53531986", "0.53487366", "0.5334113", "0.5327092", "0.5318451", "0.53141636", "0.52982175", "0.5296931", "0.5287551", "0.526345", "0.52515876", "0.52324295", "0.5223859" ]
0.61645067
1
Returns a histogram of the different types of relations present in a graph.
def count_unique_relations(graph): return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )", "def relationship_types(self):\n return frozenset(self._relationships_by_type.keys())", "def get_all_relations(graph, u, v):\n return {\n data[RELATION]\n for data in graph.edge[u][v].values()\n }", "def bins(G):\n bins = {}\n for i in range(0, len(G.nodes(\"binary\")[0])+1):\n bins[i] = []\n\n for node in range(len(list(G.nodes()))):\n node_attr = G.node[node]\n # Calculate the level of each node\n level = node_attr[\"binary\"].count(\"1\")\n bins[level].append(node)\n\n return bins", "def count_relation_doc(document):\n count = {}\n for line in document[1:]:\n _, _, _, relation_types, _ = conll04_parser.split_line(line)\n for relation in relation_types:\n if relation in count:\n count[relation] += 1\n else:\n count[relation] = 1\n return count", "def hog_histograms(*args, **kwargs): # real signature unknown\n pass", "def subtype_counts(node_set, G, log=False):\n subtypes = Counter()\n for n in node_set:\n subtype = G.node[n]['subtype']\n subtypes[subtype] += 1\n\n if log:\n for k, v in subtypes.items():\n subtypes[k] = np.log10(v)\n \n return subtypes", "def count_pathologies(graph):\n return Counter(_pathology_iterator(graph))", "def histogram_categorical_attribute(nodes: typ.Iterable[vtna.graph.TemporalNode], attribute_name: str) \\\n -> typ.Dict[str, int]:\n hist = collections.Counter()\n hist.update(node.get_global_attribute(attribute_name) for node in nodes)\n return hist", "def histogram_edges(edges: typ.List[typ.Tuple[int, int, int]], granularity: int=None) -> typ.List[int]:\n if len(edges) == 0:\n return list()\n if granularity is None:\n granularity = vtna.data_import.infer_update_delta(edges)\n histogram = [len(ls) for ls in vtna.data_import.group_edges_by_granularity(edges, granularity)]\n return histogram", "def PlotDirectedEdgeHistogram(self, g, edgeAttribute=\"weight\", useLogP1Space=True):\n\t\tseries = []\n\t\tfor e in g.es:\n\t\t\tsrc = g.vs[e.source][\"name\"]\n\t\t\tdest = g.vs[e.target][\"name\"]\n\t\t\tif useLogP1Space:\n\t\t\t\tedgeValue = np.log(e[edgeAttribute])+1 #add one, simply for plotting\n\t\t\telse:\n\t\t\t\tedgeValue = e[edgeAttribute]\n\t\t\tpair = (src+\"--\"+dest, edgeValue)\n\t\t\tseries.append(pair)\n\t\t\n\t\tprint(str(series))\n\t\tdf = pd.Series([pair[1] for pair in series], index=[pair[0] for pair in series])\n\t\tprint(str(df))\n\t\tprint(\"Plotting...\")\n\t\tdf.sort_values().plot(kind='bar',title=\"Log-Space Host-Host Flow Frequency\")\n\t\t#hist.plot()\n\t\tplt.tight_layout()\n\t\tplt.show()\n\t\tplt.clf()\n\t\t\n\t\t#plot outgoing flow distributions, only for vertices with more than one outgoing edge\n\t\tfor v in g.vs:\n\t\t\tedges = g.es.select(_source=v.index)\n\t\t\tif len(edges) > 1:\n\t\t\t\tprint(str(len(edges)))\n\t\t\t\tneighborFrequencies = [(g.vs[e.target][\"name\"], e[\"weight\"]) for e in edges]\n\t\t\t\tprint(\"NEIGHBORS: \"+str(neighborFrequencies))\n\t\t\t\tseries = pd.Series([pair[1] for pair in neighborFrequencies], index=[pair[0] for pair in neighborFrequencies])\n\t\t\t\tseries.sort_values().plot(kind='bar',title=v[\"name\"]+\" Neighbor Flow Frequency\")\n\t\t\t\tplt.tight_layout()\n\t\t\t\tplt.show()\n\t\t\t\tplt.clf()", "def relations(self):\n return set(self.triples()[\"relation\"])", "def degree_histogram_directed(G, in_degree=False, out_degree=False):\n nodes = G.nodes()\n if in_degree:\n in_degree = dict(G.in_degree())\n degseq=[in_degree.get(k,0) for k in nodes]\n elif out_degree:\n out_degree = 
dict(G.out_degree())\n degseq=[out_degree.get(k,0) for k in nodes]\n else:\n degseq=[v for k, v in G.degree()]\n dmax=max(degseq)+1\n freq= [ 0 for d in range(dmax) ]\n for d in degseq:\n freq[d] += 1\n return freq", "def GetInOutRelationsForList(self, G, node, relations=[]):\n res = {gc.InputRelations: {}, gc.OutgoingRelations : {}}\n if len(relations) > 0:\n outEdges = [edge for edge in G.out_edges([node], data = True) if Strings.rel in edge[2]]\n inEdges = [edge for edge in G.in_edges([node], data = True) if Strings.rel in edge[2]]\n \n for rel in relations:\n outRelations = [r for r in outEdges if (Strings.rel, rel) in list(r[2].items())]\n res[gc.OutgoingRelations][rel] = outRelations\n inRelations = [r for r in inEdges if (Strings.rel, rel) in list(r[2].items())]\n res[gc.InputRelations][rel] = inRelations\n return res", "def multiple_connections_histogram(synapses):\n count_of_synapses = synapses.groupby(['pre', 'post']).size()\n return count_of_synapses", "def test_make_histograms(self):\r\n raw_lengths = [90, 100, 110, 110, 130, 135]\r\n pre_lengths = [100, 110, 105, 130, 135]\r\n post_lengths = [130, 135]\r\n raw_hist, pre_hist, post_hist, bin_edges = \\\r\n make_histograms(raw_lengths, pre_lengths, post_lengths)\r\n assert_almost_equal(pre_hist, array([0, 2, 1, 0, 2]))\r\n assert_almost_equal(post_hist, array([0, 0, 0, 0, 2]))\r\n assert_almost_equal(bin_edges, array([90, 100, 110, 120, 130, 140]))", "def hist_of_numeric(X):\n figsize(10,3)\n for col in get_numeric(X):\n print(col)\n X[col].hist(bins=50)\n show()", "def __init__(self, network):\n self.network = network\n self.histogram = [0] * (network.maxDegree() + 1)\n\n for key, node in network.nodes.items():\n self.histogram[node.degree()] += 1\n\n #print(\"Debug: histogram list \", self.histogram)\n\n # Other option:\n # Dict containing {id:degree}\n # self.degrees = {}\n # for node in network.nodes.iteritems():\n # self.degrees[node.identifier] = node.degree()\n # for i in range(0, network.maxDegree() + 1:\n # self.histogram[i] = self.degrees.values().count(i)", "def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph", "def GetValRelations(self, G, node):\n res = {gc.InputRelations: [], gc.OutgoingRelations : []}\n outEdges = [edge for edge in G.out_edges([node], data = True) if Strings.val in edge[2]]\n inEdges = [edge for edge in G.in_edges([node], data = True) if Strings.val in edge[2]]\n res[gc.OutgoingRelations] = outEdges\n res[gc.InputRelations] = inEdges\n\n return res", "def Test_Histogram(Graph_MD):\n \n Edges = NX.edges(Graph_MD)\n KnotenNamen = NX.nodes(Graph_MD)\n \n KnotenNamenListe = M_Helfer.unique_String(KnotenNamen)\n NumKnotenListe = len(KnotenNamenListe)\n KnotenLeitung = arr.array('i', list(range(1, NumKnotenListe+1)))\n \n count = 0\n for Knoten in KnotenLeitung:\n KnotenLeitung[count] = 0\n count = count + 1\n \n \n for ii in list(range(NumKnotenListe)):\n KnotenName = KnotenNamenListe[ii]\n for edge in Edges:\n posS = edge[0] == KnotenName\n posE = edge[1] == KnotenName\n \n if posS :\n KnotenLeitung[ii] = KnotenLeitung[ii] + 1\n if posE:\n KnotenLeitung[ii] = KnotenLeitung[ii] + 1\n \n MaxKnotenLeitung = max(KnotenLeitung)\n HistSegmKnoten = M_MatLab.zeros('i', MaxKnotenLeitung+1)\n \n for ii in list(range(0, MaxKnotenLeitung + 1)):\n HistSegmKnoten[ii] = len(M_FindPos.find_pos_ValInVector(ii, 
KnotenLeitung, '=='))\n\n\n return HistSegmKnoten", "def graph_fo_relation(self, universe):\n return FO_Relation([tuple(row) for row in self.table()], universe)", "def cassandra_histograms(mycluster=RING_1_dev__allnodes):\n cassandra_nodetool(mycluster,cmd=\"cfhistograms\")", "def compare_histograms(categorical_var, numerical_var):\n pass", "def adjList(self, relations):\n adj = {}\n for agent in self.agents:\n adj[agent] = {}\n for agent in self.agents:\n for relation in relations[agent]:\n if relation[0] not in adj[agent].keys():\n adj[agent][relation[0]] = []\n adj[agent][relation[0]].append(relation[1])\n return adj", "def plot_histograms(top, bot, edges, resolution, *, ax=None):\n if ax is None:\n ax = plt.gca()\n\n ax.hlines(y=0,\n xmin=0,\n xmax=1,\n linestyle='dashed',\n color='black',\n alpha=0.2)\n ax.bar(edges, top, width=resolution)\n ax.bar(edges, -bot, width=resolution)\n # Set some sensible defaults - these can be overridden after the fact,\n # since we return the axes object\n ax.set_xlim((-0.05, 1.05))\n ax.set_xlabel('Predicted Probability')\n height = max(abs(x) for x in ax.get_ylim())\n ax.set_ylim((-height, height))\n ax.set_ylabel('Count')\n return ax", "def generate_formula_histogram(self):\n\n histogram = dict()\n for element in self.atomelements:\n if element in histogram.keys():\n histogram[element] += 1\n else:\n histogram[element] = 1\n return histogram", "def histogram(self):\n return self._hist", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def relations_table(relations):\r\n result = \"<table id='relation_result'>\"\r\n for relation in relations:\r\n result += \"<tr><td>\" + relation + \"</td></tr>\"\r\n result += \"</table>\"\r\n return result" ]
[ "0.7243773", "0.5846342", "0.5788476", "0.5771844", "0.5683003", "0.55974525", "0.5592673", "0.55913526", "0.55775154", "0.5538538", "0.55222", "0.55188054", "0.53761876", "0.53658545", "0.5363041", "0.5343787", "0.529524", "0.52806026", "0.5279505", "0.5273678", "0.5265037", "0.52416044", "0.52044827", "0.5204362", "0.51930565", "0.5157562", "0.51557356", "0.5137359", "0.5124456", "0.5122751" ]
0.6904264
1
Counts how many times each annotation is used in the graph
def count_annotations(graph): return Counter(_annotation_iter_helper(graph))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_annotation_values(graph, annotation):\n return Counter(iter_annotation_values(graph, annotation))", "def get_annotation_count(self):\n return self._num_annos", "def get_no_of_annotations(database, label, train_vids_all):\n count = 0\n for vid in train_vids_all:\n for ann in database[vid]['annotations']:\n if ann['label'] == label:\n count += 1\n return count", "def count():", "def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)", "def count_annotation_values_filtered(graph, annotation, source_filter=None, target_filter=None):\n source_filter = keep_node_permissive if source_filter is None else source_filter\n target_filter = keep_node_permissive if target_filter is None else target_filter\n\n return Counter(\n data[ANNOTATIONS][annotation]\n for u, v, data in graph.edges_iter(data=True)\n if edge_has_annotation(data, annotation) and source_filter(graph, u) and target_filter(graph, v)\n )", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def mark_as(self, label: str) -> int:\n self.label = label\n return len(self._content) // len(ANNOTATIONS)", "def test_total_new_annotations():\n\told_num_anns = len(oset['annotations'])\n\tnew_num_anns = len(nset['annotations'])\n\tnum_NAs_found = 0\n\n\told_anns = oset['annotations']\n\tfor ann in old_anns:\n\t\tann_id = ann['id']\n\t\tcat_id = ann['category_id']\n\t\tcat = old_coco_obj.cats[cat_id]\n\t\tOL = cat['name']\n\t\tfor search_row in map_rows:\n\t\t\tif OL == search_row[0]:\n\t\t\t\trow = search_row \n\t\t\t\tNL = row[2]\n\n\t\t\t\t# now we have the particular row from the CSV whose old category corresponds to this annotation's category\n\t\t\t\tif NL == 'NA':\n\t\t\t\t\tnum_NAs_found += 1\n\n\tassert old_num_anns - num_NAs_found == new_num_anns", "def count_nodes(self, term=None, labels: istr = None) -> int:", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def count_nodes(self, term=None, labels: istr = None):", "def count(self):\n\n raise NotImplementedError", "def hits(self):\n return sum(self.labels.values())", "def getHitCount(self): #$NON-NLS-1$\r", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count_pathologies(graph):\n return Counter(_pathology_iterator(graph))", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, 
len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def num_labels(self) -> int:\n raise NotImplementedError", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def count(self):\n\t\treturn len(list(self.nodes))", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)" ]
[ "0.8343825", "0.75170594", "0.68242604", "0.6573023", "0.64739114", "0.6471781", "0.6350306", "0.6324876", "0.6313155", "0.62601817", "0.6247608", "0.6229666", "0.6193566", "0.6186364", "0.61589694", "0.6157267", "0.6121791", "0.6050442", "0.6028076", "0.60266453", "0.60266453", "0.60266453", "0.60266453", "0.60212284", "0.6016933", "0.60095996", "0.59944993", "0.5985487", "0.5972277", "0.59574944" ]
0.8530131
0
Gets the set of annotations used in the graph
def get_annotations(graph): return set(_annotation_iter_helper(graph))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annotations(self):\n return self._annotations", "def annotations(self):\n\n return self._annotations", "def annotations(self) -> Mapping[str, str]:\n return pulumi.get(self, \"annotations\")", "def annotations(self) -> Mapping[str, str]:\n return pulumi.get(self, \"annotations\")", "def annotations(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"annotations\")", "def GetAnnotationsQS(self):\n return self._costly_annotations_qs", "def get_annotation_values(graph, annotation):\n return set(iter_annotation_values(graph, annotation))", "def get_unused_annotations(graph):\n return graph.defined_annotation_keywords - get_annotations(graph)", "def annotations(self):\n\t\tif self._record is not None:\n\t\t return self._record.annotations\n\t\telse:\n\t\t return {}", "def annotations(self) -> Iterator['Annotation']:\n return itertools.chain(self.footnotecitations.all(),\n self.cites.all(),\n self.externallinks.all(),\n self.inlinerequirements.all())", "def get_annotations(self, img_id):\n return self._img_id2annotations.get(img_id, [])", "def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"annotations\")", "def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"annotations\")", "def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)", "def _annotation_iter_helper(graph):\n return (\n key\n for _, _, data in graph.edges(data=True)\n if ANNOTATIONS in data\n for key in data[ANNOTATIONS]\n )", "def get_annotation_count(self):\n return self._num_annos", "def _get_annotations(self) -> List[Dict[int, Dict[str, Any]]]:\n annotations = []\n for item in self.collector:\n data_file_type = os.path.basename(item).split(\".\")[-1]\n annotations.append(\n load_annotation_file(\n os.path.join(\n self.annotation_folder,\n os.path.basename(item).replace(data_file_type, \"json\"),\n )\n )\n )\n\n return annotations", "def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )", "def _get_corner_annotations_visibility(self) :\n \n return self._corner_annotations_visibility", "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def get_annotations(self):\n ann = wfdb.rdann(self.patient_number, 'atr', pb_dir='mitdb', return_label_elements=['symbol', 'label_store',\n 'description'],\n summarize_labels=True)\n\n mit_bih_labels_str = ann.symbol\n\n labels_locations = ann.sample\n\n labels_description = ann.description\n\n return mit_bih_labels_str, labels_locations, labels_description", "def get_alignable_annotations(self, root):\n\n aas = root.findall(\".//ALIGNABLE_ANNOTATION\")\n return {aa.attrib[\"ANNOTATION_ID\"]: aa for aa in aas}", "def _used_annotations(cls) -> set:\n return set(field.type for field in dataclasses.fields(cls))", "def extract_annotations(self, min_annot=3):\n for g in self.games:\n annotation_list = [move.strip(\"{}\") for move in g.moves if move.strip().startswith(\"{\")]\n if len(annotation_list) < min_annot:\n continue\n\n annotation = \" \".join(annotation_list)\n self.annotations.append(annotation)", "def get_overview_annotations() -> dict:\n return {}", "def audit_annotations(self) -> Optional[Sequence['outputs.AuditAnnotation']]:\n return pulumi.get(self, \"audit_annotations\")", "def 
_load_annotations(self):\n if self._raw_annotations is not None:\n return self._raw_annotations\n\n dataset_file = os.path.join(self._annotation_path, 'complete_dataset_v{}.pkl'.format(self._version))\n idx_file = os.path.join(self._annotation_path, 'splits_indices_v{}.pkl'.format(self._version))\n\n def get_split_from_ds(ds, idx):\n split = {}\n keys = sorted(ds.keys())\n for j in xrange(len(idx)):\n k = keys[idx[j]]\n split[k] = ds[k]\n return split\n\n with open(idx_file, 'rb') as fid:\n indices = cPickle.load(fid)[self._image_set]\n with open(dataset_file, 'rb') as fid:\n ds = cPickle.load(fid)\n self._raw_annotations = get_split_from_ds(ds, indices)\n\n return self._raw_annotations", "def get_annotations(xmlsent):\n annotations = []\n annotation_elements = xmlsent.findall(\".//{%s}a\" % NS)\n for element in annotation_elements:\n annotation = {}\n annotation['type'] = element.attrib.get('type')\n annotation['flavor'] = element.attrib.get('flavor')\n annotation['who'] = element.attrib.get('who')\n annotation['text'] = element.text\n annot = {'type': element.attrib.get('type'), 'flavor': element.attrib.get('flavor'), \n 'who': element.attrib.get('who'), 'text': element.text}\n annotations.append(annot)\n return annotations", "def all_annotation_fns(self):\n return [a[1] for a in getmembers(self, lambda x: isinstance(x, RowAnnotation))]", "def audit_annotations(self) -> Optional[Sequence['outputs.AuditAnnotationPatch']]:\n return pulumi.get(self, \"audit_annotations\")" ]
[ "0.8111795", "0.8045864", "0.7362889", "0.7362889", "0.7331711", "0.70147973", "0.69533426", "0.69257975", "0.69232273", "0.69228804", "0.68237376", "0.6820551", "0.6820551", "0.67251045", "0.6618748", "0.6587142", "0.65761614", "0.652672", "0.6499983", "0.6469789", "0.633797", "0.63091135", "0.6214065", "0.6190084", "0.6026786", "0.6002573", "0.5992932", "0.5984607", "0.59333974", "0.59197986" ]
0.8332945
0
Gets the set of all annotations that are defined in a graph, but are never used.
def get_unused_annotations(graph): return graph.defined_annotation_keywords - get_annotations(graph)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_annotations(graph):\n return set(_annotation_iter_helper(graph))", "def get_annotation_values(graph, annotation):\n return set(iter_annotation_values(graph, annotation))", "def get_unused_list_annotation_values(graph):\n result = {}\n for annotation, values in graph.annotation_list.items():\n used_values = get_annotation_values(graph, annotation)\n if len(used_values) == len(values): # all values have been used\n continue\n result[annotation] = set(values) - used_values\n return result", "def _used_annotations(cls) -> set:\n return set(field.type for field in dataclasses.fields(cls))", "def _annotation_iter_helper(graph):\n return (\n key\n for _, _, data in graph.edges(data=True)\n if ANNOTATIONS in data\n for key in data[ANNOTATIONS]\n )", "def annotations(self):\n return self._annotations", "def annotations(self):\n\n return self._annotations", "def annotations(self) -> Iterator['Annotation']:\n return itertools.chain(self.footnotecitations.all(),\n self.cites.all(),\n self.externallinks.all(),\n self.inlinerequirements.all())", "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def GetAnnotationsQS(self):\n return self._costly_annotations_qs", "def get_naked_names(graph: BELGraph) -> Set[str]:\n return set(_naked_names_iter(graph))", "def GetQNoAnnotations(cls):\n return models.Q(annotationstable__isnull=True)", "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def annotations(self):\n\t\tif self._record is not None:\n\t\t return self._record.annotations\n\t\telse:\n\t\t return {}", "def likely_regressions(self):\n return set([label for label, count in self.regressions.items() if count == 0])", "def needsAnnotationsDictionary(self):\n return self.needs_annotations_dict", "def __get_ids_of_all_unrelaxed_candidates__(self):\n\n all_unrelaxed_ids = set([t.gaid for t in self.c.select(relaxed=0)])\n all_relaxed_ids = set([t.gaid for t in self.c.select(relaxed=1)])\n all_queued_ids = set([t.gaid for t in self.c.select(queued=1)])\n\n actually_unrelaxed = [gaid for gaid in all_unrelaxed_ids\n if (gaid not in all_relaxed_ids and\n gaid not in all_queued_ids)]\n\n return actually_unrelaxed", "def tags_used(self):\n return set([query.all_referenced_tags for query in self.snippets])", "def GetRequiredNamespaces(self):\n return set(self._required_namespaces)", "def ind_nodes(self, graph=None):\n if graph is None:\n graph = self.graph\n\n dependent_nodes = set(\n node for dependents in graph.values() for node in dependents\n )\n return [node for node in graph.keys() if node not in dependent_nodes]", "def reset_annotations(self):\n # FIXME: this state does not make sense\n self.annotation_date_set = False\n self.annotation_comment_set = False\n self.annotation_type_set = False\n self.annotation_spdx_id_set = False", "def infoboxes_of_graph(self):\n infoboxes = []\n for nodeName in super(SynonymNetwork, self).nodes():\n infoboxes = infoboxes + self.infoboxes_of_graph_node(nodeName)\n return list(set(infoboxes))", "def GetProvidedNamespaces(self):\n return set(self._provided_namespaces)", "def GetMissingRequires(self):\n external_dependencies = set(self._required_namespaces)\n\n # Assume goog namespace is always available.\n external_dependencies.add('goog')\n # goog.module is treated as a builtin, too (for goog.module.get).\n external_dependencies.add('goog.module')\n\n created_identifiers = set()\n for unused_namespace, identifier, unused_line_number in (\n self._created_namespaces):\n 
created_identifiers.add(identifier)\n\n missing_requires = dict()\n illegal_alias_statements = dict()\n\n def ShouldRequireNamespace(namespace, identifier):\n \"\"\"Checks if a namespace would normally be required.\"\"\"\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)\n\n # First check all the used identifiers where we know that their namespace\n # needs to be provided (unless they are optional).\n for ns in self._used_namespaces:\n namespace = ns.namespace\n identifier = ns.identifier\n if (not ns.alias_definition and\n ShouldRequireNamespace(namespace, identifier)):\n missing_requires[namespace] = ns.GetLine()\n\n # Now that all required namespaces are known, we can check if the alias\n # definitions (that are likely being used for typeannotations that don't\n # need explicit goog.require statements) are already covered. If not\n # the user shouldn't use the alias.\n for ns in self._used_namespaces:\n if (not ns.alias_definition or\n not ShouldRequireNamespace(ns.namespace, ns.identifier)):\n continue\n if self._FindNamespace(ns.identifier, self._provided_namespaces,\n created_identifiers, external_dependencies,\n missing_requires):\n continue\n namespace = ns.identifier.rsplit('.', 1)[0]\n illegal_alias_statements[namespace] = ns.token\n\n return missing_requires, illegal_alias_statements", "def referencedNamespaces (self):\n return frozenset(self.__referencedNamespaces)", "def annotations(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"annotations\")", "def excluded(cls):\n return []", "def fully_hidden_arrays(self):\n hidden = []\n for m in self.masks():\n invalid = self.get_property(m, '_no_valid_items')\n if invalid: hidden.append(m)\n return hidden", "def all_leaves(self, graph=None):\n if graph is None:\n graph = self.graph\n return [key for key in graph if not graph[key]]", "def excludes(self):\r\n\r\n return self._excludes" ]
[ "0.8046899", "0.6747276", "0.6727379", "0.6385317", "0.6370691", "0.6229582", "0.6198001", "0.59983647", "0.5742247", "0.57196945", "0.5694495", "0.5674026", "0.56460255", "0.56373686", "0.56286234", "0.55884176", "0.55659765", "0.55608773", "0.5503239", "0.5494925", "0.5482385", "0.5464223", "0.5454497", "0.5441737", "0.54344213", "0.54050016", "0.5356201", "0.5329748", "0.5328618", "0.53178555" ]
0.79468316
1
Gets all of the unused values for list annotations
def get_unused_list_annotation_values(graph): result = {} for annotation, values in graph.annotation_list.items(): used_values = get_annotation_values(graph, annotation) if len(used_values) == len(values): # all values have been used continue result[annotation] = set(values) - used_values return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unused_annotations(graph):\n return graph.defined_annotation_keywords - get_annotations(graph)", "def get_annotation_values(graph, annotation):\n return set(iter_annotation_values(graph, annotation))", "def metric_annotations_allow_list(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"metric_annotations_allow_list\")", "def handle_enum(enum_annotations: Any) -> list:\n result = []\n for attribute in list(enum_annotations):\n result.append(attribute.value)\n return result", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def unused_featurevalues():\n\n fvs = FeatureValue.objects.filter(feature__active=True)\n unused_fvs = fvs.filter(languages__isnull=True)\n natlang_only_fvs = fvs.filter(languages__language__natlang=True).exclude(languages__language__natlang=False)\n\n if not natlang_only_fvs:\n # Natlangs had no unique features so return early\n return unused_fvs\n\n # dsd\n decorate = ((fv.id, fv) for fv in set(unused_fvs) | set(natlang_only_fvs))\n sort = sorted(decorate)\n return [fv for (_, fv) in sort]", "def get_misused_opt_arg_dec():\n return list(incompletely_used_decorators.values())", "def list_value(self) -> global___Expression.RepeatedValue:", "def get_annotations(graph):\n return set(_annotation_iter_helper(graph))", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = nones.union(set(filter(lambda x: str(self.__dict__[x]) == \"\", clsvars)))\n unused = clsvars - variables - exceptions - nones\n return unused", "def get_values(self):\n \n return []", "def get_attribute_list(self):\n attributes = [attr for attr in vars(self.entries[0]) if not attr.startswith('__')]\n return attributes", "def _used_annotations(cls) -> set:\n return set(field.type for field in dataclasses.fields(cls))", "def variables_used (self) :\r\n\t\treturn []", "def used_yvals(self):\n return [y for y in self.yvals() if any([len(self.get_plaquette(x, y)) > 0\n for x in self.xvals()])]", "def values(self) -> List:\n pass", "def values(self):\n return [_ for _ in self._dict.values()]", "def all_label_values(self, label_list_ids=None):\n values = set()\n\n for label_list in self.label_lists.values():\n if label_list_ids is None or label_list.idx in label_list_ids:\n values = values.union(label_list.label_values())\n\n return values", "def _suppressions(self) -> List:\n sonar, sonar_id = self._metric_source, self._sonar_id()\n return [\n (sonar.false_positives(sonar_id), sonar.false_positives_url(sonar_id),\n \"Gemarkeerd als false positive in SonarQube\"),\n (sonar.wont_fix(sonar_id), sonar.wont_fix_url(sonar_id),\n \"Gemarkeerd als won't fix in SonarQube\"),\n (sonar.suppressions(sonar_id), sonar.suppressions_url(sonar_id),\n \"Gemarkeerd in de broncode met annotatie, commentaar (bijv. 
//NOSONAR) of pragma\")]", "def annotation_filter(self,annotations):\n result = []\n for key,value in annotations.items():\n if key not in [Result.START_TIME,Result.END_TIME]:\n result.append((key,value))\n result.sort(key=operator.itemgetter(0))\n return result", "def used_xvals(self):\n return [x for x in self.xvals() if any([len(self.get_plaquette(x, y)) > 0\n for y in self.yvals()])]", "def extensible_attributes_list_values():\n return \"extensibleattributedef?\" \\\n \"_return_fields=\" \\\n \"list_values,\" \\\n \"comment,\" \\\n \"name,\" \\\n \"type\"", "def get_all(cls, exclude_values: Iterator['CommonBusinessAdvertisingType'] = None) -> Tuple['CommonBusinessAdvertisingType']:\n if exclude_values is None:\n exclude_values = (cls.INVALID,)\n # noinspection PyTypeChecker\n value_list: Tuple[CommonBusinessAdvertisingType, ...] = tuple([value for value in cls.values if value not in exclude_values])\n return value_list", "def get_values(self, names):\n r = []\n for n in names:\n if n in self.raw_metrics:\n r.append(self.raw_metrics[n])\n else:\n return None\n return r", "def values(self):\n return [entry.value for entry in self.table if entry.value is not None]", "def annotations(self):\n return self._annotations", "def get_all(cls, exclude_values: Iterator['CommonBucksType'] = None) -> Tuple['CommonBucksType']:\n if exclude_values is None:\n exclude_values = (cls.INVALID,)\n # noinspection PyTypeChecker\n value_list: Tuple[CommonBucksType, ...] = tuple([value for value in cls.values if value not in exclude_values])\n return value_list", "def get_list_of_present_entries(list_):\n\n _List = []\n for x in list_:\n if x not in _List:\n if not None:\n if not pd.isna(x):\n _List.append(x)\n return _List", "def missing_variables(self):\n return [k for k in self.all_variables if k not in self._properties]" ]
[ "0.68537354", "0.62805945", "0.59040225", "0.589097", "0.58017445", "0.575836", "0.5727465", "0.57149005", "0.56691", "0.56430316", "0.56349885", "0.5633079", "0.55975", "0.55915046", "0.55841595", "0.5538164", "0.5534096", "0.5508441", "0.5505978", "0.55038834", "0.54778254", "0.5466189", "0.54593086", "0.5431938", "0.541781", "0.54102445", "0.54101956", "0.53927505", "0.5387052", "0.53804106" ]
0.7874108
0
Gets annotation/value pairs for values for which the search string is a substring
def get_annotations_containing_keyword(graph, keyword):
    return [
        {
            'annotation': annotation,
            'value': value
        }
        for annotation, value in iter_annotation_value_pairs(graph)
        if keyword.lower() in value.lower()
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _substring_occurrences(\n cls, in_str: str, substrings: Iterable[str]\n ) -> Dict[str, List[int]]:\n occurrences = {}\n for substring in substrings:\n occurrences[substring] = list(findall(substring, in_str))\n return occurrences", "def search_in_tree(self, tree, substring):\n s_options_for_match = set()\n node = tree\n idx = 0\n substring_length = len(substring)\n while idx < substring_length and len(node[substring[idx]]) != 0:\n node = node[substring[idx]]\n s_options_for_match.update(node['end'])\n idx += 1\n if idx == substring_length:\n s_options_for_match.update(node['part'])\n\n return s_options_for_match", "def searchRef(self, searchStr):\n filter = []\n attr = self.__listAttr()\n for name in attr:\n if searchStr.lower() in name.lower():\n doc = getattr(self, name)\n filter.append([name, doc]) \n # if in gloss, search for synonymes\n elif name in self.__glossIndex.keys():\n for altName in self.__glossIndex[name]['syn']:\n if searchStr in altName or altName in searchStr:\n doc = getattr(self, name)\n filter.append([name, doc])\n break\n \n return filter", "def extract_strings_from_i(incarnato_fragments, genome, param):\r\n i_dict = {}\r\n i_fragment_regions = \"\"\r\n\r\n with open(incarnato_fragments) as f:\r\n for line in f:\r\n start, end = line.strip().split(\"_\")[1].split(\"-\")\r\n seq = next(f).strip()\r\n ss = next(f).strip()\r\n i_dict[(int(start), int(end))] = [seq,ss]\r\n\r\n for start, end in sorted(i_dict.keys()):\r\n temp = start - len(i_fragment_regions)\r\n gaps = \"\".join([\"_\" for x in range(0, temp)])\r\n i_fragment_regions += gaps\r\n i_fragment_regions += i_dict[(start, end)][param]\r\n \r\n tail = \"\".join([\"_\" for x in range(len(i_fragment_regions), genome+1)])\r\n i_fragment_regions += tail\r\n return i_fragment_regions", "def search(self, searchstring):\n return {}", "def occurrences(substring, string, sensitive=True):\n pos = -1\n o = []\n if not sensitive:\n substring = substring.lower()\n string = string.lower()\n while True:\n pos = string.find(substring, pos + 1)\n if pos == -1:\n return o\n else:\n o.append([pos, pos + len(substring)])", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def search(self):\n if self.substring in [None, \"\"]:\n print(\"Invalid Value For Substring\")\n elif self.string in [None, \"\"]:\n print(\"Invalid Value For String\")\n elif len(self.substring) > len(self.string):\n print(\"Length of Substring Less Than String\")\n else:\n posn = self.comparison()\n if posn == -1:\n print(\" Substring Not Found :: Search Failed\")\n else:\n print(\" Substring Found at Position --> \", posn+1)", "def search(self, query_string):\n terms = query_string.lower().split()\n result = set(self.wordDict[terms[0]])\n if len(result) == 0:\n return list()\n else:\n for t in terms[2:]:\n records_containing_t = self.wordDict[t]\n result = result.intersection(records_containing_t)\n return [self.get_record_dict(id).getTuple() for id in result]", "def get_search_tag_values(self, tag_name):\n return self._ruleset[tag_name].keys()", "def find_possible(search_string):\n codes = []; names = []\n search_string = search_string.lower()\n for c,n in name_given_code.items():\n\n if (search_string in n):\n codes.append(c)\n names.append(n)\n\n return codes, names", "def get_search_space(self) -> Dict[str, str]:", "def by_label_contains(self, value):\n return {k: v for k, v in self.items() if value in k}", "def search_student(student):\n 
result=[]\n for name,age in alumnos.items():\n if student.lower() in name.lower():\n result.append(name)\n\n print(f\"Result {result}\")\n return result", "def search(self, value):\n pass", "def get_searches(self,redcap,boolean,val):\n\t\tmatches = []\n\t\ttry:\n\t\t\tfor eachdict in self.get_data(redcap):\n\t\t\t\tif (boolean):\n\t\t\t\t\tif (re.search(str(val),eachdict[redcap].strip(),re.I)):\n\t\t\t\t\t\tmatches.append((eachdict['article_doi'],eachdict['record_id'],eachdict[redcap]))\n\t\t\t\telse:\n\t\t\t\t\tif not (re.search(str(val),eachdict[redcap].strip(),re.I)):\n\t\t\t\t\t\tmatches.append((eachdict['article_doi'],eachdict['record_id'],eachdict[redcap]))\n\t\texcept KeyError as e:\n\t\t\tprint(\"redcap field: '{}'\\nnot found. did you mean: '{}'?\\nverify and try again\".format(redcap,get_close_matches(redcap,[d['field_name'] for d in self.get_metadata()])))\n\n\t\treturn matches", "def search(query_string):", "def search_motif(sequences):\n motif = re.compile(r'(?=(N[^P](S|T)[^P]))') #N{P}[ST]{P}\n motif_index = {}\n\n for key,value in sequences.items():\n match_motif = re.finditer(motif, value)\n motif_start_list = []\n\n for i in match_motif:\n motif_start_list.append(str(i.start()+1))\n motif_index[key] = ' '.join(motif_start_list)\n return motif_index", "def fn_find_in_map(self, value):\n\n current = self.root['Mappings']\n for index, key in enumerate(value):\n current = self.parser.exploded(current, key)\n return current", "def get_series_by_substring(self, substr):\n self._build_series_cache()\n for series in self._series_cache.values():\n if substr.lower() in series.name.lower():\n return series", "def substring_indexes(substring, string):\n last_found = -1 # Begin at -1 so the next position to search from is 0\n while True:\n # Find next index of substring, by starting after its last known position\n last_found = string.find(substring, last_found + 1)\n if last_found == -1: \n break # All occurrences have been found\n yield last_found", "def get_annotation_values(graph, annotation):\n return set(iter_annotation_values(graph, annotation))", "def search(self, sstrings, **kwargs):\n si = self.allinfo()\n return _search(si, sstrings, **kwargs)", "def search_word(i_word, i_string, i_startpos=0):\n outputDict = {}\n idx = 0\n for pos in re.finditer(r'(\\b%s\\b)' % i_word, i_string[i_startpos:]):\n outputDict[idx] = [pos.start(), pos.end() + 1]\n idx += 1\n return outputDict", "def _search(self, searchterm, pred, **args):\n # TODO: DRY with sparql_ontol_utils\n searchterm = searchterm.replace('%','.*')\n namedGraph = get_named_graph(self.handle)\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT ?c WHERE {{\n GRAPH <{g}> {{\n ?c {pred} ?l\n FILTER regex(?l,'{s}','i')\n }}\n }}\n \"\"\".format(pred=pred, s=searchterm, g=namedGraph)\n bindings = run_sparql(query)\n return [r['c']['value'] for r in bindings]", "def search_by_substring(db, table, column, substring):\n\n condition = column + \" LIKE \\'%\" + substring + \"%\\'\"\n result = select_columns(db, table, \"*\", condition=condition)\n\n return result", "def searchByNameSubstring(self, substring):\n if substring.strip() == '':\n return None\n return self.filter(name__icontains=substring)", "def indexof(self, value, tag=WORD):\n match = lambda a, b: a.endswith(\"*\") and b.startswith(a[:-1]) or a==b\n indices = []\n for i in range(len(self.words)):\n if match(value, unicode(self.get(i, tag))):\n indices.append(i)\n return indices", "def lookup(self, term):\n results = []\n lookup_term 
= term.lower()\n for char, latex, description, user_description in self.entries:\n if (char == term or\n latex.startswith(lookup_term) or\n latex[1:].startswith(lookup_term) or\n lookup_term in description.lower() or\n (user_description and lookup_term in user_description)):\n results.append((char, latex, description, user_description))\n return results", "def positions(self, searchstr: str):\n out = []\n for x in range(0, len(self.sa)):\n sub = self.sa[x]\n if searchstr == sub[0:len(searchstr)]:\n out.append(x)\n return out\n \n pass" ]
[ "0.5903146", "0.5488121", "0.5460287", "0.5396356", "0.53472704", "0.5338512", "0.53332084", "0.5275697", "0.5245533", "0.5171115", "0.51687026", "0.5167059", "0.5165257", "0.51592106", "0.5158753", "0.5154511", "0.5152063", "0.5150403", "0.5129545", "0.51103365", "0.51068866", "0.5091393", "0.5087708", "0.5074303", "0.5064305", "0.5060551", "0.50573236", "0.50471854", "0.5032215", "0.5031321" ]
0.576712
1
Counts in how many edges each annotation appears in a graph
def count_annotation_values(graph, annotation):
    return Counter(iter_annotation_values(graph, annotation))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def edgecount(self):\n\n raise NotImplementedError", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def edge_count(self) -> int:\n return int(self.graph_tuple_stats.edge_count or 0)", "def get_annotation_count(self):\n return self._num_annos", "def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2", "def num_of_edge(self):\n try:\n return self.edges\n except:\n print(\"ERROR: No graph exists\")", "def _num_edges(self):\n return len(self._eid2partid)", "def num_edges(self):\n return sum(1 for _ in self.iteredges())", "def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))", "def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n", "def _number_of_edges(self):\n if self._edges is None:\n return 0\n return len(self._edges)", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)", "def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )", "def countEdges(self):\n return numpy.count_nonzero(self.supportArray) / 2", "def num_edges(self):\r\n return len(self.__generate_edges())", "def edge_count(self):\r\n return int(sum(self.degree(node) for node in range(self.size))/2)", "def num_edges(self):\n return sum(len(v.adj) for v in self.vertices.values())", "def _num_edges(self):\n return int(self._edge_map[-1])", "def countEdges(self):\n s1 = self.DBcurs.execute(\"SELECT count(*) FROM edges\")\n data = s1.fetchone()\n if data:\n return data[0]\n else:\n return 0", "def get_no_of_annotations(database, label, train_vids_all):\n count = 0\n for vid in train_vids_all:\n for ann in database[vid]['annotations']:\n if ann['label'] == label:\n count += 1\n return count", "def edge_count(adjList):\n edges = {}\n for id, neigh in enumerate(adjList):\n for n in neigh:\n edges[max(n, id), min(n, id)] = id\n\n return len(edges)", "def control_edge_count(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count or 0)", "def Nedges(self):\n return len(self.edges)", "def count_annotation_values_filtered(graph, annotation, source_filter=None, target_filter=None):\n source_filter = keep_node_permissive if source_filter is None else source_filter\n target_filter = keep_node_permissive if target_filter is None else target_filter\n\n return Counter(\n data[ANNOTATIONS][annotation]\n for u, v, data in graph.edges_iter(data=True)\n if edge_has_annotation(data, annotation) and source_filter(graph, u) and target_filter(graph, v)\n )", "def edge_count(self) -> int:\n return self._n_edges", "def num_edges_rows(graph):\n return len(graph.graph.edges), len(graph.graph.nodes)", "def data_edge_count(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count or 0)", "def number_of_edges(self) -> int:\n return self.graph.number_of_edges()" ]
[ "0.8137551", "0.73022085", "0.6926789", "0.69061303", "0.68878543", "0.6854445", "0.67950374", "0.6794573", "0.67014533", "0.6696235", "0.6683472", "0.66686267", "0.6668122", "0.6655918", "0.6652132", "0.6610412", "0.65590703", "0.65477306", "0.6536954", "0.6498154", "0.64939207", "0.6468801", "0.64552736", "0.6453763", "0.6427407", "0.6414381", "0.6392772", "0.63516915", "0.63411623", "0.62807196" ]
0.7824741
1
Get all values for the given annotation
def get_annotation_values(graph, annotation):
    return set(iter_annotation_values(graph, annotation))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_annotations(self):\n entity = self.get_object()\n serializer = AnnotationValueSerializer(entity.annotations.all(), many=True)\n return Response(serializer.data)", "def handle_enum(enum_annotations: Any) -> list:\n result = []\n for attribute in list(enum_annotations):\n result.append(attribute.value)\n return result", "def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())", "def GetAnnotations(args, messages):\n annotations = getattr(args, 'annotations', {})\n additional_property_messages = []\n if not annotations:\n return None\n\n for key, value in annotations.items():\n additional_property_messages.append(\n messages.V2Key.AnnotationsValue.AdditionalProperty(\n key=key, value=value))\n\n annotation_value_message = messages.V2Key.AnnotationsValue(\n additionalProperties=additional_property_messages)\n\n return annotation_value_message", "def _get_vals(self, doc: Doc) -> Iterable[V]:\n raise NotImplementedError", "def annotation(self):\n return (self._name, self._value)", "def values(self, *args, **kwargs):\n return [ self._get(doc, *args, **kwargs) for doc in self.keys(*args, **kwargs) ]", "def values():", "def GetValues(self):", "def _annotations(request):\n result = Search(request).run(MultiDict(request.params))\n\n return request.find_service(AnnotationReadService).get_annotations_by_id(\n ids=result.annotation_ids\n )", "def annotations(self):\n return self._annotations", "def values(self):\n return [getattr(self, a.name) for a in self.__attrs_attrs__]", "def GetValues(self):\n ...", "def GetValues(self):\n ...", "def get_values(self):\n \n return []", "def annotations(self) -> Mapping[str, str]:\n return pulumi.get(self, \"annotations\")", "def annotations(self) -> Mapping[str, str]:\n return pulumi.get(self, \"annotations\")", "def annotations(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"annotations\")", "def get_annotations(graph):\n return set(_annotation_iter_helper(graph))", "def annotations(self):\n\n return self._annotations", "def get_values(self):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def values(self):\n return self._tagged.values()", "def values(self):\r\n return [self[k] for k in self]", "def values(self):\n return self[\"values\"]", "def values(self):\n return self[\"values\"]", "def get_all_values(self):\n return self.display_table.get_all_values(root=self.display_table_root,include=self.params)", "def values(self):\n return [ self[x] for x in self ]", "def getValues(self):\n return self.__get('values')", "def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"annotations\")", "def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"annotations\")" ]
[ "0.67769945", "0.6325543", "0.6274638", "0.6220138", "0.6180968", "0.6171129", "0.61034197", "0.60787046", "0.60713106", "0.6068879", "0.5992964", "0.59564936", "0.59216946", "0.59216946", "0.59015006", "0.5887698", "0.5887698", "0.5886613", "0.5818207", "0.5798842", "0.5798271", "0.5781662", "0.57796156", "0.5766157", "0.5766157", "0.5762097", "0.57582843", "0.57390237", "0.56659573", "0.56659573" ]
0.75477934
0
Counts in how many edges each annotation appears in a graph, but filter out source nodes and target nodes
def count_annotation_values_filtered(graph, annotation, source_filter=None, target_filter=None):
    source_filter = keep_node_permissive if source_filter is None else source_filter
    target_filter = keep_node_permissive if target_filter is None else target_filter

    return Counter(
        data[ANNOTATIONS][annotation]
        for u, v, data in graph.edges_iter(data=True)
        if edge_has_annotation(data, annotation) and source_filter(graph, u) and target_filter(graph, v)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2", "def count_annotation_values(graph, annotation):\n return Counter(iter_annotation_values(graph, annotation))", "def __filterEdges(self):", "def count_nodes(self, term=None, labels: istr = None):", "def get_indegrees(graph: Graph):\n transpose = get_transpose_graph(graph)\n return {node: len(target_nodes) for node, target_nodes in transpose.items()}", "def count_nodes(self, term=None, labels: istr = None) -> int:", "def proximal_neighbor_count(source : Image, destination : Image = None, min_distance : float = 0, max_distance : float = np.finfo(np.float32).max) -> Image:\n from .._tier1 import set_column\n from .._tier9 import centroids_of_labels\n from .._tier1 import generate_distance_matrix\n from .._tier3 import generate_proximal_neighbors_matrix\n from .._tier1 import count_touching_neighbors\n\n pointlist = centroids_of_labels(source)\n distance_matrix = generate_distance_matrix(pointlist, pointlist)\n\n touch_matrix = generate_proximal_neighbors_matrix(distance_matrix, min_distance=min_distance, max_distance=max_distance)\n destination = count_touching_neighbors(touch_matrix, destination)\n set_column(destination, 0, 0)\n return destination", "def init_inbound_counts(nodes, edges):\n inbound_counts = {}\n for node in nodes:\n inbound_counts[node] = 0\n for e in edges:\n inbound_counts[e[1]] = inbound_counts[e[1]] + 1\n return inbound_counts", "def num_ad_pruning(edge_list=\n path+'connected-component-analysis-round2/network-profiling-data/cid6_analysis/cid6-edge-list',\n worker_ads_file = path+'connected-component-analysis-round2/network-profiling-data/cid6_analysis/worker-ads-int-dict.json'):\n G = nx.read_edgelist(edge_list, delimiter='\\t')\n worker_ints = json.load(open(worker_ads_file, 'r'))\n print nx.info(G)\n threshold = 16\n count = 0\n forbidden_phones = set()\n # with codecs.open(edge_phone_count, 'r', 'utf-8') as f:\n # for line in f:\n # obj = json.loads(line[0:-1])\n # if int(obj.keys()[0]) >= threshold:\n # forbidden_phones = forbidden_phones.union(set(obj[obj.keys()[0]]))\n # with codecs.open(phone_edge_list, 'r', 'utf-8') as f:\n # for line in f:\n # fields = re.split('\\t', line[0:-1])\n # phones = set(fields[2:])\n # if len(phones.intersection(forbidden_phones)) != 0:\n # count += 1\n # G.remove_edge(fields[0], fields[1])\n H = nx.Graph()\n for e in G.edges:\n if e[0] not in worker_ints or e[1] not in worker_ints:\n raise Exception\n else:\n if len(worker_ints[e[0]]) < threshold and len(worker_ints[e[1]]) < threshold:\n H.add_edge(e[0], e[1])\n else:\n count += 1\n print str(count),' edges pruned from graph'\n print nx.info(H)\n ccs = sorted(nx.connected_components(H), key=len, reverse=True)\n print len(ccs)\n print len(ccs[0])", "def edgecount(self):\n\n raise NotImplementedError", "def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )", "def sum_edges(g, source, hops):\n\n edges = 0\n\n paths = nx.single_source_shortest_path(g, source, hops)\n for node in paths.iterkeys():\n edges += len(g.neighbors(node))\n\n return edges", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def reduce_inbound_connections(inbound_counts, nodes):\n nodes_without_inbound = []\n for node in nodes:\n inbound_counts[node] = 
inbound_counts[node] - 1\n if inbound_counts[node] == 0:\n nodes_without_inbound.append(node)\n return nodes_without_inbound", "def count_unique_relations(graph):\n return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def local_node_connectivity(G, source, target, cutoff=None):\n if target == source:\n raise nx.NetworkXError(\"source and target have to be different nodes.\")\n\n # Maximum possible node independent paths\n if G.is_directed():\n possible = min(G.out_degree(source), G.in_degree(target))\n else:\n possible = min(G.degree(source), G.degree(target))\n\n K = 0\n if not possible:\n return K\n\n if cutoff is None:\n cutoff = INF\n\n exclude = set()\n for i in range(min(possible, cutoff)):\n try:\n path = _bidirectional_shortest_path(G, source, target, exclude)\n exclude.update(set(path))\n K += 1\n except nx.NetworkXNoPath:\n break\n\n return K", "def nodes_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [len(set(node for edge in graph.get_edges() for node in edge.get_incident_nodes())) for graph in graphs]", "def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))", "def _prune_graph(self, graph, target_graph):\n count = 1\n _logger.debug(\n '|----+----------------------------------------+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^4}|{:^40}|{:^30}|{:^30}|'.format('id', 'parammeter',\n 'from', 'to'))\n for param in target_graph.all_parameters():\n var = graph.var(param.name())\n ori_shape = var.shape()\n var.set_shape(param.shape())\n _logger.debug(\n '|----+----------------------------------------+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^4}|{:^40}|{:^30}|{:^30}|'.format(\n str(count),\n str(param.name()), str(ori_shape), str(param.shape())))\n count += 1\n _logger.debug(\n '|----+----------------------------------------+------------------------------+------------------------------|'\n )", "def control_edge_count(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count or 0)", "def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))", "def count_fillin(graph, nodes):\n count = 0\n for v1 in nodes:\n for v2 in nodes:\n if v1 != v2 and v2 not in graph[v1]:\n count += 1\n return count / 2", "def intersection_count(G=None, min_streets=2):\n spn = streets_per_node(G)\n node_ids = set(G.nodes)\n return sum(count >= min_streets and node in node_ids for node, count in spn.items())", "def _num_edges(self):\n return len(self._eid2partid)", "def get_nb_edit_operations(g1, g2, forward_map, backward_map):\n n_vi = 0\n n_vr = 0\n n_vs = 0\n n_ei = 0\n n_er = 0\n n_es = 0\n\n nodes1 = [n for n in g1.nodes()]\n for i, map_i in enumerate(forward_map):\n if map_i == np.inf:\n n_vr += 1\n elif g1.nodes[nodes1[i]]['atom_symbol'] != g2.nodes[map_i]['atom_symbol']:\n n_vs += 1\n for map_i in backward_map:\n if map_i == np.inf:\n n_vi += 1\n\n# idx_nodes1 = range(0, len(node1))\n\n edges1 = [e for e in g1.edges()]\n nb_edges2_cnted = 0\n for n1, n2 in 
edges1:\n idx1 = nodes1.index(n1)\n idx2 = nodes1.index(n2)\n # one of the nodes is removed, thus the edge is removed.\n if forward_map[idx1] == np.inf or forward_map[idx2] == np.inf:\n n_er += 1\n # corresponding edge is in g2. Edge label is not considered.\n elif (forward_map[idx1], forward_map[idx2]) in g2.edges() or \\\n (forward_map[idx2], forward_map[idx1]) in g2.edges():\n nb_edges2_cnted += 1\n # corresponding nodes are in g2, however the edge is removed.\n else:\n n_er += 1\n n_ei = nx.number_of_edges(g2) - nb_edges2_cnted\n\n return n_vi, n_vr, n_vs, n_ei, n_er, n_es", "def _scan_targets(self, indices_to_nodes, node_property, source_index,\n factor_aggregator, compute_statistics,\n total_factor_instances,\n generated_edges, reverse_edges=False, limit=None,\n verbose=False):\n edge_list = []\n for target_index in range(source_index + 1, len(indices_to_nodes)):\n s = indices_to_nodes[source_index]\n t = indices_to_nodes[target_index]\n\n if node_property is not None:\n s_factors, t_factors = self._get_node_factors(\n s, t, node_property, factor_aggregator)\n else:\n if factor_aggregator is None:\n factor_aggregator = aggregate_index\n s_factors, t_factors = self._get_edge_factors(\n s, t, factor_aggregator, reverse_edges)\n\n common_factors = safe_intersection(\n s_factors, t_factors)\n\n if len(common_factors) > 0:\n edge = {\n \"@source_id\": s,\n \"@target_id\": t,\n \"common_factors\": common_factors\n }\n\n for stat in compute_statistics:\n edge[stat] = COOCCURRENCE_STATISTICS[stat](\n self.pgframe, s, t,\n node_property,\n common_factors,\n total_factor_instances,\n factor_aggregator,\n reverse_edges)\n\n edge_list.append(edge)\n\n if limit:\n if len(generated_edges) + len(edge_list) == limit:\n if verbose:\n print(\"Reached the edge limit ({})\".format(limit))\n return edge_list\n\n return edge_list", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def num_edges(self):\n return sum(1 for _ in self.iteredges())" ]
[ "0.64924645", "0.61406404", "0.60482", "0.5913293", "0.5882233", "0.5866123", "0.5851271", "0.58161896", "0.57976633", "0.57823884", "0.57695407", "0.57527435", "0.56889486", "0.5676223", "0.5665394", "0.5659691", "0.5659477", "0.56017035", "0.556301", "0.5519132", "0.5494026", "0.5463144", "0.54623425", "0.54562557", "0.5451037", "0.54458684", "0.54374087", "0.54263175", "0.54207623", "0.5418176" ]
0.7213139
0
Iterates over unique node-node pairs in the graph
def _iter_pairs(graph):
    for u, v in set(graph.edges_iter()):
        yield u, v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def iter_nodes(self):", "def all_pairs(self):\n return chain(self.nx_graph.edges(), nx.non_edges(self.nx_graph))", "def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node", "def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())", "def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n", "def _get_common_neighbour_node_pairs(self):\n node_pairs = []\n for node1 in self.graph.nodes():\n for node2 in self.graph.nodes():\n if node1 != node2:\n neighbour_count = self.neighbour_counts[(node1, node2)]\n if neighbour_count >= 1:\n node_pairs.append((node1, node2))\n return node_pairs", "def mut_space(graph: nx.Graph) -> t.Iterator[t.Tuple[int, t.List[str]]]:\n genes = get_attr(graph, 'gene')\n xs = sorted(chain.from_iterable(((g.P1, g.A1), (g.P2, g.A2)) for g in genes))\n return ((g, sorted(set(x[1] for x in gg))) for g, gg in groupby(xs, key=op.itemgetter(0)))", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def get_consistent_edges(graph):\n for u, v in _iter_pairs(graph):\n if pair_is_consistent(graph, u, v):\n yield u, v", "def neighbors_iter(node, topology):\n return topology[node]", "def edges(self) -> Iterable[Tuple[Node]]:\n edges = []\n for node in self.__graph_dict.keys():\n for neighbour in self.__graph_dict[node]:\n # Since all edges go both ways, we need only return one of them.\n if {neighbour, node} not in edges:\n edges.append({node, neighbour})\n yield (node, neighbour)", "def likely_pairs(self, k=2):\n for a in self.G.nodes():\n if not self.eligible_node(a):\n continue\n for b in neighbourhood(self.G, a, k):\n if not self.eligible_node(b):\n continue\n yield (a, b)", "def connect_all(graph, nodeset):\n for element in nodeset:\n graph.add_node(element)\n for element1 in nodeset:\n for element2 in nodeset:\n if not element1 == element2:\n graph.add_edge(element1, element2)\n return graph", "def nodes_iter(topology):\n return topology.nodes_iter()", "def get_inconsistent_edges(graph):\n for u, v in _iter_pairs(graph):\n if not pair_is_consistent(graph, u, v):\n yield u, v", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def get_inconsistent_edges(graph: BELGraph) -> Iterable[Tuple[BaseEntity]]:\n for u, v in graph.edges():\n if not pair_is_consistent(graph, u, v):\n yield u, v", "def nodes(self):\n\n return list(set(self._graph.keys() + [x for x in itertools.chain.from_iterable(self._graph.values())]))", "def _find_paired_nodes(self, graph):\n paired_list = []\n for line in nx.generate_edgelist(graph):\n if ('basepair' in line):\n if not (int(line.split(' ')[0]) in paired_list):\n paired_list.append(int(line.split(' ')[0]))\n if not (int(line.split(' ')[1]) in paired_list):\n paired_list.append(int(line.split(' ')[1]))\n return paired_list", "def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):\n if nbunch is None:\n nbunch = G\n else:\n nbunch = set(nbunch)\n\n directed = G.is_directed()\n if directed:\n iter_func = 
itertools.permutations\n else:\n iter_func = itertools.combinations\n\n all_pairs = {n: {} for n in nbunch}\n\n for u, v in iter_func(nbunch, 2):\n k = local_node_connectivity(G, u, v, cutoff=cutoff)\n all_pairs[u][v] = k\n if not directed:\n all_pairs[v][u] = k\n\n return all_pairs", "def differentNodesForNode(ntupleSet,nodeList,verbose=False):\n nodesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n for nodeInTuple in ntuple:\n nodesPerNode[nodeInTuple].extend(ntuple)\n \n for a,v in nodesPerNode.iteritems():\n nodesPerNode[a] = set(v)\n \n return nodesPerNode", "def iter_all(self):\n for i in range(self.num_nodes):\n self.iter_node(i)", "def fdupes(g, node_prefix='_qh/',edge_type='qhash'):\n build_index_on_node_attr(g, 'qhash', 'f', node_prefix=node_prefix, expand=True)\n paths = g.node_labels\n print '\\n'.join(sorted(str(s) for s in paths.items()))\n for k in paths:\n if k:\n edge = g.edge[k]\n if len(edge) > 1:\n for path in edge:\n print paths.get(path) # , g.node[path]\n print ''", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def get_all_pairs(G):\n # list all (start,dest) pairs between which the route must be computed\n pairs_list = [(start, dest) for dest in G.nodes for start in G.nodes]\n\n # shuffle all elements in-place\n random.shuffle(pairs_list)\n\n # generate a set from the list\n pairs_set = set(pairs_list)\n\n return pairs_list, pairs_set", "def differentNTuplesForNode(ntupleSet,nodeList,verbose=False):\n ntuplesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n ntuple.sort()\n joinedTuple = \"\".join(ntuple)\n for nodeInTuple in ntuple:\n ntuplesPerNode[nodeInTuple].append(joinedTuple)\n \n for a,v in ntuplesPerNode.iteritems():\n ntuplesPerNode[a] = set(v)\n \n return ntuplesPerNode", "def iter_links(self):\n for site in self.iter_sites():\n for u in range(self.dim):\n yield tuple(list(site) + [u])", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def get_connected_nodes(self, node):\n assert node in self.nodes, \"No node \"+str(node)+\" in graph \"+str(self)\n result = [x.node2 for x in self.edges if x.node1 == node]\n result += [x.node1 for x in self.edges if x.node2 == node]\n return sorted(result)" ]
[ "0.68542266", "0.6712754", "0.6575832", "0.6528945", "0.64538825", "0.6438056", "0.6314288", "0.62993896", "0.6280058", "0.6257878", "0.62574124", "0.625634", "0.6213354", "0.62093157", "0.6184859", "0.61808145", "0.61796695", "0.6145432", "0.60995543", "0.60977054", "0.60902834", "0.60635364", "0.6036189", "0.60259694", "0.598934", "0.5987208", "0.59763455", "0.5975638", "0.59750134", "0.59552693" ]
0.78772706
0
Returns if the set of relations contains a contradiction
def relation_set_has_contradictions(relations):
    has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)
    has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)
    has_cnc = any(relation == CAUSES_NO_CHANGE for relation in relations)
    return 1 < sum([has_cnc, has_decreases, has_increases])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_relations(self, relations):\n if self.debug:\n print(\"Checking relations\")\n result = False\n work_relations = []\n\n # Eliminate unnecessary(duplicated) clauses\n if relations[\"is_derived_from\"]:\n relations[\"has_derived_form\"] = True\n relations[\"is_derived_from\"] = False\n if relations[\"etymology\"]:\n relations[\"etymological_origin_of\"] = True\n relations[\"etymology\"] = False\n\n for relation in relations:\n if relations[relation]:\n result = True\n work_relations.append(relation)\n return result, work_relations", "def _relation_check(self):\n seen = set()\n for entity in self.get_entities():\n for field in entity.fields.itervalues():\n if field.is_relation():\n seen.add(field.remote_name)\n missing = seen - set(self.entities.keys())\n if missing:\n raise exceptions.SchemaError(\n 'undefined entities referenced in relations: %s' % (\n ', '.join(missing)))", "def test_conceptional_relations(id, conrel, expected_ids):\n synset = germanet_data.get_synset_by_id(id)\n related = synset.relations[conrel]\n np.testing.assert_equal(sorted([syn.id for syn in related]), sorted(expected_ids))", "def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False", "def pair_has_contradiction(graph, u, v):\n relations = get_all_relations(graph, u, v)\n return relation_set_has_contradictions(relations)", "def can_create_relation(self, rids):\n if len(rids) < 2:\n raise ValueError(\"It takes at least 2 to tango!\")\n rids = tuple(rids)\n existing = self.find_relations(rids[0], *rids[1:])\n if existing:\n cmp_set = set(rids)\n for rel_id in existing:\n if self.get(rel_id) == cmp_set:\n raise ValueError(\"Already has relations: %s\" % \", \".join(existing))", "def is_equivalence(self) -> bool:", "def exact_fact_check(self, triple, transitive=False):\n exists = self.knowledge_graph.check_triple_object_existence(triple, transitive)\n if exists is True:\n return 'exists', []\n conflicts = self.knowledge_graph.get_triples(triple.subject, triple.relation, transitive)\n if conflicts is not None:\n return 'conflicts', conflicts\n # The following checks triples with the same subject and object, but different relation. 
Are those conflict?\n # if conflicts is None:\n # conflicts = []\n # for obj in triple.objects:\n # relations = self.knowledge_graph.get_relation_triples(triple.subject, obj)\n # if relations is not None:\n # conflicts += relations\n # if len(conflicts) > 0:\n # return 'conflicts', conflicts\n return 'none', []", "def consistent(self):\n return all((constraint.consistent() for constraint in self.constraints))", "def is_relation(self, rel_name):\n return rel_name in self._declaration", "def is_consistent(self, constraints):\n for constraint in constraints:\n if not constraint.is_satisfied_with(self):\n return False\n return True", "def is_reflexive(fuzzy_set):\n\tfor index, element in enumerate(fuzzy_set.domain.domain_elements):\n\t\tif element[0] == element[1]:\n\t\t\tif not fuzzy_set.memberships[index] == 1:\n\t\t\t\treturn False\n\treturn True", "def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"", "def complete(self):\n return all((constraint.satisfied() for constraint in self.constraints))", "def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise", "def __has_no_dependents (self, obj, constraints):\n failed = False\n while constraints and not failed:\n c = constraints [0]\n\n if c [1] == obj:\n failed = True\n\n constraints = constraints [1:]\n\n return not failed", "def check_membership(self, ref_set, my_set):\n rset = set(ref_set)\n mset = set(my_set)\n if len(rset.intersection(mset)) > 0:\n return 1\n else:\n return 0", "def can_satisfy_without_refs(self):\n\n if self.constraint_type in (self.ONE, self.AT_LEAST_ONE):\n result = any(not _is_ref_prop(name) for name in self.property_names)\n\n else:\n # ALL\n result = all(not _is_ref_prop(name) for name in self.property_names)\n\n return result", "def is_relational(*args):\n return _ida_hexrays.is_relational(*args)", "def check_rel(rel, rel_set):\n if rel is None:\n return False\n if isinstance(rel, tuple):\n return check_rel(rel[0], rel_set)\n if isinstance(rel, string_types):\n return rel.lower() in rel_set\n raise TypeError(\"Unsupported type {} of relation {!r}.\".format(\n type(rel), rel))", "def test_cli_validate_with_invalid_check_relations():\n result = CliRunner().invoke(\n cli.cli,\n [\n \"validate\",\n \"tests/data/invalid-relations/datapackage.json\",\n \"--check-relations\",\n ],\n )\n assert result.exit_code == 0\n assert '\"valid\": true' in result.output\n assert \"Foreign key\" in result.output", "def _check_duplicate_trans(self):\n transactions_set = set(self._transactions)\n return 
len(transactions_set) == len(self._transactions)", "def hasConflicts(self):\n partners = {}\n for first, second in self:\n #print >>sys.stderr, \"first:\", first, \"second:\", second\n if first is None:\n if second is None:\n continue #no pairing info\n else:\n first, second = second, first #swap order so None is 2nd\n if second is None: #check first isn't paired\n if partners.get(first, None) is not None:\n print >>sys.stderr, \"here1\"\n print >>sys.stderr, \"first:\", first, \"second:\", second\n return True\n else:\n partners[first] = None\n else: #first and second were both non-empty: check partners\n if first in partners:\n if partners[first] != second:\n print >>sys.stderr, \"here2\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[first]\", partners[first]\n print \"partners:\", partners\n return True\n if second in partners:\n if partners[second] != first:\n print >>sys.stderr, \"here3\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[second]:\", partners[second]\n return True\n #add current pair to the list of constraints\n partners[first] = second\n partners[second] = first\n #can only get here if there weren't conflicts\n return False", "def valid(self):\n return len(self._totals_) <= 1", "def test_incoming_conceptional_relations(id, conrel, expected_ids):\n synset = germanet_data.get_synset_by_id(id)\n related = synset.incoming_relations[conrel]\n np.testing.assert_equal(sorted([syn.id for syn in related]), sorted(expected_ids))", "def relation_exists(cls, model):\n return bool(cls.get_related_field(model)\n or cls.get_reverse_related_field(model))", "def valid(self):\n return len(self.missing()) == 0", "def checkIncToSets(_session, _el, _sets, _arc_type):\n for set in _sets:\n if _session.search_one_shot(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n set,\n sc.SC_ARC | _arc_type,\n _el), True, 3) is None:\n return False\n \n return True", "def is_id(self):\n found = False\n for p in self.ant:\n for prop in self.con:\n if p == prop:\n found = True\n return found", "def isRelational(self):\n return _libsbml.ASTNode_isRelational(self)" ]
[ "0.7122376", "0.67345405", "0.6449695", "0.6206758", "0.6205824", "0.6188202", "0.61652434", "0.61545885", "0.6087285", "0.60733366", "0.6025005", "0.6013213", "0.60035706", "0.5984546", "0.59587294", "0.5939061", "0.5924129", "0.5839123", "0.5822666", "0.5814324", "0.5813227", "0.5801442", "0.5782242", "0.57773024", "0.57684803", "0.57624924", "0.5761884", "0.57327116", "0.5720645", "0.5710378" ]
0.7635147
0
Checks if a pair of nodes has any contradictions in their causal relationships.
def pair_has_contradiction(graph, u, v):
    relations = get_all_relations(graph, u, v)
    return relation_set_has_contradictions(relations)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relation_set_has_contradictions(relations):\n has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)\n has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)\n has_cnc = any(relation == CAUSES_NO_CHANGE for relation in relations)\n return 1 < sum([has_cnc, has_decreases, has_increases])", "def get_contradictory_pairs(graph):\n for u, v in _iter_pairs(graph):\n if pair_has_contradiction(graph, u, v):\n yield u, v", "def is_contradiction_(transition):\n is_contr = False\n\n # check implications of lower left corner\n if np.argmax(transition[0]) == 0:\n if np.argmax(transition[2]) == 2 or np.argmax(transition[2]) == 3:\n is_contr = True\n elif np.argmax(transition[0]) == 1:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 2:\n is_contr = True\n if np.argmax(transition[2]) != 1:\n is_contr = True\n elif np.argmax(transition[0]) == 2:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 1:\n is_contr = True\n elif np.argmax(transition[0]) == 3:\n if np.argmax(transition[1]) != 3:\n is_contr = True\n if np.argmax(transition[2]) == 0 or np.argmax(transition[2]) == 2:\n is_contr = True\n\n # check implicatiosn of upper right corner\n if np.argmax(transition[2]) == 0:\n if np.argmax(transition[0]) == 1 or np.argmax(transition[0]) == 3:\n is_contr = True\n elif np.argmax(transition[2]) == 1:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 2:\n is_contr = True\n elif np.argmax(transition[2]) == 2:\n if np.argmax(transition[0]) != 2:\n is_contr = True\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 1:\n is_contr = True\n elif np.argmax(transition[2]) == 3:\n if np.argmax(transition[1]) != 3:\n is_contr = True\n if np.argmax(transition[0]) == 0 or np.argmax(transition[0]) == 1:\n is_contr = True\n\n return is_contr", "def are_connected(self, node1, node2):\n return bool( self.get_edge(node1, node2) )", "def valid_chain(chain):\n\n for i in range(len(chain) - 1):\n parent_edge = chain[i]\n child_edge = chain[i + 1]\n # verify that the child of the parent edge (second node) matches the parent of the child edge (first node)\n if not parent_edge[1] == child_edge[0]:\n # if this isn't\n return False\n return True", "def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise", "def check_adjacency(layer_a, layer_b, topology):\n adjacency = None\n for node in topology.graph.node.keys():\n if layer_a in node:\n # print topology.graph.edge[node].keys()\n for edge in topology.graph.edge[node].keys():\n if layer_b in edge:\n # print topology.graph.edge[node][edge]\n # print \"Node \",layerA,\" and \",layerB,\" share an edge!\"\n adjacency = True\n if adjacency is True:\n return True\n else:\n return False", "def hasConflicts(self):\n partners = {}\n for first, second in self:\n #print >>sys.stderr, \"first:\", first, \"second:\", second\n if first is None:\n if second is None:\n continue #no pairing info\n else:\n first, second = second, first #swap order so None is 2nd\n if second 
is None: #check first isn't paired\n if partners.get(first, None) is not None:\n print >>sys.stderr, \"here1\"\n print >>sys.stderr, \"first:\", first, \"second:\", second\n return True\n else:\n partners[first] = None\n else: #first and second were both non-empty: check partners\n if first in partners:\n if partners[first] != second:\n print >>sys.stderr, \"here2\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[first]\", partners[first]\n print \"partners:\", partners\n return True\n if second in partners:\n if partners[second] != first:\n print >>sys.stderr, \"here3\"\n print >>sys.stderr, \"first:\", first, \"second:\", second, \"partners[second]:\", partners[second]\n return True\n #add current pair to the list of constraints\n partners[first] = second\n partners[second] = first\n #can only get here if there weren't conflicts\n return False", "def _degree_has_changed(first, second):\n return len(set(first) ^ set(second)) != 0", "def is_equivalence(self) -> bool:", "def check_model(self):\n for node in self.nodes():\n cpd = self.get_cpds(node=node)\n\n if cpd is None:\n raise ValueError(\"No CPD associated with {}\".format(node))\n elif isinstance(cpd, (TabularCPD, ContinuousFactor)):\n evidence = cpd.get_evidence()\n parents = self.get_parents(node)\n if set(evidence if evidence else []) != set(parents if parents else []):\n raise ValueError(\n \"CPD associated with {node} doesn't have \"\n \"proper parents associated with it.\".format(node=node)\n )\n if not cpd.is_valid_cpd():\n raise ValueError(\n \"Sum or integral of conditional probabilites for node {node}\"\n \" is not equal to 1.\".format(node=node)\n )\n return True", "def are_connected(self, person1, person2):\n\n possible_nodes = Queue()\n seen = set()\n possible_nodes.enqueue(person1)\n seen.add(person1)\n\n while not possible_nodes.is_empty():\n person = possible_nodes.dequeue()\n print(\"checking\", person)\n if person is person2:\n return True\n else:\n for cohabitant in person.adjacent - seen:\n possible_nodes.enqueue(cohabitant)\n seen.add(cohabitant)\n print(\"added to queue:\", cohabitant)\n return False", "def _valid_bond_pair(self, set):\n (sbu1, cp1), (sbu2, cp2) = set\n if all([i is None for i in [cp1.special, cp2.special, cp1.constraint, cp2.constraint]]):\n return sbu1.is_metal != sbu2.is_metal\n\n return (cp1.special == cp2.constraint) and (cp2.special == cp1.constraint)", "def consistency(node, sequence, orientation, overlap):\n from_id, to_id = node\n from_sequence, to_sequence = sequence\n from_orn, to_orn = orientation\n if from_orn == '-':\n from_sequence = reverse_and_complement(from_sequence)\n if to_orn == '-':\n to_sequence = reverse_and_complement(to_sequence)\n size_overlap = real_overlap(from_sequence, to_sequence)\n if not size_overlap == overlap:\n GRAPH_LOGGER.debug('Edge between node %s and %s have \\\n \tno consistency between CIGAR overlap end \"real\" overlap', from_id, to_id)\n return False\n\n return True", "def check_connected(self, update=True):\n # update if needed\n if update:\n\n self.update_neighbors()\n\n # go through each node checking that each degree id greater than 0\n for node in self.nodes:\n\n # only one node needs to be disconnected to fail\n if len(self.nodes[node].neighbors) < 1:\n return False\n\n return True", "def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = set(segment_pair)\n if seg1 in seg_set and seg2 in seg_set:\n return True\n return False", "def compare_balanced_tree(G, node1:str, node2:str, 
traversed1:list, traversed2:list):\n logger.debug(f\"checking symmtrical connections for nodes: {node1}, {node2}\")\n tree1 = set(get_next_level(G,[node1]))\n tree2 = set(get_next_level(G,[node2]))\n traversed1.append(node1)\n traversed2.append(node2)\n if tree1==tree2:\n return True\n while(len(list(tree1))== len(list(tree2)) > 0):\n logger.debug(f\"tree1 {tree1} tree2 {tree2} traversed1 {traversed1} traversed2 {traversed2}\")\n tree1 = set(tree1) - set(traversed1)\n tree2 = set(tree2) - set(traversed2)\n\n if tree1.intersection(tree2) or len(list(tree1))== len(list(tree2))==0:\n return True\n else:\n traversed1+=list(tree1)\n traversed2+=list(tree2)\n tree1=set(get_next_level(G,tree1))\n tree2=set(get_next_level(G,tree2))\n\n logger.debug(f\"Non symmetrical branches for nets: {node1}, {node2}\")\n return False", "def consistent(self):\n return all((constraint.consistent() for constraint in self.constraints))", "def connected_pair(self, first, second):\n cover = set()\n queue = {first}\n while queue:\n new = queue.pop()\n cover.add(new)\n for adjacent in new.parents() | new.children():\n if adjacent == second:\n return True\n elif not adjacent in cover:\n queue.add(adjacent)\n return False", "def permissible(e1, e2):\n return e1[1] == e2[0] and \\\n total_edge_length(e1, e2) < maximum_distance and \\\n total_edge_angle(e1, e2) < maximum_angle_delta", "def is_converged(clusters1, clusters2, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n if clusters1[i][j] != clusters2[i][j]:\r\n return False\r\n return True", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self.graph and node2 in self.graph[node1]", "def check(self):\n\n constrains = pm.ls(type='constraint')\n uselessConstrains = []\n\n for const in constrains:\n connections = const.listConnections(scn=True, s=False, d=True)\n if const in connections:\n connections.remove(const)\n\n if len(connections) == 0:\n uselessConstrains.append(const)\n\n if not uselessConstrains:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = uselessConstrains\n for obj in uselessConstrains:\n self.addError(\"%s doesn't have outgoing connections.\" % obj)\n self.errorMessage = \"%s useless constrains\" % (\n len(uselessConstrains))", "def is_consistent(self, constraints):\n for constraint in constraints:\n if not constraint.is_satisfied_with(self):\n return False\n return True", "def is_complete(self, A, B):\n return all(self.is_edge(v, w) for v in A for w in B)", "def _check_consistency(self, item, path, targets):\n for neighbor in self._edges[path[-1]]:\n if neighbor in path:\n continue\n elif self._nodes[neighbor][item] in (EMPTY, VISITED):\n continue\n\n remaining = set(targets)\n if neighbor in targets:\n remaining.remove(neighbor)\n if len(remaining) == 0:\n return True\n\n if self._check_consistency(item, path + [neighbor], remaining):\n return True\n\n return False", "def perms_are_connected(g, n):\n from sage.graphs.graph import Graph\n G = Graph()\n if g:\n G.add_vertices(g[0].domain())\n for p in g:\n G.add_edges(p.dict().items())\n return G.is_connected()", "def cross_closeness(self, node_list1, node_list2, link_attribute=None):\n path_lengths = InteractingNetworks.cross_path_lengths(self, node_list1,\n node_list2, link_attribute)\n return self._calculate_general_closeness(path_lengths, internal=False)", "def check_pairs(self, all_pr, curr):\n flag = True\n for pair_ox in all_pr:\n if (curr[0] == pair_ox or curr[1] == pair_ox):\n flag = False\n return flag", "def 
is_adjecent(self, cell1, cell2):\r\n if cell1 == cell2:\r\n return True\r\n elif cell1[0] == cell2[0] and (cell1[1] - cell2[1] == 1 or cell1[1] - cell2[1] == -1):\r\n return True\r\n elif cell1[1] == cell2[1] and (cell1[0] - cell2[0] == 1 or cell1[0] - cell2[0] == -1):\r\n return True\r\n else:\r\n return False" ]
[ "0.68102324", "0.6214052", "0.6207057", "0.60906833", "0.6065176", "0.60568976", "0.6026552", "0.6008109", "0.59604144", "0.59581804", "0.59490097", "0.5934767", "0.5910104", "0.59072256", "0.5901164", "0.5900084", "0.58665067", "0.58433616", "0.5840681", "0.5824252", "0.5824074", "0.5818633", "0.57808894", "0.5774537", "0.5769276", "0.5736746", "0.57347584", "0.5721173", "0.57204604", "0.5719087" ]
0.62543714
1
Iterates over contradictory node pairs in the graph based on their causal relationships
def get_contradictory_pairs(graph):
    for u, v in _iter_pairs(graph):
        if pair_has_contradiction(graph, u, v):
            yield u, v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_successors(self, node):\n succs = []\n parent_state = self.node_to_state(node)\n for it in self.children:\n child_node = (node[0] + it[0], node[1] + it[1])\n child_state = self.node_to_state(child_node)\n edge = self.interpolate(parent_state, child_state, self.distance_bw_states(parent_state, child_state)/self.path_resolution)\n succs.append([child_node, edge])\n return succs", "def cohen_sadeh(prelations):\n # Adaptation to avoid multiple end nodes\n successors = graph.reversed_prelation_table(prelations)\n end_act = graph.ending_activities(successors)\n\n #Step 1. Construct work table with Immediate Predecessors\n Columns = namedlist.namedlist('Columns', ['pre', 'blocked', 'dummy', 'suc', 'start_node', 'end_node'])\n # [0 Predecesors, 1 Blocked, 2 Dummy, 3 Successors, 4 Start node, 5 End node]\n # Blocked = (False or Activity with same precedents)\n work_table = {}\n for act, predecessors in prelations.items():\n work_table[act] = Columns(set(predecessors), False, False, None, None, None)\n\n# print \"\\n--- Step 1 ---\"\n# __print_work_table(work_table)\n\n #Step 2. Identify Identical Precedence Constraint of Diferent Activities\n visited_pred = {}\n for act, columns in work_table.items():\n pred = frozenset(columns.pre)\n if pred not in visited_pred:\n visited_pred[pred] = act\n else:\n columns.blocked = visited_pred[pred]\n\n# print \"\\n--- Step 2 ---\"\n# __print_work_table(work_table)\n\n\n #Step 3. Identify Necessary Dummy Arcs\n dups = set()\n visited_act = set()\n for columns in work_table.values():\n if not columns.blocked:\n for act in columns.pre:\n if act in visited_act:\n dups.add(act)\n visited_act.add(act)\n\n# print \"\\n--- Step 3.1 ---\"\n# print dups\n\n\n #Step 3.2, 3.3 and 4. Create rows and information for Dummy Arcs\n dummy_counter = collections.Counter()\n for _, columns in work_table.items():\n # Avoid blocked\n if not columns.blocked:\n predecessors = columns.pre\n if len(predecessors) > 1:\n for pre in list(predecessors):\n if pre in dups:\n predecessors.remove(pre)\n dummy_name = pre + '-d' + str(dummy_counter[pre])\n dummy_counter[pre] += 1\n predecessors.add(dummy_name)\n work_table[dummy_name] = Columns(set([pre]), False, True, None, None, None)\n\n# print \"\\n--- Step 4 ---\"\n# __print_work_table(work_table)\n\n\n #Step 5. Creating nodes\n node = 0 # instead of 0, can start at 100 to avoid confusion with activities named with numbers when debugging\n for act, columns in work_table.items():\n if not columns.dummy and not columns.blocked:\n columns.start_node = node\n node += 1\n\n# print \"\\n--- Step 5a ---\"\n# __print_work_table(work_table)\n\n for act, columns in work_table.items():\n if not columns.dummy and columns.blocked:\n columns.start_node = work_table[columns.blocked].start_node\n\n# print \"\\n--- Step 5b ---\"\n# __print_work_table(work_table)\n\n\n #Step 6. 
Associate activities with their end nodes\n # (a) find one non-dummy successor for each activity\n for act, columns in work_table.items():\n for suc, suc_columns in work_table.items():\n if not suc_columns.dummy and not suc_columns.blocked:\n if act in suc_columns.pre:\n columns.suc = suc\n break\n\n# print \"\\n--- Step 6a ---\"\n# __print_work_table(work_table)\n\n # (b) find end nodes\n graph_end_node = node # Reserve one node for graph end \n node += 1\n for act, columns in work_table.items():\n suc = columns.suc\n if suc:\n columns.end_node = work_table[suc].start_node\n else:\n # Create needed end nodes, avoiding multiple graph end nodes (adaptation)\n if act in end_act:\n columns.end_node = graph_end_node\n else:\n columns.end_node = node \n node += 1\n\n# print \"\\n--- Step 6b ---\"\n# __print_work_table(work_table)\n\n\n #Step 7. Associate dummy arcs with start nodes\n for act, columns in work_table.items():\n if columns.dummy:\n pred = iter(columns.pre).next()\n start_node = work_table[pred].end_node\n columns.start_node = start_node\n\n# print \"\\n--- Step 7 ---\"\n# __print_work_table(work_table)\n\n\n #Step 8. Generate the graph\n pm_graph = pert.PertMultigraph()\n for act, columns in work_table.items():\n _, _, dummy, _, start, end = columns\n pm_graph.add_arc((start, end), (act, dummy))\n\n p_graph = pm_graph.to_directed_graph()\n return p_graph.renumerar()", "def calc_cc(graph):\n\tclustering_coeffs = {}\n\tfor node in graph.nodes():\n\t\tclustering_coeffs[node] = { \"cc\" : nx.clustering(graph, node)}\n\tnx.set_node_attributes(graph, clustering_coeffs)", "def cc_visited(ugraph):\n remain = set(ugraph.keys())\n conn_comp = []\n while remain:\n node = remain.pop()\n visited = bfs_visited(ugraph, node)\n conn_comp.append(visited)\n remain = remain.difference(visited)\n return conn_comp", "def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def cooccuranceBlock(ntupleSet,nodeList):\n nodesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n for nodeInTuple in ntuple:\n nodesPerNode[nodeInTuple].extend(ntuple)\n \n for a,v in nodesPerNode.iteritems():\n differentNodes = set(v).difference(set([a]))\n NumberdifferentNodes = len(differentNodes) \n nodesPerNode[a] = (NumberdifferentNodes,differentNodes)\n \n return sorted(nodesPerNode.iteritems(),key=operator.itemgetter(1))", "def cc_visited(ugraph):\n \n remaining = set(ugraph.keys())\n ccomp = []\n while len(remaining) > 0:\n node = remaining.pop()\n visited = bfs_visited(ugraph,node)\n ccomp.append(visited)\n remaining.difference_update(visited)\n \n return ccomp", "def betweenness_centrality(self, node):\n l=[]\n b=0\n for i in vertices:\n if i!=node:\n l.append(i)\n comb=list(itertools.combinations(l,2))\n \n for c in comb:\n count=0\n l=self.all_shortest_paths(c[0],c[1])\n if l==None:\n print(c)\n for i in range(len(l)):\n if node in l[i]:\n count+=1\n b+=count/len(l)\n\n return b", "def connectivity_graph(rdm):\n rdm = _rd_chem.AddHs(rdm)\n atms = rdm.GetAtoms()\n bnds = rdm.GetBonds()\n asbs = dict(enumerate((rda.GetSymbol(), 0, None) 
for rda in atms))\n cnns = {frozenset([rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()]): (1, None)\n for rdb in bnds}\n return (asbs, cnns)", "def associate_successors(graph, node=\"\"):\n return {\n \"successors\": [\n {\n \"source\": node,\n \"target\": succ,\n \"edge_attribute\": graph.succ[node][succ][\"edge_attribute\"],\n }\n for succ in graph.succ[node]\n ]\n }", "def test_cc_visited(self):\n graph1 = {0: set()}\n self.assertEqual(p.cc_visited(graph1), [set([0])])\n\n graph2 = {0: set([1]), 1: set([0])}\n self.assertEqual(p.cc_visited(graph2), [set([0,1])])\n\n graph3 = {0: set([]), 1: set([])}\n self.assertItemsEqual(p.cc_visited(graph3), [set([0]), set([1])])\n\n graph4 = {0: set([]), 1: set([2]), 2: set([1])}\n self.assertItemsEqual(p.cc_visited(graph4), [set([0]), set([1,2])])\n\n graph6 = {\"a\": set([]), \"b\": set([\"c\"]), \"c\": set([\"d\",\"b\"]), \"d\":set([\"e\", \"c\"]), \"e\":set([\"d\"]) }\n self.assertItemsEqual(p.cc_visited(graph6), [set([\"a\"]), set([\"b\",\"c\", \"d\", \"e\"])])", "def _iter_pairs(graph):\n for u, v in set(graph.edges_iter()):\n yield u, v", "def transitive_closure(self, term_id_list):\n\n edges = set()\n visited = set(term_id_list)\n\n for term_id in term_id_list:\n current_term = term_id\n\n while current_term != 'KEGG Pathway':\n next_term = self.term2parent[current_term]\n edges.add((current_term, 'is_a', next_term))\n visited.add(current_term)\n current_term = next_term\n\n return visited, edges", "def condensation(G):\n scc = strongly_connected_components(G)\n mapping = dict([(n,tuple(sorted(c))) for c in scc for n in c])\n cG = nx.DiGraph()\n for u in mapping:\n cG.add_node(mapping[u])\n for _,v,d in G.edges_iter(u, data=True):\n if v not in mapping[u]:\n cG.add_edge(mapping[u], mapping[v])\n return cG", "def strongly_connected_components(self):\n for node in self.graph:\n node.data.visited = False\n\n reverse_graph = self.reverse_graph()\n dfs_reverse_graph = DFS(reverse_graph.graph, generate_stack=True)\n dfs_reverse_graph.search()\n\n dfs_graph = DFS(self.graph, generate_scc=True)\n\n while dfs_reverse_graph.stack:\n node = self.graph.get_node(dfs_reverse_graph.stack.top_pop())\n if not node.data.visited:\n dfs_graph.strongly_connected_components.append([])\n dfs_graph.num_scc += 1\n dfs_graph.explore(node)\n return dfs_graph.num_scc, dfs_graph.strongly_connected_components", "def combinations(graph, all_combs, all_costs, all_values, start, prev_cost, prev_value, prev_nodes):\n for ii in range(start, graph.size):\n # combination\n nodes = prev_nodes + [ii]\n all_combs.append(nodes)\n # cost\n cost = prev_cost + graph.node_weights[ii][0]\n all_costs.append(cost)\n # value\n value = prev_value + graph.node_weights[ii][1] - graph.node_weights[ii][0]\n for node in prev_nodes: # complementarity\n for adjacent in graph.graph[node]:\n if adjacent[0] == ii:\n value += adjacent[1]\n all_values.append(value)\n # recurse\n combinations(graph, all_combs, all_costs, all_values, ii+1, cost, value, nodes)", "def ClusteringTransitivity(graph):\n transitivity = nx.transitivity(graph)\n return transitivity", "def coarsen(A, levels, self_connections=False):\n # Function written by M. 
Defferrard, taken (almost) verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L5\n graphs, parents = metis(A, levels)\n perms = compute_perm(parents)\n\n for i, A in enumerate(graphs):\n M, M = A.shape\n\n if not self_connections:\n A = A.tocoo()\n A.setdiag(0)\n\n if i < levels:\n A = perm_adjacency(A, perms[i])\n\n A = A.tocsr()\n A.eliminate_zeros()\n graphs[i] = A\n\n# Mnew, Mnew = A.shape\n# print('Layer {0}: M_{0} = |V| = {1} nodes ({2} added),'\n# '|E| = {3} edges'.format(i, Mnew, Mnew-M, A.nnz//2))\n\n\n return graphs, perms[0] if levels > 0 else None", "def pair_has_contradiction(graph, u, v):\n relations = get_all_relations(graph, u, v)\n return relation_set_has_contradictions(relations)", "def get_consistent_edges(graph):\n for u, v in _iter_pairs(graph):\n if pair_is_consistent(graph, u, v):\n yield u, v", "def iterate_connected_atoms(self, atom):\n successors_iter = self._execution_graph.successors_iter\n return _depth_first_iterate(\n self._execution_graph, {\n co.FLOW: successors_iter,\n co.TASK: successors_iter,\n co.RETRY: successors_iter,\n }, successors_iter(atom))", "def transitive_reduction(self, sorted_node):\n \n if self.is_cyclic() is False:\n \n visited = set()\n path = []\n\n for i in sorted_node:\n if i not in visited:\n visited = self.transitive_reduction_helper(i, visited, path)\n \n print(\"Transitive reduction is completed.\")\n \n else:\n print(\"Transitive reduction can only be performed on directed acyclic graph.\")", "def relation_set_has_contradictions(relations):\n has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)\n has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)\n has_cnc = any(relation == CAUSES_NO_CHANGE for relation in relations)\n return 1 < sum([has_cnc, has_decreases, has_increases])", "def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected", "def indirect(stack):\n g = nx.Graph(stack)\n for group in nx.connected_components(g):\n yield from map(frozenset, combinations(group, 2))", "def find_conflicts(graph):\n for node in graph:\n for neighbour in node.neighbours:\n for n in neighbour.neighbours:\n if n is node:\n continue\n _log.info(\"Nodes %s and %s are in conflict.\" % (node, n))\n node.add_conflict_with_node(n)\n n.add_conflict_with_node(node)", "def _get_common_neighbour_node_pairs(self):\n node_pairs = []\n for node1 in self.graph.nodes():\n for node2 in self.graph.nodes():\n if node1 != node2:\n neighbour_count = self.neighbour_counts[(node1, node2)]\n if neighbour_count >= 1:\n node_pairs.append((node1, node2))\n return node_pairs", "def iter_all_nucleic_acids(self):\n for model in self.model_list:\n for chain in model.chain_list:\n for frag in chain.iter_nucleic_acids():\n yield frag", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)" ]
[ "0.6238017", "0.5909709", "0.5907469", "0.58362544", "0.57939804", "0.5792186", "0.5703263", "0.5681727", "0.56771266", "0.5671722", "0.56632626", "0.5611376", "0.5602615", "0.559068", "0.5563447", "0.5538029", "0.55253816", "0.55157125", "0.5515107", "0.5483767", "0.5442139", "0.54309833", "0.5430769", "0.54278564", "0.5423296", "0.54151136", "0.5407725", "0.5407581", "0.5406938", "0.5402561" ]
0.69560146
0
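The record above pairs its query with a generator that walks node pairs and yields the contradictory ones. Purely as an illustrative sketch of that pattern (not the original library's code), the snippet below uses an assumed edge-triple representation and assumed relation names such as "increases" and "decreases":

from collections import defaultdict

# Relation names are assumptions made only for this sketch.
CAUSAL_INCREASE_RELATIONS = {"increases", "directlyIncreases"}
CAUSAL_DECREASE_RELATIONS = {"decreases", "directlyDecreases"}

def get_contradictory_pairs(edges):
    # edges: iterable of (source, target, relation) triples
    relations = defaultdict(set)
    for u, v, relation in edges:
        relations[frozenset((u, v))].add(relation)
    for pair, rels in relations.items():
        has_increase = bool(rels & CAUSAL_INCREASE_RELATIONS)
        has_decrease = bool(rels & CAUSAL_DECREASE_RELATIONS)
        # a pair is contradictory when its relations mix increase and decrease
        if has_increase and has_decrease:
            yield tuple(sorted(pair))

edges = [
    ("A", "B", "increases"),
    ("A", "B", "decreases"),  # A and B are reported to both increase and decrease
    ("B", "C", "increases"),
]
print(list(get_contradictory_pairs(edges)))  # [('A', 'B')]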
Returns a counter of all of the mentions of pathologies in a network
def count_pathologies(graph): return Counter(_pathology_iterator(graph))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_relations(graph):\n return Counter(\n data[RELATION]\n for _, _, data in graph.edges_iter(data=True)\n )", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def count_unique_relations(graph):\n return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))", "def count_selfloop_mates(node,bamfile,G):\n off_node_count = 0\n on_node_count = 0\n if node[-1] == \"'\": node = node[:-1]\n try:\n for hit in bamfile.fetch(node):\n nref = bamfile.getrname(hit.next_reference_id)\n if nref != node:\n off_node_count += 1\n else: on_node_count += 1\n\n except ValueError:\n pass\n\n return off_node_count, on_node_count", "def treeThreat():\n\tglobal treeThreatList\n\ttreeThreatList = []\n\tcount = 0\n\tfor name in threatList:\n\t\tif name in treeList:\n\t\t\tcount += 1\n\t\t\ttreeThreatList.append(name)\n\treturn count", "def countTotalDistance(path):\n current = path[0]\n totalDistance = 0\n\n for node in path[1:]:\n totalDistance += distance_func(current, node)\n current = node\n\n return totalDistance", "def get_stats(sents):\n import os\n import re \n # first, put the relevant trees into temp file\n if 'outname' in kwargs.keys():\n to_open = 'tmp-%s.txt' % kwargs['outname']\n else:\n to_open = 'tmp.txt'\n with open(to_open, \"w\") as fo:\n for sent in sents:\n statsmode_results['Sentences'] += 1\n fo.write(sent.parse_string.rstrip().encode('utf-8', errors = 'ignore') + '\\n')\n deps = get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n statsmode_results['Words'] += len([w for w in sent.tokens if w.word.isalnum()])\n #statsmode_results['Unique words'] += len(set([w.word.lower() for w in sent.tokens if w.word.isalnum()]))\n #statsmode_results['Unique lemmata'] += len(set([w.lemma.lower() for w in sent.tokens if w.word.isalnum()]))\n\n # count moods via trees (/\\?/ !< __)\n from dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n #'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n #'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class words': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class words': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.mental, boundaries = 'w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.verbal, boundaries = 'w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.relational, boundaries = 'w')}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query = q, \n options = ['-o', '-C'], \n corpus = to_open, \n root = root)\n statsmode_results[name] += int(res)\n global numdone\n numdone += 1\n if root:\n root.update()\n if not root:\n 
tot_string = str(numdone + 1) + '/' + str(total_files * len(tregex_qs.keys()))\n if 'outname' in kwargs.keys():\n tot_string = '%s: %s' % (kwargs['outname'], tot_string)\n animator(p, numdone, tot_string, **par_args)\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((numdone * 100.0 / (total_files * len(tregex_qs.keys())) / denom) + startnum)\n os.remove(to_open)", "def count_naked_names(graph: BELGraph) -> typing.Counter[str]:\n return Counter(_naked_names_iter(graph))", "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def get_number_of_relations(model):\n if model == None:\n return 0\n counter = 0\n for line in model:\n if line.find('f(2') >= 0:\n counter += 1\n return float(counter)\n #TODO when multiples of same relation, the result is still 1", "def get_node_count(self) -> Iterable:\n return self._g.V().count().toList()[0]", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def path_length(graph, node_names):\n\n total = 0\n for i in range(0, len(node_names) - 1):\n total += graph.get_edge(node_names[i], node_names[i + 1]).length\n\n return total", "def numPaths(self):\n if self.numpaths > -1:\n return self.numpaths\n\n if self.jolt == 0:\n return 1\n\n paths = 0\n for parent in self.parents:\n paths += parent.numPaths()\n \n return paths", "def count_relation_doc(document):\n count = {}\n for line in document[1:]:\n _, _, _, relation_types, _ = conll04_parser.split_line(line)\n for relation in relation_types:\n if relation in count:\n count[relation] += 1\n else:\n count[relation] = 1\n return count", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def graph_count(self) -> int:\n return int(self.graph_tuple_stats.graph_count)", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def count_topologies(self, sample_sets=None) -> tskit.TopologyCounter:\n if sample_sets is None:\n sample_sets = [\n self.tree_sequence.samples(population=pop.id)\n for pop in self.tree_sequence.populations()\n ]\n\n return combinatorics.tree_count_topologies(self, sample_sets)", "def count(self):\n\t\treturn len(list(self.nodes))", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)", "def __init__(self, network):\n self.network = network\n self.histogram = [0] * (network.maxDegree() + 1)\n\n for key, node in network.nodes.items():\n self.histogram[node.degree()] += 1\n\n #print(\"Debug: histogram list \", self.histogram)\n\n # Other option:\n # Dict containing {id:degree}\n # self.degrees = {}\n # for node in network.nodes.iteritems():\n # self.degrees[node.identifier] = node.degree()\n # for i in range(0, network.maxDegree() + 1:\n # self.histogram[i] = self.degrees.values().count(i)", "def totalConnections(analyzer):\n return model.totalConnections(analyzer)", "def multiple_connections_histogram(synapses):\n count_of_synapses = synapses.groupby(['pre', 'post']).size()\n return count_of_synapses", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def 
GetNumberOfOnFootConnections(PathInfo):\r\n\tNumberOfOnFootConnections = 0 \r\n\tOnFootGattungList = TrWay.values()\r\n\r\n\tfor ConnInfo in PathInfo:\r\n\t\tgattung = ConnInfo[ConnInfoInd['line_category']]\r\n\t\tif gattung in OnFootGattungList:\r\n\t\t\tNumberOfOnFootConnections += 1 \r\n\treturn NumberOfOnFootConnections", "def show_nt_msg(nt):\n\n nt_set = set(nt)\n nt_dict = {}\n\n for nt_node in nt_set:\n nt_dict[nt_node] = len(np.where(nt == nt_node)[0])\n\n return nt_dict", "def count_automorphisms(g: Graph) -> int:\n\n def generate_mapping(g: Graph, h: Graph):\n \"\"\"\n Generates the corresponding mapping from vertex to vertex for the isomorphism between graphs g and h.\n We map g to h.\n :param g: A graph\n :param h: A graph\n :return: A permutation with the mapping from g to h\n \"\"\"\n mapping = [0] * len(g.vertices)\n for v_g in g:\n for v_h in h:\n if v_g.colornum == v_h.colornum:\n mapping[v_g.label] = v_h.label\n return permutation(len(mapping), mapping=mapping)\n\n def generate_automorphisms(g: Graph, h: Graph, d: list[Vertex], i: list[Vertex]):\n \"\"\"\n Is called recursively to traverse through the branching tree and to find all automorphisms.\n :param g: A copy of the original graph\n :param h: Another copy of the original graph\n :param d: A list with pre-colored vertices for graph g\n :param i: A list with pre-colored vertices for graph h\n \"\"\"\n\n # Refine the graphs g and h.\n color_refinement([g, h])\n\n # Make sure that the colors are balanced, and check for a bijection.\n if not is_balanced(g, h):\n return\n if is_bijection(g, h):\n\n # Generate the mapping from g -> h.\n p = generate_mapping(g, h)\n\n # If the permutation cannot be generated by this generating set, we need to add it.\n if not is_member(generating_set, p):\n generating_set.append(p)\n\n # We can now back to the last trivial ancestor nodes in the branching tree.\n while [v.label for v in d] != [v.label for v in i]:\n # We remove the vertices from d and i and mark them as 'used'.\n # This should prevent the algorithm from trying to re-explore a branch that may be skipped.\n # FIXME: This strategy seems too aggressive, the results are sometimes off by a factor 2 or 4\n d.pop().pre_labeled = True\n i.pop().pre_labeled = True\n\n return\n\n c, next_color = get_c([g, h])\n for v_g in g:\n if v_g.colornum == c:# and not v_g.pre_labeled:\n x = v_g\n break\n\n for v_h in h:\n if v_h.colornum == c and not v_h.pre_labeled:\n g1 = g + Graph(False)\n h1 = h + Graph(False)\n g1.vertices[g.vertices.index(x)].colornum = next_color\n h1.vertices[h.vertices.index(v_h)].colornum = next_color\n d.append(x)\n i.append(v_h)\n generate_automorphisms(g1, h1, d, i)\n\n generating_set = []\n graph_copy_1 = g + Graph(False)\n graph_copy_2 = g + Graph(False)\n for v in graph_copy_1.vertices:\n v.pre_labeled = False\n for v in graph_copy_2.vertices:\n v.pre_labeled = False\n generate_automorphisms(graph_copy_1, graph_copy_2, [], [])\n return compute_order(generating_set)", "def generateNumsets(G):\n # paths = []\n #\n # path = [0]\n # for edge in nx.dfs_edges(G, 0):\n # if edge[0] == path[-1]:\n # path.append(edge[1])\n # else:\n # paths.append(path)\n # search_index = 2\n # while search_index <= len(path):\n # if edge[0] == path[-search_index]:\n # path = path[:-search_index + 1] + [edge[1]]\n # break\n # search_index += 1\n # else:\n # raise Exception(\"Wrong path structure?\", path, edge)\n # paths.append(path)\n # return paths\n\n \"\"\"\n Trying to use itertools LMAO\n \"\"\"\n # paths = []\n #\n # for path in 
itertools.combinations(G.nodes, 5):\n # paths.append(path)\n # return paths\n\n \"\"\"\n Generating paths using graph\n \"\"\"\n paths = []\n n = len(G.nodes)\n for source in range(n):\n for target in range(source+1, n):\n paths.extend([path for path in nx.all_simple_paths(G, source=source, target=target)])\n return paths\n\n # return paths" ]
[ "0.6493108", "0.6265583", "0.61763126", "0.58439344", "0.58234376", "0.5724303", "0.5712313", "0.56395566", "0.5600497", "0.5599743", "0.5586163", "0.55701447", "0.5519347", "0.55081844", "0.54878646", "0.54671645", "0.54594654", "0.54498696", "0.5437356", "0.543203", "0.5405759", "0.5396653", "0.5395347", "0.53907776", "0.53864044", "0.53670144", "0.53615487", "0.535674", "0.53499484", "0.53321254" ]
0.7180617
0
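The record above counts pathology mentions by feeding an iterator into a Counter. The following self-contained sketch shows the same pattern; the node dictionaries, the "function" key, and the PATHOLOGY tag are assumptions made only for this example, not details taken from the original source:

from collections import Counter

PATHOLOGY = "Pathology"  # assumed tag, for illustration only

def _pathology_iterator(nodes):
    # Yield the name of every node tagged as a pathology.
    for node in nodes:
        if node.get("function") == PATHOLOGY:
            yield node["name"]

def count_pathologies(nodes):
    # Counter over the iterator, mirroring the pattern in the record above.
    return Counter(_pathology_iterator(nodes))

nodes = [
    {"function": PATHOLOGY, "name": "Atherosclerosis"},
    {"function": "Protein", "name": "APP"},
    {"function": PATHOLOGY, "name": "Atherosclerosis"},
]
print(count_pathologies(nodes))  # Counter({'Atherosclerosis': 2})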
Builds the URL to get the static map. Puts a marker on the start and end locations. Assumes start and end are in a format with enough info to give a proper location. It does clean up whitespace, though.
def find_map(start, end, *otherlocs):
    small = "200x200"
    large = "512x512"
    start = start.replace(" ", "+")
    end = end.replace(" ", "+")
    small_url = g_api_base_url + static_url + small + map_type_url + small_marker_url + start + map_concat + end
    big_url = g_api_base_url + static_url + large + map_type_url + marker_url + start + map_concat + end
    for loc in otherlocs:
        loc = loc.replace(" ", "+")
        small_url += loc
        big_url += loc
    small_url += goog_static_map_key
    big_url += goog_static_map_key
    return small_url, big_url
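A usage sketch for the function above: the module-level URL fragments (g_api_base_url, static_url, map_type_url, marker_url, small_marker_url, map_concat, goog_static_map_key) are globals the record does not define, so the placeholder values below are assumptions chosen only to show how the pieces concatenate into a Static Maps request:

# Placeholder values; a real request needs valid Static Maps parameters and a key.
g_api_base_url = "https://maps.googleapis.com/maps/api"
static_url = "/staticmap?size="
map_type_url = "&maptype=roadmap"
small_marker_url = "&markers=size:small%7C"
marker_url = "&markers="
map_concat = "%7C"
goog_static_map_key = "&key=YOUR_API_KEY"

small_url, big_url = find_map("1600 Amphitheatre Parkway", "1 Infinite Loop")
print(small_url)
# https://maps.googleapis.com/maps/api/staticmap?size=200x200&maptype=roadmap&markers=size:small%7C1600+Amphitheatre+Parkway%7C1+Infinite+Loop&key=YOUR_API_KEY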
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_url(self):\n if self.has_marker:\n marker_param = f'mlat={self.mlat}&mlng={self.mlng}&'\n else:\n marker_param = ''\n if self.start:\n start_param = 'start=true&'\n else:\n start_param = ''\n url = f'{MapController.MAP_URL}?{start_param}clat={self.clat}&clng={self.clng}&{marker_param}zoom={self.zoom}'\n return url", "def get_static_map(start_lng, start_lat, end_lng, end_lat):\r\n geojson_str = get_map_directions(start_lng, start_lat, end_lng, end_lat)\r\n return (\r\n f\"https://api.mapbox.com/styles/v1/mapbox/streets-v11/static/\"\r\n f\"geojson({geojson_str})/auto/640x640?access_token={MAPBOX_TOKEN}\"\r\n ), geojson_str", "def small_map(self):\n self.map_url = \"https://maps.googleapis.com/maps/api/staticmap?center={},{}&zoom=12&size=350x350&key={}\".format(self.lat, self.lng, api_key) \n return (self.map_url)", "def google(self):\r\n prefix ='https://maps.googleapis.com/maps/api/staticmap?center='\r\n middle = '&zoom=14&size=400x400&markers='\r\n suffix = '&key=AIzaSyD5nqmDGFH1SUZxJAYVtFHP7RNjjFE9CHg'\r\n marker = '+'.join(self.placeToSearch) # marker in google format, no space but + separator\r\n request = prefix + marker+middle+marker+suffix\r\n\r\n return request", "def generate_googlemaps(self):\n args = {}\n args['title'] = self.options.title\n args['googlemapskey'] = self.options.googlekey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = format_extension[self.image_output.format]\n args['publishurl'] = \"\" if self.options.url is None else self.options.url\n args['copyright'] = self.options.copyright\n\n s = \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:v=\"urn:schemas-microsoft-com:vml\">\n <head>\n <title>%(title)s</title>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n </style>\n <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s' type='text/javascript'></script>\n <script type=\"text/javascript\">\n //<![CDATA[\n\n /*\n * Constants for given map\n * TODO: read it from tilemapresource.xml\n */\n\n var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n\n var opacity = 0.75;\n var map;\n var ge;\n var hybridOverlay;\n\n /*\n * Create a Custom Opacity GControl\n * https://github.com/mj10777/mapmbtilesgoogle-maps-overlay-opacity-control/\n */\n\n var CTransparencyLENGTH = 58;\n // maximum width that the knob can move (slide width minus knob width)\n\n function CTransparencyControl( overlay ) {\n this.overlay = overlay;\n this.opacity = overlay.getTileLayer().getOpacity();\n }\n CTransparencyControl.prototype = new GControl();\n\n // This function positions the slider to 
match the specified opacity\n CTransparencyControl.prototype.setSlider = function(pos) {\n var left = Math.round((CTransparencyLENGTH*pos));\n this.slide.left = left;\n this.knob.style.left = left+\"px\";\n this.knob.style.top = \"0px\";\n }\n\n // This function reads the slider and sets the overlay opacity level\n CTransparencyControl.prototype.setOpacity = function() {\n // set the global variable\n opacity = this.slide.left/CTransparencyLENGTH;\n this.map.clearOverlays();\n this.map.addOverlay(this.overlay, { zPriority: 0 });\n if (this.map.getCurrentMapType() == G_HYBRID_MAP) {\n this.map.addOverlay(hybridOverlay);\n }\n }\n\n // This gets called by the API when addControl(new CTransparencyControl())\n CTransparencyControl.prototype.initialize = function(map) {\n var that=this;\n this.map = map;\n\n // Is this MSIE, if so we need to use AlphaImageLoader\n var agent = navigator.userAgent.toLowerCase();\n if ((agent.indexOf(\"msie\") > -1) && (agent.indexOf(\"opera\") < 1)){this.ie = true} else {this.ie = false}\n\n // create the background graphic as a <div> containing an image\n var container = document.createElement(\"div\");\n container.style.width=\"70px\";\n container.style.height=\"21px\";\n\n // Handle transparent PNG files in MSIE\n if (this.ie) {\n var loader = \"filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='https://github.com/mj10777/mapmbtiles/img/opacity-slider.png', sizingMethod='crop');\";\n container.innerHTML = '<div style=\"height:21px; width:70px; ' +loader+ '\" ></div>';\n } else {\n container.innerHTML = '<div style=\"height:21px; width:70px; background-image: url(https://github.com/mj10777/mapmbtiles/img/opacity-slider.png)\" ></div>';\n }\n\n // create the knob as a GDraggableObject\n // Handle transparent PNG files in MSIE\n if (this.ie) {\n var loader = \"progid:DXImageTransform.Microsoft.AlphaImageLoader(src='https://github.com/mj10777/mapmbtiles/img/opacity-slider.png', sizingMethod='crop');\";\n this.knob = document.createElement(\"div\");\n this.knob.style.height=\"21px\";\n this.knob.style.width=\"13px\";\n this.knob.style.overflow=\"hidden\";\n this.knob_img = document.createElement(\"div\");\n this.knob_img.style.height=\"21px\";\n this.knob_img.style.width=\"83px\";\n this.knob_img.style.filter=loader;\n this.knob_img.style.position=\"relative\";\n this.knob_img.style.left=\"-70px\";\n this.knob.appendChild(this.knob_img);\n } else {\n this.knob = document.createElement(\"div\");\n this.knob.style.height=\"21px\";\n this.knob.style.width=\"13px\";\n this.knob.style.backgroundImage=\"url(https://github.com/mj10777/mapmbtiles/img/opacity-slider.png)\";\n this.knob.style.backgroundPosition=\"-70px 0px\";\n }\n container.appendChild(this.knob);\n this.slide=new GDraggableObject(this.knob, {container:container});\n this.slide.setDraggableCursor('pointer');\n this.slide.setDraggingCursor('pointer');\n this.container = container;\n\n // attach the control to the map\n map.getContainer().appendChild(container);\n\n // init slider\n this.setSlider(this.opacity);\n\n // Listen for the slider being moved and set the opacity\n GEvent.addListener(this.slide, \"dragend\", function() {that.setOpacity()});\n //GEvent.addListener(this.container, \"click\", function( x, y ) { alert(x, y) });\n\n return container;\n }\n\n // Set the default position for the control\n CTransparencyControl.prototype.getDefaultPosition = function() {\n return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));\n }\n\n /*\n * Full-screen Window Resize\n */\n\n function 
getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n // map.checkResize();\n }\n\n\n /*\n * Main load function:\n */\n\n function load() {\n\n if (GBrowserIsCompatible()) {\n\n // Bug in the Google Maps: Copyright for Overlay is not correctly displayed\n var gcr = GMapType.prototype.getCopyrights;\n GMapType.prototype.getCopyrights = function(bounds,zoom) {\n return [\"%(copyright)s\"].concat(gcr.call(this,bounds,zoom));\n }\n\n map = new GMap2( document.getElementById(\"map\"), { backgroundColor: '#fff' } );\n\n map.addMapType(G_PHYSICAL_MAP);\n map.setMapType(G_PHYSICAL_MAP);\n\n map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));\n\n hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );\n GEvent.addListener(map, \"maptypechanged\", function() {\n if (map.getCurrentMapType() == G_HYBRID_MAP) {\n map.addOverlay(hybridOverlay);\"\"\" % args\n if self.kml:\n s += \"\"\"\n } else if (map.getCurrentMapType() == G_SATELLITE_3D_MAP) {\n var url = document.location.toString();\n if (url.substr(0,4) != 'http') alert('You have to upload the tiles to a webserver to see the overlay in Google Earth Plugin');\n if (!ge) map.getEarthInstance(getEarthInstanceCB);\"\"\"\n s += \"\"\"\n } else {\n map.removeOverlay(hybridOverlay);\n }\n } );\n\n var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);\n var mercator = new GMercatorProjection(mapMaxZoom+1);\n tilelayer.getTileUrl = function(tile,zoom) {\n if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n var ymax = 1 << zoom;\n var y = ymax - tile.y -1;\n var tileBounds = new GLatLngBounds(\n mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),\n mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )\n );\n if (mapBounds.intersects(tileBounds)) {\n return zoom+\"/\"+tile.x+\"/\"+y+\".%(tileformat)s\";\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n // IE 7-: support for PNG alpha channel\n // Unfortunately, the opacity for whole overlay is then not changeable, either or...\n tilelayer.isPng = function() { return true;};\n tilelayer.getOpacity = function() { return opacity; }\n\n overlay = new GTileLayerOverlay( tilelayer );\n map.addOverlay(overlay);\n\n map.addControl(new GLargeMapControl3D());\n map.addControl(new GHierarchicalMapTypeControl());\n map.addControl(new CTransparencyControl( overlay ));\n \"\"\" % args\n if self.kml:\n s += \"\"\"\n map.addMapType(G_SATELLITE_3D_MAP);\n \"\"\"\n s += \"\"\"\n\n map.enableContinuousZoom();\n 
map.enableScrollWheelZoom();\n\n map.setMapType(G_HYBRID_MAP);\n }\n resize();\n }\n \"\"\"\n if self.kml:\n s += \"\"\"\n function getEarthInstanceCB(object) {\n ge = object;\n var url = document.location.toString();\n var newurl = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';\n if (ge) {\n var link = ge.createLink(\"\");\n if (\"%(publishurl)s\") { link.setHref(\"%(publishurl)s/doc.kml\") }\n else { link.setHref(newurl) };\n var networkLink = ge.createNetworkLink(\"\");\n networkLink.set(link, false, false);\n ge.getFeatures().appendChild(networkLink);\n } else {\n // alert(\"Initialization of the Google Earth Plugin failed. You can still open the KML file in normal Google Earth.\");\n // window.location = newurl; // JavaScript redirect to the URL of KML\n }\n }\n \"\"\" % args\n s += \"\"\"\n onresize=function(){ resize(); };\n\n //]]>\n </script>\n </head>\n <body onload=\"load()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"https://github.com/mj10777/mapmbtiles\">MapMbTiles</a>/<a href=\"http://www.klokan.cz/projects/gdal2mbtiles/\">GDAL2MbTiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->\n </div>\n <div id=\"map\"></div>\n </body>\n </html>\n \"\"\" % args\n\n return s", "def generate_googlemaps(self):\n args = {}\n args['title'] = self.options.title\n args['googlemapskey'] = self.options.googlekey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url\n args['copyright'] = self.options.copyright\n\n s = r\"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:v=\"urn:schemas-microsoft-com:vml\">\n <head>\n <title>%(title)s</title>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n </style>\n <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s'></script>\n <script>\n //<![CDATA[\n\n /*\n * Constants for given map\n * TODO: read it from tilemapresource.xml\n */\n\n var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n\n var opacity = 0.75;\n var map;\n var hybridOverlay;\n\n /*\n * Create a Custom Opacity GControl\n * http://www.maptiler.org/google-maps-overlay-opacity-control/\n */\n\n var CTransparencyLENGTH = 58;\n // maximum width that the knob can move (slide width minus knob width)\n\n 
function CTransparencyControl( overlay ) {\n this.overlay = overlay;\n this.opacity = overlay.getTileLayer().getOpacity();\n }\n CTransparencyControl.prototype = new GControl();\n\n // This function positions the slider to match the specified opacity\n CTransparencyControl.prototype.setSlider = function(pos) {\n var left = Math.round((CTransparencyLENGTH*pos));\n this.slide.left = left;\n this.knob.style.left = left+\"px\";\n this.knob.style.top = \"0px\";\n }\n\n // This function reads the slider and sets the overlay opacity level\n CTransparencyControl.prototype.setOpacity = function() {\n // set the global variable\n opacity = this.slide.left/CTransparencyLENGTH;\n this.map.clearOverlays();\n this.map.addOverlay(this.overlay, { zPriority: 0 });\n if (this.map.getCurrentMapType() == G_HYBRID_MAP) {\n this.map.addOverlay(hybridOverlay);\n }\n }\n\n // This gets called by the API when addControl(new CTransparencyControl())\n CTransparencyControl.prototype.initialize = function(map) {\n var that=this;\n this.map = map;\n\n // Is this MSIE, if so we need to use AlphaImageLoader\n var agent = navigator.userAgent.toLowerCase();\n if ((agent.indexOf(\"msie\") > -1) && (agent.indexOf(\"opera\") < 1)){this.ie = true} else {this.ie = false}\n\n // create the background graphic as a <div> containing an image\n var container = document.createElement(\"div\");\n container.style.width=\"70px\";\n container.style.height=\"21px\";\n\n // Handle transparent PNG files in MSIE\n if (this.ie) {\n var loader = \"filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');\";\n container.innerHTML = '<div style=\"height:21px; width:70px; ' +loader+ '\" ></div>';\n } else {\n container.innerHTML = '<div style=\"height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)\" ></div>';\n }\n\n // create the knob as a GDraggableObject\n // Handle transparent PNG files in MSIE\n if (this.ie) {\n var loader = \"progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');\";\n this.knob = document.createElement(\"div\");\n this.knob.style.height=\"21px\";\n this.knob.style.width=\"13px\";\n this.knob.style.overflow=\"hidden\";\n this.knob_img = document.createElement(\"div\");\n this.knob_img.style.height=\"21px\";\n this.knob_img.style.width=\"83px\";\n this.knob_img.style.filter=loader;\n this.knob_img.style.position=\"relative\";\n this.knob_img.style.left=\"-70px\";\n this.knob.appendChild(this.knob_img);\n } else {\n this.knob = document.createElement(\"div\");\n this.knob.style.height=\"21px\";\n this.knob.style.width=\"13px\";\n this.knob.style.backgroundImage=\"url(http://www.maptiler.org/img/opacity-slider.png)\";\n this.knob.style.backgroundPosition=\"-70px 0px\";\n }\n container.appendChild(this.knob);\n this.slide=new GDraggableObject(this.knob, {container:container});\n this.slide.setDraggableCursor('pointer');\n this.slide.setDraggingCursor('pointer');\n this.container = container;\n\n // attach the control to the map\n map.getContainer().appendChild(container);\n\n // init slider\n this.setSlider(this.opacity);\n\n // Listen for the slider being moved and set the opacity\n GEvent.addListener(this.slide, \"dragend\", function() {that.setOpacity()});\n //GEvent.addListener(this.container, \"click\", function( x, y ) { alert(x, y) });\n\n return container;\n }\n\n // Set the default position for the control\n 
CTransparencyControl.prototype.getDefaultPosition = function() {\n return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));\n }\n\n /*\n * Full-screen Window Resize\n */\n\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n // map.checkResize();\n }\n\n\n /*\n * Main load function:\n */\n\n function load() {\n\n if (GBrowserIsCompatible()) {\n\n // Bug in the Google Maps: Copyright for Overlay is not correctly displayed\n var gcr = GMapType.prototype.getCopyrights;\n GMapType.prototype.getCopyrights = function(bounds,zoom) {\n return [\"%(copyright)s\"].concat(gcr.call(this,bounds,zoom));\n }\n\n map = new GMap2( document.getElementById(\"map\"), { backgroundColor: '#fff' } );\n\n map.addMapType(G_PHYSICAL_MAP);\n map.setMapType(G_PHYSICAL_MAP);\n\n map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));\n\n hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );\n GEvent.addListener(map, \"maptypechanged\", function() {\n if (map.getCurrentMapType() == G_HYBRID_MAP) {\n map.addOverlay(hybridOverlay);\n } else {\n map.removeOverlay(hybridOverlay);\n }\n } );\n\n var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);\n var mercator = new GMercatorProjection(mapMaxZoom+1);\n tilelayer.getTileUrl = function(tile,zoom) {\n if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {\n return \"http://www.maptiler.org/img/none.png\";\n }\n var ymax = 1 << zoom;\n var y = ymax - tile.y -1;\n var tileBounds = new GLatLngBounds(\n mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),\n mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )\n );\n if (mapBounds.intersects(tileBounds)) {\n return zoom+\"/\"+tile.x+\"/\"+y+\".png\";\n } else {\n return \"http://www.maptiler.org/img/none.png\";\n }\n }\n // IE 7-: support for PNG alpha channel\n // Unfortunately, the opacity for whole overlay is then not changeable, either or...\n tilelayer.isPng = function() { return true;};\n tilelayer.getOpacity = function() { return opacity; }\n\n overlay = new GTileLayerOverlay( tilelayer );\n map.addOverlay(overlay);\n\n map.addControl(new GLargeMapControl());\n map.addControl(new GHierarchicalMapTypeControl());\n map.addControl(new CTransparencyControl( overlay ));\n \"\"\" % args # noqa\n if self.kml:\n s += \"\"\"\n map.addMapType(G_SATELLITE_3D_MAP);\n map.getEarthInstance(getEarthInstanceCB);\n \"\"\"\n s += \"\"\"\n\n map.enableContinuousZoom();\n map.enableScrollWheelZoom();\n\n map.setMapType(G_HYBRID_MAP);\n }\n resize();\n }\n \"\"\"\n if self.kml:\n s += \"\"\"\n function getEarthInstanceCB(object) {\n var ge = 
object;\n\n if (ge) {\n var url = document.location.toString();\n url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';\n var link = ge.createLink(\"\");\n if (\"%(publishurl)s\") { link.setHref(\"%(publishurl)s/doc.kml\") }\n else { link.setHref(url) };\n var networkLink = ge.createNetworkLink(\"\");\n networkLink.setName(\"TMS Map Overlay\");\n networkLink.setFlyToView(true);\n networkLink.setLink(link);\n ge.getFeatures().appendChild(networkLink);\n } else {\n // alert(\"You should open a KML in Google Earth\");\n // add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?\n }\n }\n \"\"\" % args # noqa\n s += \"\"\"\n onresize=function(){ resize(); };\n\n //]]>\n </script>\n </head>\n <body onload=\"load()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->\n </div>\n <div id=\"map\"></div>\n </body>\n </html>\n \"\"\" % args # noqa\n\n return s", "def build_url(start, end, transit_mode):\n transit = \"\"\n traffic = \"best_guess\"\n depart = \"now\"\n if transit_mode:\n transit = transit_mode\n direc_url = g_api_base_url + dir_url + \"origin=\" + start + \"&destination=\" + end + trans_url \\\n + transit + goog_dir_key\n dist_url = g_api_base_url + dis_url + units_i + or_dis_url + start + des_url + end + trans_url \\\n + transit + traffic_url + traffic + depart_url + depart + goog_dis_key\n direc_url = direc_url.replace(\" \",\"+\")\n print(\"directions :\"+ direc_url)\n dist_url = dist_url.replace(\" \",\"+\")\n return direc_url, dist_url", "def get_static_google_map(filename_wo_extension, center=None, zoom=None, imgsize=\"640x640\", imgformat=\"png\",\n maptype=\"roadmap\", markers=None):\n\n # assemble the URL\n # base URL, append query params, separated by &\n request = \"http://maps.google.com/maps/api/staticmap?\"\n apiKey = os.getenv('GOOGLE_MAPS_API_KEY')\n # if center and zoom are not given, the map will show all marker locations\n request += \"key=%s&\" % apiKey\n if center != None:\n request += \"center=%s&\" % center\n if zoom != None:\n # zoom 0 (all of the world scale ) to 22 (single buildings scale)\n request += \"zoom=%i&\" % zoom\n\n request += \"size=%ix%i&\" % (imgsize) # tuple of ints, up to 640 by 640\n request += \"format=%s&\" % imgformat\n request += \"bearing=90&\"\n # request += \"maptype=%s&\" % maptype # roadmap, satellite, hybrid, terrain\n\n # add markers (location and style)\n if markers != None:\n for marker in markers:\n request += \"%s&\" % marker\n\n request = request.rstrip('&')\n # #request += \"mobile=false&\" # optional: mobile=true will assume the image is shown on a small screen (mobile device)\n # request += \"sensor=false\" # must be given, deals with getting loction from mobile device\n # try:\n urllib.request.urlretrieve(request, filename_wo_extension)", "def get_driving_map(start_lng, start_lat, end_lng, end_lat):\r\n geojson_str = get_driving_directions(start_lng, start_lat, end_lng, end_lat)\r\n return (\r\n f\"https://api.mapbox.com/styles/v1/mapbox/streets-v11/static/\"\r\n f\"geojson({geojson_str})/auto/640x640?access_token={MAPBOX_TOKEN}\"\r\n ), 
geojson_str", "def build_maps():\n return render_template(\"maps.html\")", "def get_trouble_spot_map(trouble_spot_lst):\n lat_long_lst = []\n for item in trouble_spot_lst:\n latitude = item[0]\n longitude = item[1]\n married_lat_long = LAT_LONG_TMPL.format(lat=latitude, long=longitude)\n lat_long_lst.append(married_lat_long)\n\n lat_long_lst_str = ''.join(lat_long_lst)\n\n result = URL_TMPL.format(key=API_KEY, marker=MARKER_SPEC, lat_long_lst=lat_long_lst_str)\n return result", "def generate_leaflet(self):\n\n args = {}\n args['title'] = self.options.title.replace('\"', '\\\\\"')\n args['htmltitle'] = self.options.title\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['centerlon'] = (args['north'] + args['south']) / 2.\n args['centerlat'] = (args['west'] + args['east']) / 2.\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['beginzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize # not used\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url # not used\n args['copyright'] = self.options.copyright.replace('\"', '\\\\\"')\n\n s = \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />\n <title>%(htmltitle)s</title>\n\n <!-- Leaflet -->\n <link rel=\"stylesheet\" href=\"http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css\" />\n <script src=\"http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js\"></script>\n\n <style>\n body { margin:0; padding:0; }\n body, table, tr, td, th, div, h1, h2, input { font-family: \"Calibri\", \"Trebuchet MS\", \"Ubuntu\", Serif; font-size: 11pt; }\n #map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */\n .ctl {\n padding: 2px 10px 2px 10px;\n background: white;\n background: rgba(255,255,255,0.9);\n box-shadow: 0 0 15px rgba(0,0,0,0.2);\n border-radius: 5px;\n text-align: right;\n }\n .title {\n font-size: 18pt;\n font-weight: bold;\n }\n .src {\n font-size: 10pt;\n }\n\n </style>\n\n </head>\n <body>\n\n <div id=\"map\"></div>\n\n <script>\n /* **** Leaflet **** */\n\n // Base layers\n // .. OpenStreetMap\n var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '&copy; <a href=\"http://osm.org/copyright\">OpenStreetMap</a> contributors'});\n\n // .. CartoDB Positron\n var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '&copy; <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> contributors, &copy; <a href=\"http://cartodb.com/attributions\">CartoDB</a>'});\n\n // .. OSM Toner\n var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href=\"http://stamen.com\">Stamen Design</a>, under <a href=\"http://creativecommons.org/licenses/by/3.0\">CC BY 3.0</a>. Data by <a href=\"http://openstreetmap.org\">OpenStreetMap</a>, under <a href=\"http://www.openstreetmap.org/copyright\">ODbL</a>.'});\n\n // .. 
White background\n var white = L.tileLayer(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==\");\n\n // Overlay layers (TMS)\n var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: \"%(copyright)s\"});\n\n // Map\n var map = L.map('map', {\n center: [%(centerlon)s, %(centerlat)s],\n zoom: %(beginzoom)s,\n minZoom: %(minzoom)s,\n maxZoom: %(maxzoom)s,\n layers: [osm]\n });\n\n var basemaps = {\"OpenStreetMap\": osm, \"CartoDB Positron\": cartodb, \"Stamen Toner\": toner, \"Without background\": white}\n var overlaymaps = {\"Layer\": lyr}\n\n // Title\n var title = L.control();\n title.onAdd = function(map) {\n this._div = L.DomUtil.create('div', 'ctl title');\n this.update();\n return this._div;\n };\n title.update = function(props) {\n this._div.innerHTML = \"%(title)s\";\n };\n title.addTo(map);\n\n // Note\n var src = 'Generated by <a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>';\n var title = L.control({position: 'bottomleft'});\n title.onAdd = function(map) {\n this._div = L.DomUtil.create('div', 'ctl src');\n this.update();\n return this._div;\n };\n title.update = function(props) {\n this._div.innerHTML = src;\n };\n title.addTo(map);\n\n\n // Add base layers\n L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);\n\n // Fit to overlay bounds (SW and NE points with (lat, lon))\n map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);\n\n </script>\n\n </body>\n </html>\n\n \"\"\" % args # noqa\n\n return s", "def calc_url(self):\n place = self._get_first_place()[0]\n path = \"\"\n # First see if we are in or near Sweden or Denmark\n # Change country to upper case\n location = get_main_location(self.database, place)\n country = location.get(PlaceType.COUNTRY, '').upper().strip()\n country_given = (country in MAP_NAMES_SWEDEN or \\\n country in MAP_NAMES_DENMARK) and (country != \"\")\n # if no country given, check if we might be in the vicinity defined by\n # 54 33' 0\" < lat < 66 9' 0\", 54.55 and 69.05\n # 8 3' 0\" < long < 24 9' 0\", 8.05 and 24.15 \n latitude, longitude = self._lat_lon(place)\n if latitude is None or longitude is None:\n coord_ok = False\n else:\n latitude = float(latitude) \n longitude = float(longitude)\n # Check if coordinates are inside Sweden and Denmark\n if (54.55 < latitude < 69.05) and (8.05 < longitude < 24.15):\n coord_ok = True\n else:\n msg2 = _(\"Latitude not within '54.55' to '69.05'\\n\") + \\\n _(\"Longitude not within '8.05' to '24.15'\")\n WarningDialog(_(\"Eniro map not available\"), msg2 )\n return\n\n if coord_ok:\n place_title = _build_title(self.database, place)\n place_city = _build_city(self.database, place)\n x_coord, y_coord = self._lat_lon(place, format=\"RT90\")\n # Set zoom level to 5 if Sweden/Denmark, others 3\n zoom = 5\n if not country_given:\n zoom = 3\n path = \"http://www.eniro.se/partner.fcgi?pis=1&x=%s&y=%s\" \\\n \"&zoom_level=%i&map_size=0&title=%s&city=%s&partner=gramps\"\n # Note x and y are swapped!\n path = path % (y_coord , x_coord, zoom, place_title, place_city)\n self.url = path.replace(\" \",\"%20\")\n return\n\n place_area = _build_area(self.database, place)\n if country_given and place_area:\n if 
country in MAP_NAMES_SWEDEN:\n path = \"http://kartor.eniro.se/query?&what=map_adr&mop=aq\" \\\n \"&geo_area=%s&partner=gramps\"\n path = path % (place_area)\n self.url = path.replace(\" \",\"%20\")\n return\n else:\n WarningDialog(_(\"Eniro map not available\"), \\\n _(\"Coordinates needed in Denmark\") )\n self.url = \"\"\n return\n\n WarningDialog(_(\"Eniro map not available\"), \n _(\"Latitude and longitude,\\n\" \\\n \"or street and city needed\") )\n return", "def generate_geo_map(start_date, end_date):\n start_time = dt.datetime.now()\n\n start_date = dt.datetime.strptime(start_date.split('T')[0], '%Y-%m-%d') # Convert strings to datetime objects\n end_date = dt.datetime.strptime(end_date.split('T')[0], '%Y-%m-%d')\n\n filtered_data = filter_dataframe(df, start_date, end_date)\n dff = filtered_data\n\n traces = []\n\n data = [ dict(\n type = 'scattermapbox',\n lon = dff['lat'],\n lat = dff['lon'],\n text = dff['timestamp'],\n mode = 'markers',\n marker = dict(\n size = 8,\n opacity = 0.8,\n color = 'orange'\n ))]\n\n # relayoutData is None by default, and {'autosize': True} without relayout action\n # if main_graph_layout is not None and selector is not None and \"locked\" in selector:\n # if \"mapbox.center\" in main_graph_layout.keys():\n # lon = float(main_graph_layout[\"mapbox.center\"][\"lon\"])\n # lat = float(main_graph_layout[\"mapbox.center\"][\"lat\"])\n # zoom = float(main_graph_layout[\"mapbox.zoom\"])\n # layout[\"mapbox\"][\"center\"][\"lon\"] = lon\n # layout[\"mapbox\"][\"center\"][\"lat\"] = lat\n # layout[\"mapbox\"][\"zoom\"] = zoom\n\n print('generate_geo_map:', (dt.datetime.now()-start_time).total_seconds())\n\n figure = dict(data=data, layout=layout)\n return figure", "def generate_openlayers( self ):\n\n args = {}\n args['title'] = self.options.title\n args['googlemapskey'] = self.options.googlekey\n args['yahooappid'] = self.options.yahookey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = format_extension[self.image_output.format]\n if self.image_output.format == \"PNG\":\n args['has_alpha'] = 'true'\n else:\n args['has_alpha'] = 'false'\n args['publishurl'] = \"\" if self.options.url is None else self.options.url\n args['copyright'] = self.options.copyright\n if self.options.profile in ('raster', 'gearth'):\n args['rasterzoomlevels'] = self.tmaxz+1\n args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]\n\n s = \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml>\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n </style>\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n <script src='http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1'></script>\n <script 
src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s' type='text/javascript'></script>\n <script src=\"http://api.maps.yahoo.com/ajaxymap?v=3.0&amp;appid=%(yahooappid)s\"></script>\"\"\" % args\n\n s += \"\"\"\n <script src=\"http://www.openlayers.org/api/2.7/OpenLayers.js\" type=\"text/javascript\"></script>\n <script type=\"text/javascript\">\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n\n // avoid pink tiles\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n OpenLayers.Util.onImageLoadErrorColor = \"transparent\";\n\n function init(){\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n var options = {\n controls: [],\n projection: new OpenLayers.Projection(\"EPSG:900913\"),\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n units: \"m\",\n maxResolution: 156543.0339,\n maxExtent: new OpenLayers.Bounds(-20037508, -20037508, 20037508, 20037508.34)\n };\n map = new OpenLayers.Map('map', options);\n\n // create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n { sphericalMercator: true, numZoomLevels: 20} );\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {type: G_SATELLITE_MAP, sphericalMercator: true, numZoomLevels: 20} );\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {type: G_HYBRID_MAP, sphericalMercator: true, numZoomLevels: 20});\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {type: G_PHYSICAL_MAP, sphericalMercator: true, numZoomLevels: 20 });\n\n // create Virtual Earth layers\n OpenLayers.Layer.VirtualEarth.prototype.MAX_ZOOM_LEVEL=19;\n OpenLayers.Layer.VirtualEarth.prototype.RESOLUTIONS=OpenLayers.Layer.Google.prototype.RESOLUTIONS\n var veroad = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Roads\",\n {'type': VEMapStyle.Road, 'sphericalMercator': true, numZoomLevels: 20});\n var veaer = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Aerial\",\n {'type': VEMapStyle.Aerial, 'sphericalMercator': true, numZoomLevels: 20 });\n var vehyb = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Hybrid\",\n {'type': VEMapStyle.Hybrid, 'sphericalMercator': true});\n\n // create Yahoo layer\n var yahoo = new OpenLayers.Layer.Yahoo(\"Yahoo Street\",\n {'sphericalMercator': true});\n var yahoosat = new OpenLayers.Layer.Yahoo(\"Yahoo Satellite\",\n {'type': YAHOO_MAP_SAT, 'sphericalMercator': true});\n var yahoohyb = new OpenLayers.Layer.Yahoo(\"Yahoo Hybrid\",\n {'type': YAHOO_MAP_HYB, 'sphericalMercator': true});\n\n // create OSM/OAM layer\n var osm = new OpenLayers.Layer.TMS( \"OpenStreetMap\",\n \"http://tile.openstreetmap.org/\",\n { type: 'png', getURL: osm_getTileURL, displayOutsideMaxExtent: true,\n attribution: '<a href=\"http://www.openstreetmap.org/\">OpenStreetMap</a>'} );\n var oam = new OpenLayers.Layer.TMS( \"OpenAerialMap\",\n \"http://tile.openaerialmap.org/tiles/1.0.0/openaerialmap-900913/\",\n { type: 'png', getURL: osm_getTileURL } );\n\n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS( \"TMS Overlay\", \"\",\n { // url: '', serviceVersion: '.', layername: '.',\n type: '%(tileformat)s', getURL: overlay_getTileURL, alpha: %(has_alpha)s,\n isBaseLayer: false\n });\n if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); }\n\n map.addLayers([gmap, gsat, ghyb, gter, veroad, veaer, vehyb,\n yahoo, yahoosat, yahoohyb, osm, oam,\n tmsoverlay]);\n\n var switcherControl = new 
OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent( mapBounds.transform(map.displayProjection, map.projection ) );\n \"\"\" % args\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n var options = {\n controls: [],\n projection: new OpenLayers.Projection(\"EPSG:4326\"),\n maxResolution: 0.703125,\n maxExtent: new OpenLayers.Bounds(-180, -90, 180, 90)\n };\n map = new OpenLayers.Map('map', options);\n\n layer = new OpenLayers.Layer.WMS( \"Blue Marble\",\n \"http://labs.metacarta.com/wms-c/Basic.py?\", {layers: 'satellite' } );\n map.addLayer(layer);\n wms = new OpenLayers.Layer.WMS( \"VMap0\",\n \"http://labs.metacarta.com/wms-c/Basic.py?\", {layers: 'basic', format: 'image/png' } );\n map.addLayer(wms);\n\n var tmsoverlay = new OpenLayers.Layer.TMS( \"TMS Overlay\", \"\",\n {\n serviceVersion: '.', layername: '.', alpha: %(has_alpha)s,\n type: '%(tileformat)s', getURL: overlay_getTileURL,\n isBaseLayer: false\n });\n map.addLayer(tmsoverlay);\n if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); }\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent( mapBounds );\n \"\"\" % args\n\n elif self.options.profile in ('raster', 'gearth'):\n s += \"\"\"\n var options = {\n controls: [],\n maxExtent: new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s ),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map('map', options);\n\n var layer = new OpenLayers.Layer.TMS( \"TMS Layer\",\"\",\n { url: '', serviceVersion: '.', layername: '.', alpha: %(has_alpha)s,\n type: '%(tileformat)s', getURL: overlay_getTileURL\n });\n map.addLayer(layer);\n map.zoomToExtent( mapBounds );\n \"\"\" % args\n\n\n s += \"\"\"\n map.addControl(new OpenLayers.Control.PanZoomBar());\n map.addControl(new OpenLayers.Control.MousePosition());\n map.addControl(new OpenLayers.Control.MouseDefaults());\n map.addControl(new OpenLayers.Control.KeyboardDefaults());\n }\n \"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n function osm_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((this.maxExtent.top - bounds.top) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n var limit = Math.pow(2, z);\n\n if (y < 0 || y >= limit) {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n } else {\n x = ((x %% limit) + limit) %% limit;\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n }\n }\n\n function overlay_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n if (this.map.baseLayer.name == 'Virtual Earth Roads' || this.map.baseLayer.name == 'Virtual Earth Aerial' || this.map.baseLayer.name == 'Virtual Earth Hybrid') {\n z = z + 1;\n }\n if (mapBounds.intersectsBounds( bounds ) && z >= mapMinZoom && z <= mapMaxZoom ) {\n //console.log( this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type);\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n elif 
self.options.profile == 'geodetic':\n s += \"\"\"\n function overlay_getTileURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (mapBounds.intersectsBounds( bounds ) && z >= mapMinZoom && z <= mapMaxZoom) {\n // console.log( this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type);\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n elif self.options.profile in ('raster','gearth'):\n s += \"\"\"\n function overlay_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.maxExtent.bottom) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n if (x >= 0 && y >= 0) {\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n s += \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"https://github.com/mj10777/mapmbtiles\">MapMbTiles</a>/<a href=\"http://www.klokan.cz/projects/gdal2mbtiles/\">GDAL2MbTiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. 
THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" % args\n\n return s", "def build_url(self):\n return self.data_url.format(latitude=self.latitude, longitude=self.longitude)", "def map_link(self):\n return self.map_url.format(latitude=self.latitude, longitude=self.longitude)", "def build_url(ori_lon, ori_lat, des_lon, des_lat, year, month, day, hour, minute, args={}):\n options = dict()\n with open(option_file, 'r', newline='') as file:\n # Read the options file\n for line in file:\n if line[0] == '#': # if the first character of a line is '#' skip it\n continue\n splited_line = line.rstrip().split(':')\n if len(splited_line) < 2: # if it is a line with no ':'\n continue\n options[splited_line[0]] = splited_line[1]\n base_URL = 'localhost:' + port + '/otp/routers/default/plan'\n fromPlace = ori_lon + ',' + ori_lat\n toPlace = des_lon + ',' + des_lat\n date = year + '/' + month + '/' + day\n time = hour + ':' + minute + ':00'\n\n url = 'http://' + base_URL + '?fromPlace=' + fromPlace + '&toPlace=' + toPlace + '&date=' + date + '&time=' + time\n for option_name in options.keys():\n option = options[option_name]\n url += '&' + option_name + '=' + option\n if not 'mode' in url:\n url += '&mode=TRANSIT,WALK'\n for key in args.keys():\n url+= '&' + key + '=' + args[key]\n\n return url", "def map():\n return render_template('map.html')", "def visualize(g):\n url = \"http://www.gcmap.com/mapui?P=\"\n routes = []\n \n for key in g.city_dict:\n for flight in g.city_dict[key].get_flights_out():\n route = (g.city_dict[key].get_code(), flight[0])\n if(route not in routes):\n routes.append(route)\n \n for flight in routes:\n url = url + flight[0] + \"-\" + flight[1]\n url = url + \",+\"\n \n url = url[:-2]\n return url", "def map():\n\n return render_template(\"map.html\")", "def mobilize_map_url(value, args=None):\n url = value\n # url = value.replace(\"maps.google.com\", 'm.google.com')\n return url # + \"&source=mobilesearchapp\"", "def write_prefix(self, lat, lon):\n # Open the Maps API and center at the mean of the data\n self.fd.write(\"\"\"<!DOCTYPE html>\n <html>\n <head>\n <meta name=\"viewport\" content=\"initial-scale=1.0, user-scalable=no\" />\n <style type=\"text/css\">\n html { height: 100%% }\n body { height: 100%%; margin: 0px; padding: 0px }\n </style>\n <script type=\"text/javascript\"\n src=\"http://maps.google.com/maps/api/js?sensor=false\">\n </script>\n <script type=\"text/javascript\">\n function initialize() {\n var myLatlng = new google.maps.LatLng(%f, %f);\n var myOptions = {\n zoom: 14,\n center: myLatlng,\n mapTypeId: google.maps.MapTypeId.ROADMAP\n }\n var map = new google.maps.Map(document.getElementById(\"map_canvas\"), myOptions);\n var infowindow = new google.maps.InfoWindow({\n content: 'No EXIF Data'\n });\n var contentStrings = {}\n \"\"\" % (lat, lon))", "def walking():\r\n name = request.args[\"address\"]\r\n end_name=request.args[\"end_point\"]\r\n end_lng = request.args[\"end_lng\"]\r\n end_lat = request.args[\"end_lat\"]\r\n end_lng = float(end_lng)\r\n end_lat = float(end_lat)\r\n start_lng=get_address(name)[1]\r\n start_lat=get_address(name)[0]\r\n\r\n\r\n #get coordinates of start and end point\r\n map_directions, geojson_str = get_static_map(\r\n start_lng=start_lng,\r\n start_lat=start_lat,\r\n end_lng=end_lng,\r\n end_lat=end_lat,\r\n )\r\n logging.warning(\"Map directions %s\", str(map_directions))\r\n\r\n\r\n #retrieve instructions\r\n instructions = get_map_instructions(\r\n 
start_lng=start_lng,\r\n start_lat=start_lat,\r\n end_lng=end_lng,\r\n end_lat=end_lat,\r\n )\r\n\r\n\r\n # generate interactive map\r\n return render_template(\r\n \"page4.html\",\r\n mapbox_token=MAPBOX_TOKEN,\r\n geojson_str=geojson_str,\r\n end_name=end_name,\r\n name=name,\r\n start_lng=start_lng,\r\n start_lat=start_lng,\r\n end_lng=end_lng,\r\n end_lat=end_lat,\r\n center_lng=(start_lng + end_lng) / 2,\r\n center_lat=(start_lat + end_lat) / 2,\r\n instructions=instructions,\r\n method = 'Walking'\r\n )", "def plot_gps_html(gps_list):\n import gmplot\n import matplotlib as mpl\n import vtool as vt\n\n import wbia.plottool as pt\n\n pt.qt4ensure()\n\n lat = gps_list.T[0]\n lon = gps_list.T[1]\n\n # Get extent of\n bbox = vt.bbox_from_verts(gps_list)\n centerx, centery = vt.bbox_center(bbox)\n\n gmap = gmplot.GoogleMapPlotter(centerx, centery, 13)\n color = mpl.colors.rgb2hex(pt.ORANGE)\n gmap.scatter(lat, lon, color=color, size=100, marker=False)\n gmap.draw('mymap.html')\n ut.startfile('mymap.html')\n\n # # Scale\n # bbox = vt.scale_bbox(bbox, 10.0)\n # extent = vt.extent_from_bbox(bbox)\n # basemap_extent = dict(llcrnrlon=extent[2], urcrnrlon=extent[3],\n # llcrnrlat=extent[0], urcrnrlat=extent[1])\n # # Whole globe\n # #basemap_extent = dict(llcrnrlon=0, llcrnrlat=-80,\n # # urcrnrlon=360, urcrnrlat=80)\n\n # from mpl_toolkits.basemap import Basemap\n # from matplotlib.colors import LightSource # NOQA\n # from mpl_toolkits.basemap import shiftgrid, cm # NOQA\n # from netCDF4 import Dataset\n # # Read information to make background pretty\n # logger.info('Grab topo information')\n # etopodata = Dataset('http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc')\n # logger.info('Read topo information')\n # topoin = etopodata.variables['ROSE'][:]\n # lons = etopodata.variables['ETOPO05_X'][:]\n # lats = etopodata.variables['ETOPO05_Y'][:]\n # # shift data so lons go from -180 to 180 instead of 20 to 380.\n # logger.info('Shift data')\n # topoin, lons = shiftgrid(180., topoin, lons, start=False)\n\n # logger.info('Make figure')\n # fnum = pt.ensure_fnum(None)\n # fig = pt.figure(fnum=fnum, doclf=True, docla=True) # NOQA\n # logger.info('Draw projection')\n # m = Basemap(projection='mill', **basemap_extent)\n # # setup Lambert Conformal basemap.\n # #m = Basemap(projection='cea',resolution='h', **basemap_extent)\n\n # # transform to nx x ny regularly spaced 5km native projection grid\n # logger.info('projection grid')\n # nx = int((m.xmax - m.xmin) / 5000.) + 1\n # ny = int((m.ymax - m.ymin) / 5000.) 
+ 1\n # topodat = m.transform_scalar(topoin, lons, lats, nx, ny)\n\n # # plot image over map with imshow.\n # im = m.imshow(topodat, cm.GMT_haxby) # NOQA\n # # draw coastlines and political boundaries.\n # m.drawcoastlines()\n # m.drawcountries()\n # m.drawstates()\n\n # transform to nx x ny regularly spaced 5km native projection grid\n # ls = LightSource(azdeg=90, altdeg=20)\n # rgb = ls.shade(topodat, cm.GMT_haxby)\n # im = m.imshow(rgb)\n # draw coastlines and political boundaries.\n\n # m.drawcoastlines()\n # m.drawcountries()\n # m.drawstates()\n\n # draw a boundary around the map, fill the background.\n # this background will end up being the ocean color, since\n # the continents will be drawn on top.\n # m.bluemarble()\n # m.drawmapboundary(fill_color='aqua')\n # m.fillcontinents(color='coral', lake_color='aqua')\n # Convert GPS to projected coordinates\n # x1, y1 = m(lon, lat) # convert to meters # lon==X, lat==Y\n # m.plot(x1, y1, '*', markersize=10)\n # fig.zoom_fac = pt.zoom_factory()\n # fig.pan_fac = pt.pan_factory()\n # fig.show()", "def map_image(res):\n # constants\n MAP_URL = \"https://maps.googleapis.com/maps/api/staticmap\"\n SIZE = \"400x400\"\n\n polygon_path = mh.get_polygon_path(res)\n origin = mh.get_latlon(mh.get_origin(res))\n destination = mh.get_latlon(mh.get_destination(res))\n params = {\n \"size\": SIZE,\n \"path\": f\"enc:{polygon_path}\",\n \"markers\": [f\"color:red|label:X|{destination}\", f\"size:small|color:blue|{origin}\"],\n \"key\": key\n }\n img_resp = requests.get(url=MAP_URL, params=params)\n return img_resp.url", "def download_map(latitude, longitude, outname):\n url1 = \"https://maps.googleapis.com/maps/api/staticmap?center={:.8f},{:.8f}&zoom=8&maptype=satellite&size=1000x1000&scale=4\".format(latitude,longitude)\n buffer1 = StringIO(urllib.urlopen(url1).read())\n image = Image.open(buffer1)\n image.save('{}.png'.format(outname))", "def map_locations(args, number_publications, numbers,\n links_locations_and_timestamps, gps_coordinates,\n countrycodes_for_js, continents_for_js):\n templateLoader = jinja2.FileSystemLoader(searchpath=\"./\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n template = templateEnv.get_template(\"template.html\")\n outputText = template.render(\n target_account=args.target_account,\n publications_number=number_publications,\n retrieved_number=len(links_locations_and_timestamps),\n mapped_number=numbers[0],\n links=str([x[0] for x in links_locations_and_timestamps]),\n errors_number=len(links_locations_and_timestamps) - numbers[0],\n places=str([x[1] for x in links_locations_and_timestamps]),\n timestamps=str([x[2] for x in links_locations_and_timestamps]),\n locations=str(gps_coordinates),\n countrycodes=str(countrycodes_for_js),\n continents=str(continents_for_js))\n\n with open(\n \"output/\" + args.target_account + \"/\" + args.target_account +\n \"_instaloctrack_map.html\", \"w\") as f:\n f.write(outputText)\n f.close()\n print(Fore.WHITE + \"Map with all the markers was written to:\" +\n Fore.GREEN + \" output/\" + args.target_account + \"/\" +\n args.target_account + \"_instaloctrack_map.html\")", "def generate_garmin_kml(self, d ):\n return (\"\"\"\n <GroundOverlay>\n <Icon>\n <href>%(image_url)s</href>\n <DrawOrder>%(draw_order)d</DrawOrder>\n </Icon>\n <LatLonBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonBox>\n </GroundOverlay>\"\"\" % d )", "def get_map_instructions(start_lng, start_lat, end_lng, end_lat):\r\n 
directions_resp = requests.get(\r\n f\"https://api.mapbox.com/directions/v5/mapbox/walking/{start_lng},{start_lat};{end_lng},{end_lat}\",\r\n params={\r\n \"access_token\": MAPBOX_TOKEN,\r\n \"geometries\": \"geojson\",\r\n \"steps\": \"true\",\r\n \"alternatives\": \"true\",\r\n },\r\n )\r\n instructions=[]\r\n for step in directions_resp.json()['routes'][0]['legs'][0]['steps']:\r\n instructions.append(f\"{step['maneuver']['instruction']}\")\r\n #listToStr = '<br>'.join(map(str, instruction))\r\n return instructions" ]
[ "0.7412921", "0.7408514", "0.6695499", "0.6539618", "0.65338993", "0.64012855", "0.6347697", "0.62790763", "0.625587", "0.61357576", "0.60827684", "0.60016644", "0.5909071", "0.5871183", "0.5864181", "0.58204234", "0.57807314", "0.5719914", "0.57073295", "0.5690754", "0.56851184", "0.5678762", "0.56695163", "0.5629338", "0.56254816", "0.5602676", "0.5541893", "0.5510373", "0.5475361", "0.5458768" ]
0.74909633
0
builds urls for the directions and distance matrix apis
def build_url(start, end, transit_mode):
    transit = ""
    traffic = "best_guess"
    depart = "now"
    if transit_mode:
        transit = transit_mode
    direc_url = g_api_base_url + dir_url + "origin=" + start + "&destination=" + end + trans_url \
        + transit + goog_dir_key
    dist_url = g_api_base_url + dis_url + units_i + or_dis_url + start + des_url + end + trans_url \
        + transit + traffic_url + traffic + depart_url + depart + goog_dis_key
    direc_url = direc_url.replace(" ", "+")
    print("directions :" + direc_url)
    dist_url = dist_url.replace(" ", "+")
    return direc_url, dist_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = ''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def build_url(ori_lon, ori_lat, des_lon, des_lat, year, month, day, hour, minute, args={}):\n options = dict()\n with open(option_file, 'r', newline='') as file:\n # Read the options file\n for line in file:\n if line[0] == '#': # if the first character of a line is '#' skip it\n continue\n splited_line = line.rstrip().split(':')\n if len(splited_line) < 2: # if it is a line with no ':'\n continue\n options[splited_line[0]] = splited_line[1]\n base_URL = 'localhost:' + port + '/otp/routers/default/plan'\n fromPlace = ori_lon + ',' + ori_lat\n toPlace = des_lon + ',' + des_lat\n date = year + '/' + month + '/' + day\n time = hour + ':' + minute + ':00'\n\n url = 'http://' + base_URL + '?fromPlace=' + fromPlace + '&toPlace=' + toPlace + '&date=' + date + '&time=' + time\n for option_name in options.keys():\n option = options[option_name]\n url += '&' + option_name + '=' + option\n if not 'mode' in url:\n url += '&mode=TRANSIT,WALK'\n for key in args.keys():\n url+= '&' + key + '=' + args[key]\n\n return url", "def urlGenerator(self):\n \n # VERMONT #\n baseurl = 'https://www.vermontjudiciary.org'\n path = '/opinions-decisions'\n # from date\n param1 = 'facet_from_date=01/01'\n # to date\n param2 = 'facet_to_date=01/01/'\n # division\n param3 = 'f%5B0%5D=court_division_opinions_library%3A'\n # search by text\n param4 = 'search_api_fulltext='\n # page\n param5 = 'page='\n # generate list of URL\n listURL = []\n \n # list of divisions\n vt_court_division = {\"civil\": \"1\", \"supreme court\": \"7\", \"environmental\": \"3\", \"family\": \"4\", \"criminal\": \"2\"}\n # inputs\n from_year = 2000\n to_year = 2017\n endPages = 75 #0-74\n startPages = 0\n # make change to pull data from different division by changing division name below to any of the division in vt_court_vivision dict\n division = vt_court_division[\"environmental\"]\n # url generating\n for i in range(startPages, endPages):\n build_url = baseurl + path + '?' 
+ param1 + str(from_year) + \"&\" + param2 + str(to_year) + \"&\" + param3 + division + param4 + \"&\" + param5 + str(i) + \"\"\n # append url to listUrl\n listURL.append(build_url)\n i += 1\n \n # return full list of URLs\n return listURL", "def gen_url(self):\n self.url = \"https://www.cubedb.net/?rank=3&title={}&time={}&scramble=\".format(self.name_of_solve, self.time_solve)\n for move in self.scramble.split():\n if \"\\'\" in move:\n move.replace(\"\\'\", \"-\")\n self.url += \"{}_\".format(move)\n self.url += \"&alg=\"\n count = 0\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}%0A//{}\".format(move[\"comment\"].split(\"mistake\")[0],\n \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"%0A\") != -1:\n alg = self.url[self.url.rfind(\"%0A\") + 3:]\n self.url = self.url[:self.url.rfind(\"%0A\") + 3] + \"%0A//{}%0A\".format(piece) + alg\n else:\n alg = self.url[self.url.rfind(\"=\") + 1:]\n self.url = self.url[:self.url.rfind(\"=\") + 1] + \"%0A//{}%0A\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} %0A\".format(move[\"comment\"])\n\n\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n if \"\\'\" in move[\"move\"]:\n move[\"move\"].replace(\"\\'\", \"-\")\n self.url += \"{}_\".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}%0A//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"%0A\") != -1:\n alg = self.url[self.url.rfind(\"%0A\") + 3:]\n self.url = self.url[:self.url.rfind(\"%0A\") + 3] + \"//{}%0A\".format(piece) + alg\n else:\n alg = self.url[self.url.rfind(\"=\") + 1:]\n self.url = self.url[:self.url.rfind(\"=\") + 1] + \"//{}%0A\".format(piece) + alg\n\n self.url += \"// {} %0A\".format(move[\"comment\"])\n else:\n self.url += \"// {} %0A\".format(move[\"comment\"])", "def getURLs():", "def _make_url(self):\n ...", "def build_url(vehicle, coordinates, format=\"json\", geometryformat=\"isoz\"):\n load = vehicle.load if vehicle.load > -1.0 else 0\n params = {\n \"format\": format,\n \"height\": vehicle.height,\n \"length\": vehicle.length,\n \"stops\": coordinates,\n \"load\": load,\n \"geometryformat\": geometryformat,\n \"lang\": \"nb-no\",\n }\n\n return '?'.join([ROUTE_URL_BASE, urlencode(params)])", "def generate_url(self):\n if self.has_marker:\n marker_param = f'mlat={self.mlat}&mlng={self.mlng}&'\n else:\n marker_param = ''\n if self.start:\n start_param = 'start=true&'\n else:\n start_param = ''\n url = f'{MapController.MAP_URL}?{start_param}clat={self.clat}&clng={self.clng}&{marker_param}zoom={self.zoom}'\n return url", "def make_url(site,node,instrument,method,stream,API_USERNAME,API_TOKEN):\n\n SENSOR_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'\n VOCAB_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12586/vocab/inv'\n meta_request_url ='/'.join((VOCAB_BASE_URL,site,node,instrument)) # Python wizard best\n data_request_url ='/'.join((SENSOR_BASE_URL,site,node,instrument,method,stream))\n\n # Retrieve vocabulary information for a given instrument\n r = 
requests.get(meta_request_url, auth=(API_USERNAME, API_TOKEN))\n meta_data = r.json()\n\n return (data_request_url,meta_data)", "def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)", "def drivingdistance(origin, destination, APIkey):\n\n lat1, lon1 = origin\n lat2, lon2 = destination\n\n gm_url = ('https://maps.googleapis.com/maps/api/directions/xml?' +\n 'origin='+str(lat1) + ',' + str(lon1) +\n '&destination=' + str(lat2) + ','+str(lon2) +\n '&key='+APIkey)\n\n return gm_url", "def _create_api_ulr_list(self) -> List[str]:\n api = Setup.openweather_api\n now = int(time.time())\n urls_list = []\n\n for lat, lon in self.locations:\n urls_list.append(\n f\"https://api.openweathermap.org/data/2.5/onecall?\"\n f\"lat={lat}&lon={lon}&exclude=hourly,minutely,\"\n f\"alerts&units=metric&appid={api}\"\n )\n\n for days in range(1, 6):\n date_time = now - 86400 * days\n urls_list.append(\n f\"http://api.openweathermap.org/data/2.5/onecall/\"\n f\"timemachine?lat={lat}&lon={lon}&dt={date_time}\"\n f\"&units=metric&appid={api}\"\n )\n\n return urls_list", "def uri(self):\n if not self.parallel:\n if len(self.WMO) <= 5: # todo: This max WMO number should be parameterized somewhere else\n # Retrieve all WMOs in a single request\n return [self.get_url()]\n else:\n # Retrieve one WMO by URL sequentially (same behaviour as localftp and argovis)\n urls = []\n for wmo in self.WMO:\n urls.append(\n Fetch_wmo(\n WMO=wmo, CYC=self.CYC, ds=self.dataset_id, parallel=False\n ).get_url()\n )\n return urls\n else:\n self.Chunker = Chunker(\n {\"wmo\": self.WMO}, chunks=self.chunks, chunksize=self.chunks_maxsize\n )\n wmo_grps = self.Chunker.fit_transform()\n # self.chunks = C.chunks\n urls = []\n for wmos in wmo_grps:\n urls.append(\n Fetch_wmo(\n WMO=wmos, CYC=self.CYC, ds=self.dataset_id, parallel=False\n ).get_url()\n )\n return urls", "def test_url_construction(self):\n\n a = api.InvenTreeAPI(\"http://localhost:1234\", connect=False)\n\n tests = {\n 'part': 'http://localhost:1234/api/part/',\n '/part': 'http://localhost:1234/api/part/',\n '/part/': 'http://localhost:1234/api/part/',\n 'order/so/shipment': 'http://localhost:1234/api/order/so/shipment/',\n }\n\n for endpoint, url in tests.items():\n self.assertEqual(a.constructApiUrl(endpoint), url)", "def __get_urls(self):\n self.__valid_servers = {\n \"qa\": {\n \"server_url\": \"https://qa.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://qa.api.deepaffex.ai:9080\"\n },\n \"dev\": {\n \"server_url\": \"https://dev.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://dev.api.deepaffex.ai:9080\"\n },\n \"demo\": {\n \"server_url\": \"https://demo.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.ai:9080\"\n },\n \"prod\": {\n \"server_url\": \"https://api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://api.deepaffex.ai:9080\"\n },\n \"prod-cn\": {\n \"server_url\": \"https://api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://api.deepaffex.cn:9080\"\n },\n \"demo-cn\": {\n \"server_url\": \"https://demo.api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.cn:9080\"\n }\n }\n try:\n self.server_url = 
self.__valid_servers[self.server][\"server_url\"]\n self.websocket_url = self.__valid_servers[self.server][\"websocket_url\"]\n except KeyError:\n raise KeyError(\"Invalid server ID given\")", "def get_url(self, source):\n if source == 'nomads':\n if self.model == 'rap':\n base = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/rap/prod/'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source == 'aws':\n if self.model == 'rap':\n base = 'https://noaa-rap-pds.s3.amazonaws.com/'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://noaa-hrrr-bdp-pds.s3.amazonaws.com/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source == 'google':\n if self.model == 'rap':\n base = 'https://storage.googleapis.com/rapid-refresh/'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://storage.googleapis.com/high-resolution-rapid-refresh/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source == 'azure':\n if self.model == 'rap':\n base = 'https://noaarap.blob.core.windows.net/rap'\n path = f'rap.{self.date:%Y%m%d}/rap.t{self.date:%H}z.awip32f{self.fxx:02d}.grib2'\n else:\n base = 'https://noaahrrr.blob.core.windows.net/hrrr/'\n if self.model == 'hrrr':\n path = f\"hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n elif self.model == 'hrrrak':\n path = f\"hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.ak.grib2\"\n elif source.startswith('pando'):\n if source[-1] == '2':\n gateway = 2\n else:\n gateway = 1\n if self.model == 'rap':\n return None # No RAP data on Pando\n else:\n base = f'https://pando-rgw0{gateway}.chpc.utah.edu/'\n path = f\"{self.model}/{self.field}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.field}f{self.fxx:02d}.grib2\"\n \n return base+path", "def calc_url(self):\n place = self._get_first_place()[0]\n path = \"\"\n # First see if we are in or near Sweden or Denmark\n # Change country to upper case\n location = get_main_location(self.database, place)\n country = location.get(PlaceType.COUNTRY, '').upper().strip()\n country_given = (country in MAP_NAMES_SWEDEN or \\\n country in MAP_NAMES_DENMARK) and (country != \"\")\n # if no country given, check if we might be in the vicinity defined by\n # 54 33' 0\" < lat < 66 9' 0\", 54.55 and 69.05\n # 8 3' 0\" < long < 24 9' 0\", 8.05 and 24.15 \n latitude, longitude = self._lat_lon(place)\n if latitude is None or longitude is None:\n coord_ok = False\n else:\n latitude = float(latitude) \n longitude = float(longitude)\n # Check if coordinates are inside Sweden and Denmark\n if (54.55 < latitude < 69.05) and (8.05 < 
longitude < 24.15):\n coord_ok = True\n else:\n msg2 = _(\"Latitude not within '54.55' to '69.05'\\n\") + \\\n _(\"Longitude not within '8.05' to '24.15'\")\n WarningDialog(_(\"Eniro map not available\"), msg2 )\n return\n\n if coord_ok:\n place_title = _build_title(self.database, place)\n place_city = _build_city(self.database, place)\n x_coord, y_coord = self._lat_lon(place, format=\"RT90\")\n # Set zoom level to 5 if Sweden/Denmark, others 3\n zoom = 5\n if not country_given:\n zoom = 3\n path = \"http://www.eniro.se/partner.fcgi?pis=1&x=%s&y=%s\" \\\n \"&zoom_level=%i&map_size=0&title=%s&city=%s&partner=gramps\"\n # Note x and y are swapped!\n path = path % (y_coord , x_coord, zoom, place_title, place_city)\n self.url = path.replace(\" \",\"%20\")\n return\n\n place_area = _build_area(self.database, place)\n if country_given and place_area:\n if country in MAP_NAMES_SWEDEN:\n path = \"http://kartor.eniro.se/query?&what=map_adr&mop=aq\" \\\n \"&geo_area=%s&partner=gramps\"\n path = path % (place_area)\n self.url = path.replace(\" \",\"%20\")\n return\n else:\n WarningDialog(_(\"Eniro map not available\"), \\\n _(\"Coordinates needed in Denmark\") )\n self.url = \"\"\n return\n\n WarningDialog(_(\"Eniro map not available\"), \n _(\"Latitude and longitude,\\n\" \\\n \"or street and city needed\") )\n return", "def getDataUrls(self):\n sub1 = self.id[0:3]\n sub2 = self.id[3:6]\n sub3 = self.id[6:9]\n self.xml = \"%s/static/model/%s/%s/%s/%s.xml\" % (serverString, sub1, sub2, sub3, self.id)\n self.image = \"%s/static/image/%s/%s/%s/%s_lrg.png\" % (serverString, sub1, sub2, sub3, self.id)\n self.thumb = \"%s/static/thumb/%s/%s/%s/%s.png\" % (serverString, sub1, sub2, sub3, self.id)", "def build_urls(self, listings_per_page=20, pages_per_location=15):\r\n url_list = []\r\n for i in range(pages_per_location):\r\n offset = listings_per_page * i\r\n url_pagination = self.link + f'&items_offset={offset}'\r\n url_list.append(url_pagination)\r\n self.url_list = url_list", "def build_url(self):\n return self.data_url.format(latitude=self.latitude, longitude=self.longitude)", "def _generate_urls(base_url, mbid):\n for level in LEVELS:\n yield base_url + mbid + level", "def url_shortner(self):", "def create_web_output_paths() -> None:\n create_path_and_index(\"\")\n create_path_and_index(\"photos/\")\n create_path_and_index(\"video/\")\n create_path_and_index(\"references/\")\n create_path_and_index(\"names/\")\n create_path_and_index(\"art/\")\n create_path_and_index(\"morphology/\")\n create_path_and_index(\"maps/\")\n create_path_and_index(\"images/\")\n create_path_and_index(\"images/flag-icon-css/\")\n create_path_and_index(\"images/flag-icon-css/css/\")\n create_path_and_index(\"images/flag-icon-css/flags/\")\n create_path_and_index(\"images/flag-icon-css/flags/4x3/\")\n create_path_and_index(\"locations/\")\n create_path_and_index(\"locations/keys/\")\n create_path_and_index(\"js/\")\n create_path_and_index(\"sizes/\")\n create_path_and_index(\"handedness/\")", "def _url_builder(url_root,api_key,path,params):\n params['api_key'] = api_key\n url_end = urlencode(params)\n url = \"%s%s%s\" % (url_root,path,url_end)\n return url", "def Url(self) -> str:", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n 
self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def getSDDCT0routes(proxy_url, session_token):\n t0_routes_json = get_sddc_t0_routes_json(proxy_url, session_token)\n t0_routes = {}\n if 'results' in t0_routes_json:\n pass\n else:\n print(\"No results. Something went wrong - please check your syntax and try again.\")\n sys.exit(1)\n\n if t0_routes_json == None:\n print(\"API Error\")\n sys.exit(1)\n elif len(t0_routes_json['results']) == 1:\n t0_routes = t0_routes_json['results'][0]['route_entries']\n elif len(t0_routes_json['results']) >1:\n t0_routes0 = t0_routes_json['results'][0]['route_entries']\n t0_routes1 = t0_routes_json['results'][1]['route_entries']\n t0_routes = t0_routes0 + t0_routes1\n\n df = pd.DataFrame(t0_routes)\n df.drop(['lr_component_id', 'lr_component_type'], axis=1, inplace=True)\n df.drop_duplicates(inplace = True)\n print('T0 Routes')\n print('Route Type Legend:')\n print('t0c - Tier-0 Connected\\nt0s - Tier-0 Static\\nb - BGP\\nt0n - Tier-0 NAT\\nt1s - Tier-1 Static\\nt1c - Tier-1 Connected\\nisr: Inter-SR')\n print()\n print(df.sort_values(by=[ 'route_type', 'network'], ascending=True).to_string())\n # route_table = PrettyTable(['Route Type', 'Network', 'Admin Distance', 'Next Hop'])\n # for routes in t0_routes:\n # route_table.add_row([routes['route_type'],routes['network'],routes['admin_distance'],routes['next_hop']])\n # print (route_table.get_string(sort_key = operator.itemgetter(1,0), sortby = \"Network\", reversesort=True))", "def ajax_solve():\n\n # The possible errors and their human-readable messages\n ERRORS = {\n 403: \"Google Directions could not find a path\",\n 404: \"Google Directions did not send response\",\n 405: \"You did not specify a start\",\n 406: \"You need to specify at least two waypoints\",\n 407: \"You did not specify a valid algorithm\",\n 408: \"Internal Algorithm Error\",\n\n }\n\n\n def to_tuple(waypoint):\n \"\"\"\n Converts LatLng dicts to tuples.\n\n :param waypoint: A waypoint as string, tuple or LatLng dict\n :return: waypoint, if waypoint is string or tuple,\n a tuple of the lat and lng values, if dict\n\n \"\"\"\n if isinstance(waypoint, dict):\n return (waypoint[\"lat\"], waypoint[\"lng\"])\n else:\n return waypoint\n\n def to_dict(waypoint):\n \"\"\"\n Converts to tuples to LatLng dicts.\n\n :param waypoint: A waypoint as string or tuple\n :return: waypoint, if waypoint is string or tuple,\n a LatNg dict, if tuple\n \"\"\"\n if isinstance(waypoint, tuple):\n return {\"lat\": waypoint[0], \"lng\": waypoint[1]}\n else:\n return waypoint\n\n\n\n # Get the arguments\n json = request.args\n\n # Check that a start point is supplied\n start = json.get(\"origin\")\n if not start:\n return jsonify(status=406, msg=ERRORS[405])\n\n # Convert to tuple if necessary\n # This is needed to 
store waypoints as keys in a dict\n start = to_tuple(start)\n\n\n\n waypoints = json.getlist(\"waypoints[]\")\n if not waypoints:\n return jsonify(status=406, msg=ERRORS[406])\n\n # We need to have at least two points for a path\n if len(waypoints) < 2:\n return jsonify(status=406, msg=ERRORS[406])\n\n # Convert to tuple if necessary\n # This is needed to store waypoints as keys in a dict\n waypoints = map(to_tuple, waypoints)\n\n # Get the algorithm\n algorithm = algorithms[json[\"algo\"]]\n if not algorithm:\n return jsonify(status=407, msg=ERRORS[407])\n\n # Get the options\n options = {}\n for option in algorithm.options():\n options[option] = float(json.get(\"options[%s]\" % option))\n\n try:\n distances = matrix.create(waypoints)\n except BaseException as e:\n logging.warning(\"Exception %s while creating matrix for %s\" % (e, waypoints))\n return jsonify(status=404, msg=ERRORS[404])\n else:\n if distances:\n\n try:\n # Call the algorithm\n l, path = algorithm.solve(start, distances, **options)\n except BaseException as e:\n logging.warning(\"Exception %s while executing %s with %s\" % (e, algorithm.name, options))\n return jsonify(status=408, msg=ERRORS[408])\n else:\n # Pack result\n result = {\n \"status\": 200,\n \"length\": l,\n \"start\": start,\n \"algo\": json[\"algo\"],\n \"path\": map(to_dict, path),\n \"msg\": \"SUCCESS\"\n }\n\n # Return the result\n return jsonify(result)\n else:\n return jsonify(status=403, msg=ERRORS[403])", "def get_urls(self):\n \n url_strings = dict()\n \n \n for platform in constants.PLATFORMS:\n download_path = reverse('download-installer', kwargs={\n 'build_id': self.build_id,\n 'platform': platform,\n })\n \n url_strings[platform] = settings.BASE_URL.rstrip('/') + download_path\n \n \n return url_strings", "def get_urls(self, **kwargs):\n pass # pragma: no cover" ]
[ "0.662632", "0.6459529", "0.64105844", "0.63785255", "0.61424583", "0.6120072", "0.61160785", "0.5975166", "0.5922255", "0.5866318", "0.58486396", "0.58353645", "0.5809004", "0.5791701", "0.5777878", "0.57720447", "0.5690597", "0.5641834", "0.56298447", "0.5613718", "0.5601663", "0.55605197", "0.5559404", "0.5547739", "0.5547522", "0.5544924", "0.55438316", "0.5542241", "0.55405", "0.55054396" ]
0.7186997
0
Defines the way to parse the magic command ``%%maml``.
def maml_parser():
    parser = MagicCommandParser(prog="maml", description='Runs a maml script.')
    parser.add_argument('-q', '--quiet', action='store_true', default=False,
                        help='hide output')
    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maml(self, line, cell):\n parser = self.get_parser(CsMLMagics.maml_parser, \"maml\")\n args = self.get_args(line, parser)\n\n if args is not None:\n quiet = args.quiet\n out, err = maml(cell, not quiet)\n if out:\n print(out)\n if err:\n print('-----')\n print(err)", "def mlnet_parser():\n parser = MagicCommandParser(prog=\"mlnet\",\n description='Compiles and wrap a C# function into a Python function.\\n'\n 'Automatically adds ML.net dependencies.')\n parser.add_argument('name', type=str, help='function name')\n parser.add_argument('-i', '--idep', nargs='*', action='append',\n help='internal dependencies (like System, System.Linq)')\n parser.add_argument('-d', '--dep', nargs='*', action='append',\n help='dependencies (assembly name without extension)')\n parser.add_argument('-c', '--catch', action='store', default=False,\n help='catch exception')\n return parser", "async def man(self, ctx: Context, *, command: str) -> None:\n base_query = f\"https://www.mankier.com/api/v2/mans/?q={command}\"\n query_url = urllib.parse.quote_plus(base_query, safe=\";/?:@&=$,><-[]\")\n\n async with ctx.typing():\n # Get API query responses\n async with self.session.get(query_url) as response:\n if response.status != 200:\n await ctx.send(f\"An error occurred (status code: {response.status})\")\n return\n\n results = json.loads(await response.text())[\"results\"]\n\n # Use first result\n if len(results) > 0:\n result = results[0]\n else:\n await ctx.send(\"Invalid query, no such command\")\n return\n\n base_url = f\"https://www.mankier.com/api/v2/mans/{result['name']}.{result['section']}\"\n url = urllib.parse.quote_plus(base_url, safe=\";/?:@&=$,><-[]\")\n\n # Load man page from first result\n async with self.session.get(url) as response:\n if response.status != 200:\n await ctx.send(f\"An error occurred (status code: {response.status})\")\n return\n\n result = json.loads(await response.text())\n\n embed = Embed(\n title=f\"Man page of: **{result['name'].capitalize()}**\",\n url=result[\"url\"],\n description=f\"**{result['description']}** ({result['section']})\"\n )\n\n for anchor in result['anchors']:\n embed.add_field(\n name=f\"`{bleach.clean(anchor['anchor'], tags=[], strip=True)}`\",\n value=f\"{bleach.clean(anchor['description'], tags=[], strip=True)}\\n[Link here]({anchor['url']})\",\n inline=False\n )\n # TODO: Solve this with pagination\n try:\n await ctx.send(embed=embed)\n except HTTPException as e:\n if e.code == 50035:\n await ctx.send(embed=Embed(\n description=\"Body is too long to show\",\n color=Color.red()\n ))\n else:\n raise e", "def handle_magic(self, line, continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n\n cmd = '%sipmagic(\"%s\")' % (pre,esc_quotes('%s %s' % (iFun,theRest)))\n self.log(cmd,continue_prompt)\n self.update_cache(line)\n #print 'in handle_magic, cmd=<%s>' % cmd # dbg\n return cmd", "def magic_lsmagic(self, parameter_s = ''):\n print 'Available magic functions:\\n@'+' @'.join(self.lsmagic())\n print '\\n' + Magic.auto_status[self.rc.automagic]\n return None", "def _from_command_line():\n # Manim can be called from the command line in three different\n # ways. The first two involve using the manim or manimcm commands\n prog = os.path.split(sys.argv[0])[-1]\n from_cli_command = prog in [\"manim\", \"manimcm\"]\n\n # The third way involves using `python -m manim ...`. In this\n # case, the CLI arguments passed to manim do not include 'manim',\n # 'manimcm', or even 'python'. 
However, the -m flag will always\n # be the first argument.\n from_python_m = sys.argv[0] == \"-m\"\n\n return from_cli_command or from_python_m", "def parse(self, args):\r\n # handle `sl ...`\r\n main_args = self.parse_main_args(args)\r\n module_name = main_args['<module>']\r\n\r\n # handle `sl <module> ...`\r\n module_args = self.parse_module_args(module_name, main_args['<args>'])\r\n\r\n # get the command argument\r\n command_name = module_args.get('<command>')\r\n\r\n # handle `sl <module> <command> ...`\r\n return self.parse_command_args(\r\n module_name,\r\n command_name,\r\n main_args['<args>'])", "def magic2python(cmd):\n\n if cmd.startswith('#@') or cmd.startswith('@'):\n if cmd[0]=='#':\n cmd = cmd[1:]\n # we need to return the proper line end later\n if cmd[-1] == '\\n':\n endl = '\\n'\n else:\n endl = ''\n try:\n func,args = cmd[1:].split(' ',1)\n except:\n func,args = cmd[1:].rstrip(),''\n args = args.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\").rstrip()\n return '__IP.magic_%s (\"%s\")%s' % (func,args,endl)\n else:\n return cmd", "def magic_magic(self, parameter_s = ''):\n\n mode = ''\n try:\n if parameter_s.split()[0] == '-latex':\n mode = 'latex'\n except:\n pass\n\n magic_docs = []\n for fname in self.lsmagic():\n mname = 'magic_' + fname\n for space in (Magic,self,self.__class__):\n try:\n fn = space.__dict__[mname]\n except KeyError:\n pass\n else:\n break\n magic_docs.append('@%s:\\n\\t%s\\n' %(fname,fn.__doc__))\n magic_docs = ''.join(magic_docs)\n\n if mode == 'latex':\n print self.format_latex(magic_docs)\n return\n else:\n magic_docs = self.format_screen(magic_docs)\n \n outmsg = \"\"\"\nIPython's 'magic' functions\n===========================\n\nThe magic function system provides a series of functions which allow you to\ncontrol the behavior of IPython itself, plus a lot of system-type\nfeatures. All these functions are prefixed with a @ character, but parameters\nare given without parentheses or quotes.\n\nExample: typing '@cd mydir' (without the quotes) changes you working directory\nto 'mydir', if it exists.\n\nIf you have 'automagic' enabled (via the command line option or with the\n@automagic function), you don't need to type in the @ explicitly.\n\nYou can define your own magic functions to extend the system. See the supplied\nipythonrc and example-magic.py files for details (in your ipython\nconfiguration directory, typically $HOME/.ipython/).\n\nYou can also define your own aliased names for magic functions. In your\nipythonrc file, placing a line like:\n\n execute __IP.magic_cl = __IP.magic_clear\n\nwill define @cl as a new name for @clear.\n\nFor a list of the available magic functions, use @lsmagic. For a description\nof any of them, type @magic_name?.\n\nCurrently the magic system has the following functions:\\n\"\"\"\n\n outmsg = (\"%s\\n%s\\n\\nSummary of magic functions (from @lsmagic):\"\n \"\\n\\n@%s\\n\\n%s\" % (outmsg,\n magic_docs,\n ' @'.join(self.lsmagic()),\n Magic.auto_status[self.rc.automagic] ) )\n\n page(outmsg,screen_lines=self.rc.screen_length)", "def magic_ll(self, parameter_s=''):\n self.magic_lc(parameter_s+' | grep ^l')", "def _parse_ml(self, line):\n # Parse the line\n fields = line.split('\\\\')\n if self.lang == ENGLISH:\n # pylint: disable=C0301\n # English sample:\n # 14\\abandonment\\94\\C\\\\1\\N\\N\\N\\N\\Y\\abandon+ment\\2x\\SA\\N\\N\\N\\#\\N\\N\\SA\\((abandon)[V],(ment)[N|V.])[N]\\N\\N\\N\n # From the README:\n # The eml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Cob\n # 4. MorphStatus\n # 5. 
Lang\n # 6. MorphCnt\n # 7. NVAffComp\n # 8. Der\n # 9. Comp\n # 10. DerComp\n # 11. Def\n # 12. Imm\n # 13. ImmSubCat\n # 14. ImmSA\n # 15. ImmAllo\n # 16. ImmSubst\n # 17. ImmOpac\n # 18. TransDer\n # 19. ImmInfix\n # 20. ImmRevers\n # 21 FlatSA\n # 22. StrucLab\n # 23. StrucAllo\n # 24. StrucSubst\n # 25. StrucOpac\n lemma = fields[0]\n word = fields[1]\n derivation = fields[21]\n elif self.lang == DUTCH:\n # pylint: disable=C0301\n # Dutch sample:\n # 19\\aalbessengelei\\7\\C\\1\\Y\\Y\\Y\\aalbes+en+gelei\\NxN\\N\\N\\(((aal)[N],(bes)[N])[N],(en)[N|N.N],(gelei)[N])[N]\\N\\N\\N\n # The dml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Inl\n # 4. MorphStatus\n # 5. MorphCnt\n # 6. DerComp\n # 7. Comp\n # 8. Def\n # 9. Imm\n # 10. ImmSubCat\n # 11. ImmAllo\n # 12. ImmSubst\n # 13. StrucLab\n # 14. StruAcAllo\n # 15. StrucSubst\n # 16. Sepa\n lemma = fields[0]\n word = fields[1]\n derivation = fields[12]\n\n # Skip multi-word entries for roots\n roots = self._get_root(derivation) if \" \" not in word else None\n return (lemma, word, roots)", "def getArbitraryManInput(self):\n return \"\"\".TH MANHOLE \"1\" \"August 2001\" \"\" \"\"\n.SH NAME\nmanhole \\- Connect to a Twisted Manhole service\n.SH SYNOPSIS\n.B manhole\n.SH DESCRIPTION\nmanhole is a GTK interface to Twisted Manhole services. You can execute python\ncode as if at an interactive Python console inside a running Twisted process\nwith this.\"\"\"", "def maml(self, maml: List[PredictionsDatapoints]):\n\n self._maml = maml", "def niw_mmml_mp(args):\n return niw_mmml(*args)", "def man(command):\n\tspeech.speak(\"Executing 'man \" + command + \"' to show you documentation for this command.\")\n\tsubprocess.call([\"man\", command])", "def command(self,cmd):\n self.lib.lammps_command(self.lmp,cmd.encode('utf-8'))", "def magic(self, parameter_s=''):\n\n mode = ''\n try:\n mode = parameter_s.split()[0][1:]\n except IndexError:\n pass\n\n brief = (mode == 'brief')\n rest = (mode == 'rest')\n magic_docs = self._magic_docs(brief, rest)\n\n if mode == 'latex':\n print(self.format_latex(magic_docs))\n return\n else:\n magic_docs = format_screen(magic_docs)\n\n out = [\"\"\"\nIPython's 'magic' functions\n===========================\n\nThe magic function system provides a series of functions which allow you to\ncontrol the behavior of IPython itself, plus a lot of system-type\nfeatures. There are two kinds of magics, line-oriented and cell-oriented.\n\nLine magics are prefixed with the % character and work much like OS\ncommand-line calls: they get as an argument the rest of the line, where\narguments are passed without parentheses or quotes. For example, this will\ntime the given statement::\n\n %timeit range(1000)\n\nCell magics are prefixed with a double %%, and they are functions that get as\nan argument not only the rest of the line, but also the lines below it in a\nseparate argument. These magics are called with two arguments: the rest of the\ncall line and the body of the cell, consisting of the lines below the first.\nFor example::\n\n %%timeit x = numpy.random.randn((100, 100))\n numpy.linalg.svd(x)\n\nwill time the execution of the numpy svd routine, running the assignment of x\nas part of the setup phase, which is not timed.\n\nIn a line-oriented client (the terminal or Qt console IPython), starting a new\ninput with %% will automatically enter cell mode, and IPython will continue\nreading input until a blank line is given. 
In the notebook, simply type the\nwhole cell as one entity, but keep in mind that the %% escape can only be at\nthe very start of the cell.\n\nNOTE: If you have 'automagic' enabled (via the command line option or with the\n%automagic function), you don't need to type in the % explicitly for line\nmagics; cell magics always require an explicit '%%' escape. By default,\nIPython ships with automagic on, so you should only rarely need the % escape.\n\nExample: typing '%cd mydir' (without the quotes) changes your working directory\nto 'mydir', if it exists.\n\nFor a list of the available magic functions, use %lsmagic. For a description\nof any of them, type %magic_name?, e.g. '%cd?'.\n\nCurrently the magic system has the following functions:\"\"\",\n magic_docs,\n \"Summary of magic functions (from %slsmagic):\" % magic_escapes['line'],\n str(self.lsmagic()),\n ]\n page.page('\\n'.join(out))", "def parseargs(p):\n p.set_defaults(func=func)\n p.description = \"Print machine architecture.\"\n return p", "def yamlmain(argv=None):\n if argv is None:\n argv = sys.argv\n if len(argv) == 1:\n _fullusage()\n try:\n command = argv[1]\n if command in ['get', 'dump']:\n _rosparam_cmd_get_dump(command, argv)\n elif command in ['set', 'load']:\n _rosparam_cmd_set_load(command, argv)\n elif command in ['delete']:\n _rosparam_cmd_delete(argv)\n elif command == 'list':\n _rosparam_cmd_list(argv)\n else:\n _fullusage()\n except RosParamException as e:\n print(\"ERROR: \"+str(e), file=sys.stderr)\n sys.exit(1)", "def arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--mix\", required=False, help=\"cube shuffle\")\n parser.add_argument(\"-e\", \"--explain\", action=\"store_true\", help=\"Get more explanation about steps\")\n options = parser.parse_args()\n return options", "def _llm_type(self) -> str:\n return \"llama.cpp\"", "def get_ymal_load(yamlfile):\n with open(yamlfile, 'r', encoding='utf-8') as fr:\n filedata = fr.read()\n yamldata = yaml.full_load(filedata)\n return yamldata", "def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)", "def _compat_parse_magic(self):\n def warning(magic, option):\n LOG.warning(\"Deprecated \\\"magic\\\" option \\\"{m}\\\" found. 
Please use new-style option \\\"{o}\\\" instead (see user manual).\".format(m=magic, o=option))\n\n magic_auto_backports = re.search(r\"\\*\\s*MINI_BUILDD:\\s*AUTO_BACKPORTS:\\s*([^*.\\[\\]]+)\", self._top_changes)\n if magic_auto_backports:\n warning(\"AUTO_BACKPORTS\", \"auto-ports\")\n self._set(\"auto-ports\", magic_auto_backports.group(1))\n\n magic_backport_mode = re.search(r\"\\*\\s*MINI_BUILDD:\\s*BACKPORT_MODE\", self._top_changes)\n if magic_backport_mode:\n warning(\"BACKPORT_MODE\", \"ignore-lintian\")\n self._set(\"ignore-lintian\", \"true\")", "def yamllint(context):\n exec_cmd = \"yamllint .\"\n run_cmd(context, exec_cmd)", "def parsed(self, raw):\n if isinstance(raw, ParsedString):\n p = raw\n else:\n # preparse is an overridable hook; default makes no changes\n s = self.preparse(raw)\n s = self.input_source_parser.transformString(s.lstrip())\n s = self.commentGrammars.transformString(s)\n for (shortcut, expansion) in self.shortcuts:\n if s.lower().startswith(shortcut):\n s = s.replace(shortcut, expansion + ' ', 1)\n break\n try:\n result = self.main_parser.parseString(s)\n except pyparsing.ParseException:\n # If we have a parsing failure, treat it is an empty command and move to next prompt\n result = self.main_parser.parseString('')\n result['raw'] = raw\n result['command'] = result.multilineCommand or result.command\n result = self.postparse(result)\n p = ParsedString(result.args)\n p.parsed = result\n p.parser = self.parsed\n return p", "def configure_parser_motif(subparsers):\n help_msg = \"Motif set (PFMs/PWMs) commands.\"\n desc_msg = help_msg + dedent(\"\"\"\n \n MotifScan only detects the binding sites of known motifs. Before scanning, \n the motif set should be installed and built with PFMs (Position Frequency \n Matrices). Since different assemblies have different genome contents, it \n is necessary to build the PFMs and get proper motif score cutoffs for every \n genome assembly you want to scan later. \n \"\"\")\n\n epilog_msg = dedent(\"\"\"\n Examples:\n --------- \n 1) Display installed motif sets:\n \n motifscan motif --list\n \n 2) Display all available motif sets in a remote database:\n \n motifscan motif --list-remote\n \n 3) Install a motif set from a remote database and build for genome 'hg19':\n \n motifscan motif --install -n <motif_set> -r <remote_PFMs> -g hg19\n \n 4) Install a motif set with local PFMs file(s) and build for genome 'mm9':\n\n motifscan motif --install -n <motif_set> -i <pfms.jaspar> -g mm9\n \n 5) Build an installed motif set (PFMs) for additional assembly 'hg38':\n \n motifscan motif --build <motif_set> -g hg38\n \n 6) Uninstall a motif set:\n \n motifscan motif --uninstall <motif_set>\n \n Notes:\n ------\n 1) When installing a motif set by `--install`, you can append a `-g` option \n to build the PFMs for the specified assembly after installation.\n \n 2) The genome assembly specified by `-g` should be pre-installed by command \n `motifscan genome --install`.\n \n 3) The path of newly installed motif set will be automatically saved and \n all the built PWMs files are stored under the directory. 
If you move it \n to a new path, please reconfigure it:\n \n motifscan config --set-motif <motif_set> <new_path>\n \"\"\")\n\n parser = subparsers.add_parser(\n \"motif\", description=desc_msg, help=help_msg, epilog=epilog_msg,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n subcommands = parser.add_argument_group(\"Motif Subcommands\")\n subcommands = subcommands.add_mutually_exclusive_group()\n subcommands.add_argument(\n \"--list\", dest=\"list\", action=\"store_true\", default=False,\n help=\"Display installed motif sets.\")\n subcommands.add_argument(\n \"--list-remote\", dest=\"list_remote\", action=\"store_true\",\n default=False, help=\"Display available remote motif sets.\")\n subcommands.add_argument(\n \"--install\", dest=\"install\", action=\"store_true\", default=False,\n help=\"Install a new motif set with PFMs.\")\n subcommands.add_argument(\n \"--build\", metavar=\"NAME\", dest=\"build\", default=None,\n help=\"Build an installed motif set for additional genome assembly.\")\n subcommands.add_argument(\n \"--uninstall\", metavar=\"NAME\", dest=\"uninstall\",\n help=\"Uninstall a motif set.\")\n subcommands.required = True\n\n parser_install = parser.add_argument_group(\"Install Options\")\n parser_install.add_argument(\n \"-n\", \"--name\", metavar=\"NAME\", dest=\"name\",\n help=\"Name of the motif set (PFMs) to be installed.\")\n parser_install.add_argument(\n \"-i\", metavar=\"FILE\", dest=\"pfm_files\", nargs=\"+\",\n help=\"Local motif PFMs file(s) to be installed.\")\n parser_install.add_argument(\n \"-r\", \"--remote\", metavar=\"PFMs\", dest=\"remote\",\n help=\"Download a remote motif PFMs set.\")\n parser_install.add_argument(\n \"-o\", \"--output-dir\", metavar=\"DIR\", dest=\"output_dir\",\n help=\"Write to a given directory instead of the default directory.\")\n\n parser_remote = parser.add_argument_group(\"Remote Database Options\")\n parser_remote.add_argument(\n \"--database\", dest=\"database\",\n choices=[\"jaspar_core\", \"jaspar_collections\"], default=\"jaspar_core\",\n help=\"Which remote database is used to list/install motif set (PFMs). \"\n \"Default: jaspar_core\")\n\n parser_build = parser.add_argument_group(\"Build Options\")\n parser_build.add_argument(\n \"-g\", \"--genome\", metavar=\"GENOME\", dest=\"genome\",\n help=\"Genome assembly to build the motif set (PFMs) for.\")\n parser_build.add_argument(\n \"--n-random\", metavar=\"N\", dest=\"n_random\", type=int, default=1000000,\n help=\"Generate N random background sequences to calculate motif score \"\n \"cutoffs. Default: 1,000,000\")\n parser_build.add_argument(\n \"--n-repeat\", metavar=\"N\", dest=\"n_repeat\", type=_pos_int, default=1,\n help=\"Repeat N rounds of random sampling and use the averaged cutoff \"\n \"as final cutoff. Default: 1\")\n parser_build.add_argument(\n \"--max-n\", metavar=\"N\", dest=\"max_n\", type=int, default=0,\n help=\"The maximal number of `N` base allowed in each random sampled \"\n \"sequence. 
Default: 0\")\n parser_build.add_argument(\n \"--seed\", metavar=\"SEED\", dest=\"seed\", type=int, default=None,\n help=\"Random seed used to generate background sequences.\")\n\n parser_threads = parser.add_argument_group(\"Threads Options\")\n parser_threads.add_argument(\n \"-t\", \"--threads\", metavar=\"N\", dest=\"n_threads\", type=int, default=1,\n help=\"Number of processes used to run in parallel.\")\n\n parser = _add_verbose_argument(parser)\n parser.set_defaults(func=motif.run)", "def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )", "def set_m(self, varname: str, varval: Optional[str]) -> None:\n if varname:\n self.pandoc.append('-M')\n self.pandoc.append(f'{varname}={varval}')", "def run_man(self, expanded, unexpanded) : \n\t\tmethodslist = map(lambda n: n[4:], filter(lambda f: f[:4] == 'run_', self.__class__.__dict__.keys()))\n\t\tif not unexpanded :\n\t\t\tunexpanded = methodslist\n\t\tunexpanded.sort()\n\n\t\tresults = []\n\t\tfor method in unexpanded :\n\t\t\tif results :\n\t\t\t\t# more than one command to display help for\n\t\t\t\t# so we separate them in the plain text output\n\t\t\t\tself.printf(\"\\n--------\\n\\n\")\n\t\t\tif not hasattr(self, 'run_' + method) :\n\t\t\t\thelp = helphtml = \"Invalid command\"\n\t\t\telse :\n\t\t\t\thelp = getattr(self, 'run_' + method).__doc__\n\t\t\t\tif not help :\n\t\t\t\t\thelp = helphtml = \"Undocumented command\"\n\t\t\t\telse :\n\t\t\t\t\thelplines = map(string.strip, string.split(help, '\\n'))\n\t\t\t\t\thelp = string.join(helplines, '\\n')\n\t\t\t\t\thelphtml = string.join(helplines, '<br />')\n\t\t\tcommand = '<a href=\"%s?zshellscript=man%%20%s&zshellscript=%s&zshelldontrun=1\">%s</a>' % (self.__context.REQUEST.URL0, method, method, method)\n\t\t\tresults.append({\"Command\": command, \"Help\": helphtml})\n\t\t\tself.printf(\"%s: %s\\n\" % (method, 
help))\n\t\tself.tableDisplay(\"man\", [\"Command\", \"Help\"], results)" ]
[ "0.670974", "0.5512939", "0.51862484", "0.5175543", "0.50888264", "0.49876153", "0.49551958", "0.49524027", "0.49337393", "0.4880694", "0.48664626", "0.48470613", "0.4794826", "0.47836807", "0.4777479", "0.47459564", "0.47455326", "0.4718852", "0.46997732", "0.46576157", "0.46563837", "0.46510884", "0.4574563", "0.45364666", "0.45285183", "0.4514571", "0.4444257", "0.44426432", "0.4435859", "0.44356012" ]
0.740539
0
Defines magic command ``%%maml``.
def maml(self, line, cell):
    parser = self.get_parser(CsMLMagics.maml_parser, "maml")
    args = self.get_args(line, parser)

    if args is not None:
        quiet = args.quiet
        out, err = maml(cell, not quiet)
        if out:
            print(out)
        if err:
            print('-----')
            print(err)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maml_parser():\n parser = MagicCommandParser(prog=\"maml\",\n description='Runs a maml script.')\n parser.add_argument('-q', '--quiet', action='store_true', default=False,\n help='hide output')\n return parser", "def command(self,cmd):\n self.lib.lammps_command(self.lmp,cmd.encode('utf-8'))", "def command(self, cmd):\n self.lmp.command(cmd)", "def magic_lsmagic(self, parameter_s = ''):\n print 'Available magic functions:\\n@'+' @'.join(self.lsmagic())\n print '\\n' + Magic.auto_status[self.rc.automagic]\n return None", "def magic_magic(self, parameter_s = ''):\n\n mode = ''\n try:\n if parameter_s.split()[0] == '-latex':\n mode = 'latex'\n except:\n pass\n\n magic_docs = []\n for fname in self.lsmagic():\n mname = 'magic_' + fname\n for space in (Magic,self,self.__class__):\n try:\n fn = space.__dict__[mname]\n except KeyError:\n pass\n else:\n break\n magic_docs.append('@%s:\\n\\t%s\\n' %(fname,fn.__doc__))\n magic_docs = ''.join(magic_docs)\n\n if mode == 'latex':\n print self.format_latex(magic_docs)\n return\n else:\n magic_docs = self.format_screen(magic_docs)\n \n outmsg = \"\"\"\nIPython's 'magic' functions\n===========================\n\nThe magic function system provides a series of functions which allow you to\ncontrol the behavior of IPython itself, plus a lot of system-type\nfeatures. All these functions are prefixed with a @ character, but parameters\nare given without parentheses or quotes.\n\nExample: typing '@cd mydir' (without the quotes) changes you working directory\nto 'mydir', if it exists.\n\nIf you have 'automagic' enabled (via the command line option or with the\n@automagic function), you don't need to type in the @ explicitly.\n\nYou can define your own magic functions to extend the system. See the supplied\nipythonrc and example-magic.py files for details (in your ipython\nconfiguration directory, typically $HOME/.ipython/).\n\nYou can also define your own aliased names for magic functions. In your\nipythonrc file, placing a line like:\n\n execute __IP.magic_cl = __IP.magic_clear\n\nwill define @cl as a new name for @clear.\n\nFor a list of the available magic functions, use @lsmagic. 
For a description\nof any of them, type @magic_name?.\n\nCurrently the magic system has the following functions:\\n\"\"\"\n\n outmsg = (\"%s\\n%s\\n\\nSummary of magic functions (from @lsmagic):\"\n \"\\n\\n@%s\\n\\n%s\" % (outmsg,\n magic_docs,\n ' @'.join(self.lsmagic()),\n Magic.auto_status[self.rc.automagic] ) )\n\n page(outmsg,screen_lines=self.rc.screen_length)", "def handle_magic(self, line, continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n\n cmd = '%sipmagic(\"%s\")' % (pre,esc_quotes('%s %s' % (iFun,theRest)))\n self.log(cmd,continue_prompt)\n self.update_cache(line)\n #print 'in handle_magic, cmd=<%s>' % cmd # dbg\n return cmd", "def man(command):\n\tspeech.speak(\"Executing 'man \" + command + \"' to show you documentation for this command.\")\n\tsubprocess.call([\"man\", command])", "def maml(self, maml: List[PredictionsDatapoints]):\n\n self._maml = maml", "def set_m(self, varname: str, varval: Optional[str]) -> None:\n if varname:\n self.pandoc.append('-M')\n self.pandoc.append(f'{varname}={varval}')", "def magic2python(cmd):\n\n if cmd.startswith('#@') or cmd.startswith('@'):\n if cmd[0]=='#':\n cmd = cmd[1:]\n # we need to return the proper line end later\n if cmd[-1] == '\\n':\n endl = '\\n'\n else:\n endl = ''\n try:\n func,args = cmd[1:].split(' ',1)\n except:\n func,args = cmd[1:].rstrip(),''\n args = args.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\").rstrip()\n return '__IP.magic_%s (\"%s\")%s' % (func,args,endl)\n else:\n return cmd", "def niw_mmml_mp(args):\n return niw_mmml(*args)", "def _make_matlab_command(self):\n \n raise NotImplementedError", "def magic(self, parameter_s=''):\n\n mode = ''\n try:\n mode = parameter_s.split()[0][1:]\n except IndexError:\n pass\n\n brief = (mode == 'brief')\n rest = (mode == 'rest')\n magic_docs = self._magic_docs(brief, rest)\n\n if mode == 'latex':\n print(self.format_latex(magic_docs))\n return\n else:\n magic_docs = format_screen(magic_docs)\n\n out = [\"\"\"\nIPython's 'magic' functions\n===========================\n\nThe magic function system provides a series of functions which allow you to\ncontrol the behavior of IPython itself, plus a lot of system-type\nfeatures. There are two kinds of magics, line-oriented and cell-oriented.\n\nLine magics are prefixed with the % character and work much like OS\ncommand-line calls: they get as an argument the rest of the line, where\narguments are passed without parentheses or quotes. For example, this will\ntime the given statement::\n\n %timeit range(1000)\n\nCell magics are prefixed with a double %%, and they are functions that get as\nan argument not only the rest of the line, but also the lines below it in a\nseparate argument. These magics are called with two arguments: the rest of the\ncall line and the body of the cell, consisting of the lines below the first.\nFor example::\n\n %%timeit x = numpy.random.randn((100, 100))\n numpy.linalg.svd(x)\n\nwill time the execution of the numpy svd routine, running the assignment of x\nas part of the setup phase, which is not timed.\n\nIn a line-oriented client (the terminal or Qt console IPython), starting a new\ninput with %% will automatically enter cell mode, and IPython will continue\nreading input until a blank line is given. 
In the notebook, simply type the\nwhole cell as one entity, but keep in mind that the %% escape can only be at\nthe very start of the cell.\n\nNOTE: If you have 'automagic' enabled (via the command line option or with the\n%automagic function), you don't need to type in the % explicitly for line\nmagics; cell magics always require an explicit '%%' escape. By default,\nIPython ships with automagic on, so you should only rarely need the % escape.\n\nExample: typing '%cd mydir' (without the quotes) changes your working directory\nto 'mydir', if it exists.\n\nFor a list of the available magic functions, use %lsmagic. For a description\nof any of them, type %magic_name?, e.g. '%cd?'.\n\nCurrently the magic system has the following functions:\"\"\",\n magic_docs,\n \"Summary of magic functions (from %slsmagic):\" % magic_escapes['line'],\n str(self.lsmagic()),\n ]\n page.page('\\n'.join(out))", "async def man(self, ctx: Context, *, command: str) -> None:\n base_query = f\"https://www.mankier.com/api/v2/mans/?q={command}\"\n query_url = urllib.parse.quote_plus(base_query, safe=\";/?:@&=$,><-[]\")\n\n async with ctx.typing():\n # Get API query responses\n async with self.session.get(query_url) as response:\n if response.status != 200:\n await ctx.send(f\"An error occurred (status code: {response.status})\")\n return\n\n results = json.loads(await response.text())[\"results\"]\n\n # Use first result\n if len(results) > 0:\n result = results[0]\n else:\n await ctx.send(\"Invalid query, no such command\")\n return\n\n base_url = f\"https://www.mankier.com/api/v2/mans/{result['name']}.{result['section']}\"\n url = urllib.parse.quote_plus(base_url, safe=\";/?:@&=$,><-[]\")\n\n # Load man page from first result\n async with self.session.get(url) as response:\n if response.status != 200:\n await ctx.send(f\"An error occurred (status code: {response.status})\")\n return\n\n result = json.loads(await response.text())\n\n embed = Embed(\n title=f\"Man page of: **{result['name'].capitalize()}**\",\n url=result[\"url\"],\n description=f\"**{result['description']}** ({result['section']})\"\n )\n\n for anchor in result['anchors']:\n embed.add_field(\n name=f\"`{bleach.clean(anchor['anchor'], tags=[], strip=True)}`\",\n value=f\"{bleach.clean(anchor['description'], tags=[], strip=True)}\\n[Link here]({anchor['url']})\",\n inline=False\n )\n # TODO: Solve this with pagination\n try:\n await ctx.send(embed=embed)\n except HTTPException as e:\n if e.code == 50035:\n await ctx.send(embed=Embed(\n description=\"Body is too long to show\",\n color=Color.red()\n ))\n else:\n raise e", "def magic_ll(self, parameter_s=''):\n self.magic_lc(parameter_s+' | grep ^l')", "def mlnet_parser():\n parser = MagicCommandParser(prog=\"mlnet\",\n description='Compiles and wrap a C# function into a Python function.\\n'\n 'Automatically adds ML.net dependencies.')\n parser.add_argument('name', type=str, help='function name')\n parser.add_argument('-i', '--idep', nargs='*', action='append',\n help='internal dependencies (like System, System.Linq)')\n parser.add_argument('-d', '--dep', nargs='*', action='append',\n help='dependencies (assembly name without extension)')\n parser.add_argument('-c', '--catch', action='store', default=False,\n help='catch exception')\n return parser", "def _llm_type(self) -> str:\n return \"llama.cpp\"", "def getArbitraryManInput(self):\n return \"\"\".TH MANHOLE \"1\" \"August 2001\" \"\" \"\"\n.SH NAME\nmanhole \\- Connect to a Twisted Manhole service\n.SH SYNOPSIS\n.B manhole\n.SH DESCRIPTION\nmanhole is 
a GTK interface to Twisted Manhole services. You can execute python\ncode as if at an interactive Python console inside a running Twisted process\nwith this.\"\"\"", "def yamlmain(argv=None):\n if argv is None:\n argv = sys.argv\n if len(argv) == 1:\n _fullusage()\n try:\n command = argv[1]\n if command in ['get', 'dump']:\n _rosparam_cmd_get_dump(command, argv)\n elif command in ['set', 'load']:\n _rosparam_cmd_set_load(command, argv)\n elif command in ['delete']:\n _rosparam_cmd_delete(argv)\n elif command == 'list':\n _rosparam_cmd_list(argv)\n else:\n _fullusage()\n except RosParamException as e:\n print(\"ERROR: \"+str(e), file=sys.stderr)\n sys.exit(1)", "def add_command(self):\n self.write(\"@SP\\nAM=M-1\\nD=M\\nA=A-1\\nM=M+D\\n\")", "def __getattr__(self, item):\n if item not in _CommandsMetaClass.commands_dict:\n raise MPDCommandNotExists(\"no such command: '{}'\".format(item))\n\n f = lambda *args: \"{} {}\\n\".format(item, ' '.join(_CommandsMetaClass._quote_arguments(args)))\n f.__doc__ = \"Command's Arguments: {}\".format(_CommandsMetaClass.commands_dict[item])\n f.__name__ = item\n return f", "def magic_alias(self, parameter_s = ''):\n\n par = parameter_s.strip()\n if not par:\n if self.rc.automagic:\n prechar = ''\n else:\n prechar = '@'\n print 'Alias\\t\\tSystem Command\\n'+'-'*30\n aliases = self.alias_table.keys()\n aliases.sort()\n for alias in aliases:\n print prechar+alias+'\\t\\t'+self.alias_table[alias]\n return\n try:\n alias,cmd = par.split(' ',1)\n except:\n print inspect.getdoc(self.magic_alias)\n return\n nargs = cmd.count('%s')\n if nargs == 0: # simple aliases\n fndef = itpl(\n\"\"\"\ndef magic_${alias}(parameter_s = ''):\n '''Alias to the system command '$cmd' '''\n xsys('$cmd '+str(parameter_s))\n\nself.magic_$alias = magic_$alias\n\"\"\")\n else: # parametric aliases\n fndef = itpl(\n\"\"\"\ndef magic_${alias}(parameter_s = ''):\n '''Alias to the system command '$cmd' '''\n cmd = '$cmd'\n nargs = cmd.count('%s')\n args = str(parameter_s).split()\n\n if len(args) != nargs:\n print 'Incorrect number of arguments:',nargs,'expected.'\n print \"$alias is an alias to: '$cmd'\"\n return\n else:\n cmd_call = cmd % tuple(args)\n xsys(cmd_call)\n\nself.magic_$alias = magic_$alias\n\"\"\")\n try:\n exec fndef in globals(),locals()\n except:\n print self.magic_alias.__doc__\n self.alias_table.update({alias:cmd})", "def cmd(name: str) -> Callable:\n return g.new_cmd_decorator(name, ['c', 'spellCommands',])", "def yamllint(context):\n exec_cmd = \"yamllint .\"\n run_cmd(context, exec_cmd)", "def magic_macro(self,parameter_s = ''):\n\n args = parameter_s.split()\n name,ranges = args[0], args[1:]\n #print 'rng',ranges # dbg\n cmds = self.extract_input_slices(ranges)\n macro = Macro(cmds)\n self.user_ns.update({name:macro})\n print 'Macro `%s` created. To execute, type its name (without quotes).' 
% name\n print 'Macro contents:'\n print str(macro).rstrip(),", "def magic(self, alias):\n if alias in self.aliases:\n return self.aliases[alias]\n else:\n return \"%%{}\\n\".format(alias)", "def doautocmd(self, *autocmds):\n self._vim.command('doautocmd ' + ','.join(autocmds))", "def mfa_cli(ctx: click.Context) -> None:\n from montreal_forced_aligner.command_line.utils import check_server, start_server, stop_server\n\n config.load_configuration()\n auto_server = False\n run_check = True\n if ctx.invoked_subcommand == \"anchor\":\n config.CLEAN = False\n config.USE_POSTGRES = True\n config.CLEAN = False\n config.USE_POSTGRES = True\n if \"--help\" in sys.argv or ctx.invoked_subcommand in [\n \"configure\",\n \"version\",\n \"history\",\n \"server\",\n \"align_one\",\n ]:\n auto_server = False\n run_check = False\n elif ctx.invoked_subcommand in [\"model\", \"models\"]:\n if \"add_words\" in sys.argv or \"inspect\" in sys.argv:\n config.CLEAN = True\n config.USE_POSTGRES = False\n else:\n run_check = False\n elif ctx.invoked_subcommand == \"g2p\":\n if len(sys.argv) > 2 and sys.argv[2] == \"-\":\n run_check = False\n auto_server = False\n else:\n auto_server = config.AUTO_SERVER\n if \"--no_use_postgres\" in sys.argv or not config.USE_POSTGRES:\n run_check = False\n auto_server = False\n if auto_server:\n start_server()\n elif run_check:\n check_server()\n warnings.simplefilter(\"ignore\")\n check_third_party()\n if ctx.invoked_subcommand != \"anchor\":\n hooks = ExitHooks()\n hooks.hook()\n atexit.register(hooks.history_save_handler)\n if auto_server:\n atexit.register(stop_server)", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def load_alias(name):\n mod = importlib.import_module(\"umdone.commands.\" + name)\n main = getattr(mod, \"main\")\n builtins.aliases[name] = main\n builtins.aliases[name.replace(\"_\", \"-\")] = main" ]
[ "0.6546807", "0.5515294", "0.52751005", "0.52579564", "0.5237155", "0.51861405", "0.512959", "0.5102926", "0.49472788", "0.49453253", "0.49353927", "0.49225372", "0.4891675", "0.48703486", "0.47610494", "0.470173", "0.46908697", "0.4684091", "0.46759415", "0.4654112", "0.46485576", "0.4603076", "0.46006072", "0.45542857", "0.455096", "0.45361915", "0.44818726", "0.44448832", "0.44351497", "0.44261438" ]
0.6613872
0
Defines the way to parse the magic command ``%%mlnet``.
def mlnet_parser():
    parser = MagicCommandParser(prog="mlnet",
                                description='Compiles and wrap a C# function into a Python function.\n'
                                            'Automatically adds ML.net dependencies.')
    parser.add_argument('name', type=str, help='function name')
    parser.add_argument('-i', '--idep', nargs='*', action='append',
                        help='internal dependencies (like System, System.Linq)')
    parser.add_argument('-d', '--dep', nargs='*', action='append',
                        help='dependencies (assembly name without extension)')
    parser.add_argument('-c', '--catch', action='store', default=False,
                        help='catch exception')
    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mlnet(self, line, cell):\n line, cell = CsMagics._preprocess_line_cell_maml( # pylint: disable=W0212\n line, cell)\n\n parser = self.get_parser(CsMagics.CS_parser, \"CS\")\n args = self.get_args(line, parser)\n\n if args is not None:\n name = args.name\n dep = CsMagics._linearise_args(args.dep) # pylint: disable=W0212\n idep = CsMagics._linearise_args(args.idep) # pylint: disable=W0212\n\n if args.catch:\n try:\n f = mlnet(name, cell, idep, dep)\n except Exception as e: # pylint: disable=W0703\n print(str(e).replace('\\r', ''))\n return None\n else:\n f = mlnet(name, cell, idep, dep)\n if self.shell is not None:\n self.shell.user_ns[name] = f\n return f\n return None", "def cmd_nlst(args):", "def niw_mmml_mp(args):\n return niw_mmml(*args)", "def magic2python(cmd):\n\n if cmd.startswith('#@') or cmd.startswith('@'):\n if cmd[0]=='#':\n cmd = cmd[1:]\n # we need to return the proper line end later\n if cmd[-1] == '\\n':\n endl = '\\n'\n else:\n endl = ''\n try:\n func,args = cmd[1:].split(' ',1)\n except:\n func,args = cmd[1:].rstrip(),''\n args = args.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\").rstrip()\n return '__IP.magic_%s (\"%s\")%s' % (func,args,endl)\n else:\n return cmd", "def parse_args():\n parser = argparse.ArgumentParser(description='MXNet Gluon Semantic Segmentation')\n # model and dataset\n parser.add_argument('--model', type=str, default='fcn',\n help='model name (default: fcn)')\n parser.add_argument('--model-zoo', type=str, default=None,\n help='evaluating on model zoo model')\n parser.add_argument('--pretrained', action=\"store_true\",\n help='whether to use pretrained params')\n parser.add_argument('--backbone', type=str, default='resnet50',\n help='backbone name (default: resnet50)')\n parser.add_argument('--dataset', type=str, default='pascal',\n help='dataset name (default: pascal)')\n parser.add_argument('--workers', type=int, default=16,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=520,\n help='base image size')\n parser.add_argument('--crop-size', type=int, default=480,\n help='crop image size')\n parser.add_argument('--train-split', type=str, default='train',\n help='dataset train split (default: train)')\n # training hyper params\n parser.add_argument('--aux', action='store_true', default=False,\n help='Auxiliary loss')\n parser.add_argument('--aux-weight', type=float, default=0.5,\n help='auxiliary loss weight')\n parser.add_argument('--epochs', type=int, default=50, metavar='N',\n help='number of epochs to train (default: 50)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=16,\n metavar='N', help='input batch size for \\\n training (default: 16)')\n parser.add_argument('--test-batch-size', type=int, default=16,\n metavar='N', help='input batch size for \\\n testing (default: 16)')\n parser.add_argument('--optimizer', type=str, default='sgd',\n help='optimizer (default: sgd)')\n parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',\n help='learning rate (default: 1e-3)')\n parser.add_argument('--warmup-epochs', type=int, default=0,\n help='number of warmup epochs.')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=1e-4,\n metavar='M', help='w-decay (default: 1e-4)')\n parser.add_argument('--no-wd', action='store_true',\n help='whether to remove weight 
decay on bias, \\\n and beta/gamma for batchnorm layers.')\n parser.add_argument('--mode', type=str, default=None,\n help='whether to turn on model hybridization')\n # cuda and distribute\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--ngpus', type=int,\n default=len(mx.test_utils.list_gpus()),\n help='number of GPUs (default: 4)')\n parser.add_argument('--kvstore', type=str, default='device',\n help='kvstore to use for trainer/module.')\n parser.add_argument('--dtype', type=str, default='float32',\n help='data type for training. default is float32')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default='default',\n help='set the checkpoint name')\n parser.add_argument('--save-dir', type=str, default=None,\n help='directory of saved models')\n parser.add_argument('--log-interval', type=int, default=20,\n help='Number of batches to wait before logging.')\n parser.add_argument('--logging-file', type=str, default='train.log',\n help='name of training log file')\n # evaluation only\n parser.add_argument('--eval', action='store_true', default=False,\n help='evaluation only')\n parser.add_argument('--no-val', action='store_true', default=False,\n help='skip validation during training')\n # synchronized Batch Normalization\n parser.add_argument('--syncbn', action='store_true', default=False,\n help='using Synchronized Cross-GPU BatchNorm')\n # the parser\n args = parser.parse_args()\n\n # handle contexts\n if args.no_cuda:\n print('Using CPU')\n args.kvstore = 'local'\n args.ctx = [mx.cpu(0)]\n else:\n print('Number of GPUs:', args.ngpus)\n assert args.ngpus > 0, 'No GPUs found, please enable --no-cuda for CPU mode.'\n args.ctx = [mx.gpu(i) for i in range(args.ngpus)]\n\n if 'psp' in args.model or 'deeplab' in args.model:\n assert args.crop_size % 8 == 0, ('For PSPNet and DeepLabV3 model families, '\n 'we only support input crop size as multiples of 8.')\n\n # logging and checkpoint saving\n if args.save_dir is None:\n args.save_dir = \"runs/%s/%s/%s/\" % (args.dataset, args.model, args.backbone)\n makedirs(args.save_dir)\n\n # Synchronized BatchNorm\n args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn \\\n else mx.gluon.nn.BatchNorm\n args.norm_kwargs = {'num_devices': args.ngpus} if args.syncbn else {}\n return args", "def net_cmd(self):\n \n logging.debug(\"net_cmd called\")\n cmd = \"\"\n # FIXME should probably grab the PrefixLength from the network definition\n # calc my router\n \n # FIXME: Need to split this into separate files...\n # files will be a dictionary of {\"filename\":\"contents\"}\n files = {}\n \n cmd = \"rem cmd\\r\\n\"\n tmpl = \"\"\"netsh interface ip set address \"%(nic)s\" static %(v4_fixed_ip)s 255.255.255.0\\r\\n\"\"\"\n # FIXME: this should be read out of the configuration, probably\n nets = self.networks\n ips = self.ip_addresses \n my_router = \"\"\n for netname in nets:\n v4_fixed_ip = ips.get(netname)\n my_net = v4_fixed_ip.split(\".\")[:3]\n my_net.append(\"254\")\n my_router = \".\".join(my_net)\n nic = \"%s-%s\" % (self.footprint.footprint_name, netname)\n logging.debug(\"Creating %s for %s\" % (nic, nets))\n # net_id = self.networks.get(netname)\n cmd = cmd + tmpl % locals()\n \n cmd += \"\"\"route -p add 192.168.1.0 MASK 255.255.255.0 %(my_router)s\\r\\n\"\"\" % locals()\n cmd += \"\"\"route -p add 192.168.2.0 MASK 255.255.255.0 
%(my_router)s\\r\\n\"\"\" % locals()\n cmd += \"\"\"route -p add 192.168.3.0 MASK 255.255.255.0 %(my_router)s\\r\\n\"\"\" % locals()\n logging.debug(\"cmdfile:\\n\" + cmd)\n \n # print 50 * \"x\"\n # print cmd\n return cmd", "def parse_args():\n parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation')\n\n parser.add_argument('--model', type=str, default='fcn', help='model name (default: fcn)')\n parser.add_argument('--backbone', type=str, default='resnet50', help='backbone name (default: resnet50)')\n parser.add_argument('--dataset', type=str, default='pascalaug', help='dataset name (default: pascal)')\n parser.add_argument('--dataset-dir', type=str, default='../imgclsmob_data/voc', help='dataset path')\n parser.add_argument('--workers', type=int, default=16, metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=520, help='base image size')\n parser.add_argument('--crop-size', type=int, default=480, help='crop image size')\n\n parser.add_argument('--batch-size', type=int, default=1, metavar='N', help='input batch size for testing')\n\n parser.add_argument('--ngpus', type=int, default=len(mx.test_utils.list_gpus()), help='number of GPUs (default: 4)')\n\n # checking point\n parser.add_argument('--resume', type=str, default=None, help='put the path to resuming file if needed')\n parser.add_argument('--checkname', type=str, default='default', help='set the checkpoint name')\n parser.add_argument('--model-zoo', type=str, default=None, help='evaluating on model zoo model')\n\n # the parser\n args = parser.parse_args()\n # handle contexts\n if args.ngpus == 0:\n args.ctx = [mx.cpu(0)]\n else:\n args.ctx = [mx.gpu(i) for i in range(args.ngpus)]\n args.test_batch_size = args.ngpus if args.ngpus > 0 else 1\n print(args)\n return args", "def test_shortopt(self):\n pp = ParlaiParser(False, False)\n pp.add_argument(\"-m\", \"--model\")\n pp.add_argument(\"-mtw\", \"--multitask-weights\")\n opt = pp.parse_args([\"-m\", \"memnn\"])\n print(opt)", "def cmd_mt_irc(word, word_eol, userdata):\n\tif len(word) > 1:\n\t\tsubcmd = word[1]\n\t\tif subcmd in subcommands:\n\t\t\tsubcmd = subcommands[subcmd]\n\t\t\tsubcmd(word[1:], word_eol[1:])\n\t\telse:\n\t\t\tprint('[mt_irc] Unknown subcommand \"%s\". 
Try \"/mt_irc help\".')\n\telse:\n\t\tprint('Usage: /mt_irc SUBCOMMAND')\n\t\tprint('Try \"/mt_irc help\".')\n\treturn xchat.EAT_XCHAT", "def _parse_network_list(self, *cmd):\n if self._fail_network_list:\n raise processutils.ProcessExecutionError(exit_code=1)\n else:\n return NETWORK_LIST, ''", "def parse_args():\n parser = argparse.ArgumentParser(description='MXNet Gluon Segmentation')\n parser.add_argument('--host', type=str, default='xxx',\n help='xxx is a place holder')\n parser.add_argument('--model', type=str, default='ResFPN',\n help='model name: ResNetFPN, ResUNet')\n parser.add_argument('--fuse-mode', type=str, default='AsymBi',\n help='DirectAdd, Concat, SK, BiLocal, BiGlobal, AsymBi, '\n 'TopDownGlobal, TopDownLocal')\n parser.add_argument('--tiny', action='store_true', default=False,\n help='evaluation only')\n parser.add_argument('--blocks', type=int, default=3,\n help='block num in each stage')\n parser.add_argument('--channel-times', type=int, default=1,\n help='times of channel width')\n parser.add_argument('--dataset', type=str, default='DENTIST',\n help='dataset name: DENTIST, Iceberg, StopSign')\n parser.add_argument('--workers', type=int, default=48,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=512,\n help='base image size')\n parser.add_argument('--iou-thresh', type=float, default=0.5,\n help='iou-thresh')\n parser.add_argument('--crop-size', type=int, default=480,\n help='crop image size')\n parser.add_argument('--train-split', type=str, default='trainval',\n help='dataset train split (default: train)')\n parser.add_argument('--val-split', type=str, default='test',\n help='dataset val split (default: val)')\n # training hyper params\n parser.add_argument('--epochs', type=int, default=300, metavar='N',\n help='number of epochs to train (default: 110)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=8,\n metavar='N', help='input batch size for \\\n training (default: 16)')\n parser.add_argument('--test-batch-size', type=int, default=8,\n metavar='N', help='input batch size for \\\n testing (default: 32)')\n parser.add_argument('--optimizer', type=str, default='adagrad',\n help='sgd, adam, adagrad')\n parser.add_argument('--lr', type=float, default=0.05, metavar='LR',\n help='learning rate (default: 0.1)')\n parser.add_argument('--lr-decay', type=float, default=0.1,\n help='decay rate of learning rate. 
default is 0.1.')\n parser.add_argument('--gamma', type=int, default=2,\n help='gamma for Focal Soft IoU Loss')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=1e-4,\n metavar='M', help='w-decay (default: 1e-4)')\n parser.add_argument('--no-wd', action='store_true',\n help='whether to remove weight decay on bias, \\\n and beta/gamma for batchnorm layers.')\n parser.add_argument('--score-thresh', type=float, default=0.5,\n help='score-thresh')\n parser.add_argument('--warmup-epochs', type=int, default=0,\n help='number of warmup epochs.')\n # cuda and logging\n parser.add_argument('--no-cuda', action='store_true', default=\n False, help='disables CUDA training')\n parser.add_argument('--gpus', type=str, default='0',\n help='Training with GPUs, you can specify 1,3 for example.')\n parser.add_argument('--kvstore', type=str, default='device',\n help='kvstore to use for trainer/module.')\n parser.add_argument('--dtype', type=str, default='float32',\n help='data type for training. default is float32')\n parser.add_argument('--wd', type=float, default=0.0001,\n help='weight decay rate. default is 0.0001.')\n parser.add_argument('--log-interval', type=int, default=50,\n help='Number of batches to wait before logging.')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n parser.add_argument('--colab', action='store_true', default=\n False, help='whether using colab')\n parser.add_argument('--save-dir', type=str, default=None,\n help='directory of saved models')\n # evaluation only\n parser.add_argument('--eval', action='store_true', default= False,\n help='evaluation only')\n parser.add_argument('--no-val', action='store_true', default= False,\n help='skip validation during training')\n parser.add_argument('--metric', type=str, default='mAP',\n help='F1, IoU, mAP')\n parser.add_argument('--logging-file', type=str, default='train.log',\n help='name of training log file')\n parser.add_argument('--summary', action='store_true',\n help='print parameters')\n # synchronized Batch Normalization\n parser.add_argument('--syncbn', action='store_true', default= False,\n help='using Synchronized Cross-GPU BatchNorm')\n # the parser\n args = parser.parse_args()\n # handle contexts\n if args.no_cuda or (len(mx.test_utils.list_gpus()) == 0):\n print('Using CPU')\n args.kvstore = 'local'\n args.ctx = [mx.cpu(0)]\n else:\n args.ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]\n print('Number of GPUs:', len(args.ctx))\n\n # logging and checkpoint saving\n if args.save_dir is None:\n args.save_dir = \"runs/%s/%s/\" % (args.dataset, args.model)\n makedirs(args.save_dir)\n\n # Synchronized BatchNorm\n args.norm_layer = mx.gluon.contrib.nn.SyncBatchNorm if args.syncbn \\\n else mx.gluon.nn.BatchNorm\n args.norm_kwargs = {'num_devices': len(args.ctx)} if args.syncbn else {}\n print(args)\n return args", "def _parse_ml(self, line):\n # Parse the line\n fields = line.split('\\\\')\n if self.lang == ENGLISH:\n # pylint: disable=C0301\n # English sample:\n # 14\\abandonment\\94\\C\\\\1\\N\\N\\N\\N\\Y\\abandon+ment\\2x\\SA\\N\\N\\N\\#\\N\\N\\SA\\((abandon)[V],(ment)[N|V.])[N]\\N\\N\\N\n # From the README:\n # The eml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Cob\n # 4. MorphStatus\n # 5. Lang\n # 6. MorphCnt\n # 7. NVAffComp\n # 8. Der\n # 9. Comp\n # 10. DerComp\n # 11. Def\n # 12. 
Imm\n # 13. ImmSubCat\n # 14. ImmSA\n # 15. ImmAllo\n # 16. ImmSubst\n # 17. ImmOpac\n # 18. TransDer\n # 19. ImmInfix\n # 20. ImmRevers\n # 21 FlatSA\n # 22. StrucLab\n # 23. StrucAllo\n # 24. StrucSubst\n # 25. StrucOpac\n lemma = fields[0]\n word = fields[1]\n derivation = fields[21]\n elif self.lang == DUTCH:\n # pylint: disable=C0301\n # Dutch sample:\n # 19\\aalbessengelei\\7\\C\\1\\Y\\Y\\Y\\aalbes+en+gelei\\NxN\\N\\N\\(((aal)[N],(bes)[N])[N],(en)[N|N.N],(gelei)[N])[N]\\N\\N\\N\n # The dml.cd file contains the following fields:\n # 1. IdNum\n # 2. Head\n # 3. Inl\n # 4. MorphStatus\n # 5. MorphCnt\n # 6. DerComp\n # 7. Comp\n # 8. Def\n # 9. Imm\n # 10. ImmSubCat\n # 11. ImmAllo\n # 12. ImmSubst\n # 13. StrucLab\n # 14. StruAcAllo\n # 15. StrucSubst\n # 16. Sepa\n lemma = fields[0]\n word = fields[1]\n derivation = fields[12]\n\n # Skip multi-word entries for roots\n roots = self._get_root(derivation) if \" \" not in word else None\n return (lemma, word, roots)", "def maml_parser():\n parser = MagicCommandParser(prog=\"maml\",\n description='Runs a maml script.')\n parser.add_argument('-q', '--quiet', action='store_true', default=False,\n help='hide output')\n return parser", "def command(self,cmd):\n self.lib.lammps_command(self.lmp,cmd.encode('utf-8'))", "def _make_matlab_command(self):\n \n raise NotImplementedError", "def brain(msg):\n\n def check_message(msg):\n \"\"\"\n Check wich neuron to use.\n :param msg:\n :return:\n \"\"\"\n words_of_message = msg.split()\n find = False\n for key in gc_words:\n if words_of_message in gc_words[key]['groups']:\n getattr(neuron.general_conversations, key)()\n find = True\n break\n for key in fc_words:\n if words_of_message in fc_words[key]['groups']:\n getattr(neuron.forecast, key)()\n find = True\n break\n for key in twitter_words:\n if words_of_message in twitter_words[key]['groups']:\n getattr(neuron.twitter, key)()\n find = True\n break\n for key in pipo_words:\n if words_of_message in pipo_words[key]['groups']:\n getattr(neuron.pipotron, key)()\n find = True\n break\n if not find:\n neuron.general_conversations.undefined()\n\n check_message(msg)", "def main_net_args(parser, allowed_nets=['fc'], dfc_arch='100,100',\n dmlp_arch='100,100', show_net_act=True, dnet_act='relu',\n show_no_bias=False, show_dropout_rate=True,\n ddropout_rate=-1, show_specnorm=True, show_batchnorm=True,\n show_no_batchnorm=False, show_bn_no_running_stats=False,\n show_bn_distill_stats=False,\n show_bn_no_stats_checkpointing=False,\n prefix=None, pf_name=None):\n assert(prefix is None or pf_name is not None)\n\n # TODO Delete 'fc' from list.\n for nt in allowed_nets:\n assert(nt in ['fc', 'mlp', 'resnet', 'zenke', 'bio_conv_net'])\n\n assert(not show_batchnorm or not show_no_batchnorm)\n\n # TODO 'fc' should be renamed to 'mlp'.\n if 'fc' in allowed_nets and len(allowed_nets) == 1:\n warn('Network type \"fc\" is deprecated. Default value of argument ' +\n '\"allowed_nets\" will be changed to [\\'mlp\\'] in the future!',\n DeprecationWarning)\n elif 'fc' in allowed_nets:\n # TODO change warning into error at some point.\n warn('Network type \"fc\" is deprecated! Use \"mlp\" instead.',\n DeprecationWarning)\n if 'fc' in allowed_nets and 'mlp' in allowed_nets:\n # Doesn't make sense to have both.\n raise ValueError('Network type names \"fc\" and \"mlp\" refer to the ' +\n 'same network type! 
Note, \"fc\" is deprecated.')\n\n heading = 'Main network options'\n\n if prefix is None:\n prefix = ''\n pf_name = ''\n else:\n heading = 'Main network options for %s network' % pf_name\n pf_name += ' '\n\n # Abbreviations.\n p = prefix\n n = pf_name\n\n ### Main network options.\n agroup = parser.add_argument_group(heading)\n\n if len(allowed_nets) > 1:\n agroup.add_argument('--%snet_type' % p, type=str,\n default=allowed_nets[0],\n help='Type of network to be used for this %s ' % n +\n 'network. Default: %(default)s.',\n choices=allowed_nets)\n\n # DELETEME once we delete option 'fc'.\n if 'fc' in allowed_nets:\n agroup.add_argument('--%sfc_arch' % p, type=str, default=dfc_arch,\n help='If using a \"fc\" %s network, this will ' % n +\n 'specify the hidden layers. ' +\n 'Default: %(default)s.')\n\n if 'mlp' in allowed_nets:\n agroup.add_argument('--%smlp_arch' % p, type=str, default=dmlp_arch,\n help='If using a \"mlp\" %s network, this will ' % n +\n 'specify the hidden layers. ' +\n 'Default: %(default)s.')\n\n # Note, if you want to add more activation function choices here, you have\n # to add them to the corresponding function `utils.misc.str_to_act` as well!\n if show_net_act:\n agroup.add_argument('--%snet_act' % p, type=str, default=dnet_act,\n help='Activation function used in the %s network.' % n +\n 'If \"linear\", no activation function is used. ' +\n 'Default: %(default)s.',\n choices=['linear', 'sigmoid', 'relu', 'elu'])\n\n if show_no_bias:\n agroup.add_argument('--%sno_bias' % p, action='store_true',\n help='No biases will be used in the %s network. ' % n +\n 'Note, does not affect normalization (like ' +\n 'batchnorm).')\n\n if show_dropout_rate:\n agroup.add_argument('--%sdropout_rate' % p, type=float,\n default=ddropout_rate,\n help='Use dropout in the %s network with the ' % n +\n 'given dropout probability (dropout is ' +\n 'deactivated for a rate of -1). Default: ' +\n '%(default)s.')\n\n if show_specnorm:\n agroup.add_argument('--%sspecnorm' % p, action='store_true',\n help='Enable spectral normalization in the ' +\n '%s network.' % n)\n\n ### Batchnorm related options.\n if show_batchnorm:\n agroup.add_argument('--%sbatchnorm' % p, action='store_true',\n help='Enable batchnorm in the %s network.' % n)\n if show_no_batchnorm:\n agroup.add_argument('--%sno_batchnorm' % p, action='store_true',\n help='Disable batchnorm in the %s network.' % n)\n\n if show_bn_no_running_stats:\n agroup.add_argument('--%sbn_no_running_stats' % p, action='store_true',\n help='If batch normalization is used, then this ' +\n 'option will deactivate the tracking ' +\n 'of running statistics. Hence, statistics ' +\n 'computed per batch will be used during ' +\n 'evaluation.')\n\n if show_bn_distill_stats:\n agroup.add_argument('--%sbn_distill_stats' % p, action='store_true',\n help='If batch normalization is used, ' +\n 'then usually the running statistics are ' +\n 'checkpointed for every task (e.g., in ' +\n 'continual learning), which has linearly ' +\n 'increasing memory requirements. 
If ' +\n 'this option is activated, the running ' +\n 'statistics will be distilled into the ' +\n 'hypernetwork after training each task, ' +\n 'such that only the statistics of the ' +\n 'current and previous task have to be ' +\n 'explicitly kept in memory')\n\n if show_bn_no_stats_checkpointing:\n agroup.add_argument('--%sbn_no_stats_checkpointing' % p,\n action='store_true',\n help='If batch normalization is used, then' +\n 'this option will prevent the checkpointing' +\n 'of batchnorm statistics for every task.' +\n 'In this case, one set of statistics is ' +\n 'used for all tasks.')\n\n return agroup", "async def man(self, ctx: Context, *, command: str) -> None:\n base_query = f\"https://www.mankier.com/api/v2/mans/?q={command}\"\n query_url = urllib.parse.quote_plus(base_query, safe=\";/?:@&=$,><-[]\")\n\n async with ctx.typing():\n # Get API query responses\n async with self.session.get(query_url) as response:\n if response.status != 200:\n await ctx.send(f\"An error occurred (status code: {response.status})\")\n return\n\n results = json.loads(await response.text())[\"results\"]\n\n # Use first result\n if len(results) > 0:\n result = results[0]\n else:\n await ctx.send(\"Invalid query, no such command\")\n return\n\n base_url = f\"https://www.mankier.com/api/v2/mans/{result['name']}.{result['section']}\"\n url = urllib.parse.quote_plus(base_url, safe=\";/?:@&=$,><-[]\")\n\n # Load man page from first result\n async with self.session.get(url) as response:\n if response.status != 200:\n await ctx.send(f\"An error occurred (status code: {response.status})\")\n return\n\n result = json.loads(await response.text())\n\n embed = Embed(\n title=f\"Man page of: **{result['name'].capitalize()}**\",\n url=result[\"url\"],\n description=f\"**{result['description']}** ({result['section']})\"\n )\n\n for anchor in result['anchors']:\n embed.add_field(\n name=f\"`{bleach.clean(anchor['anchor'], tags=[], strip=True)}`\",\n value=f\"{bleach.clean(anchor['description'], tags=[], strip=True)}\\n[Link here]({anchor['url']})\",\n inline=False\n )\n # TODO: Solve this with pagination\n try:\n await ctx.send(embed=embed)\n except HTTPException as e:\n if e.code == 50035:\n await ctx.send(embed=Embed(\n description=\"Body is too long to show\",\n color=Color.red()\n ))\n else:\n raise e", "def resnet110m(**kwargs):\r\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "def export_mlp(self):\n return self.net", "def magic_ll(self, parameter_s=''):\n self.magic_lc(parameter_s+' | grep ^l')", "def parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='ml-1m', help='which dataset to use')\n args = parser.parse_args()\n main(args)", "def test_lspci_nnmmv_ubuntu_20_10(self):\n self.assertEqual(jc.parsers.lspci.parse(self.f_in['ubuntu_20_10_lspci_nnmmv'], quiet=True),\n self.f_json['ubuntu_20_10_lspci_nnmmv'])", "def command(self, cmd):\n self.lmp.command(cmd)", "def get_net(self,netname, mol = False):\n lines = self.mfp.get_net(netname)\n return lines", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def _mnasnet(arch, block, layers, expansions, kernel_sizes, SE, dropout=0,\n 
pretrained=False, progress=False, **kwargs):\n model = MnasNet(block, layers=layers, expansions=expansions, kernel_sizes=kernel_sizes,\n SE=SE, dropout=dropout, **kwargs)\n if pretrained:\n if arch in model_urls.keys():\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model", "def netz(self, mask, target, args):\n print('{mask} netz {args}'.format(mask=mask, args=args))\n nick = args['<nick>']\n if nick is None:\n nick = mask.nick\n yield self.bot.db.get(nick, {}).get('netz', None) or 'Nichts bekannt über {nick}'.format(nick=nick)", "def maml(self, line, cell):\n parser = self.get_parser(CsMLMagics.maml_parser, \"maml\")\n args = self.get_args(line, parser)\n\n if args is not None:\n quiet = args.quiet\n out, err = maml(cell, not quiet)\n if out:\n print(out)\n if err:\n print('-----')\n print(err)", "def get_cmd():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", action=\"store\", dest=\"inputfile\", help=\"file with multiple hmm models\")\n parser.add_argument(\"-o\", action=\"store\", dest=\"dirout\", help=\"directory to store separated hmm models\")\n params = parser.parse_args()\n return params" ]
[ "0.57447124", "0.53171813", "0.5278795", "0.52074957", "0.5104776", "0.5055335", "0.50357693", "0.5034653", "0.49979833", "0.49518523", "0.4937366", "0.49060217", "0.49001834", "0.4745951", "0.47331885", "0.4656194", "0.46342298", "0.46050298", "0.46031177", "0.4588288", "0.4587108", "0.45867229", "0.4583484", "0.45825943", "0.4566405", "0.4548295", "0.4545266", "0.4538004", "0.45378256", "0.45352617" ]
0.7271368
0
Defines magic command ``%%mlnet``.
def mlnet(self, line, cell):
    line, cell = CsMagics._preprocess_line_cell_maml(  # pylint: disable=W0212
        line, cell)

    parser = self.get_parser(CsMagics.CS_parser, "CS")
    args = self.get_args(line, parser)

    if args is not None:
        name = args.name
        dep = CsMagics._linearise_args(args.dep)  # pylint: disable=W0212
        idep = CsMagics._linearise_args(args.idep)  # pylint: disable=W0212

        if args.catch:
            try:
                f = mlnet(name, cell, idep, dep)
            except Exception as e:  # pylint: disable=W0703
                print(str(e).replace('\r', ''))
                return None
        else:
            f = mlnet(name, cell, idep, dep)
        if self.shell is not None:
            self.shell.user_ns[name] = f
        return f
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mlnet_parser():\n parser = MagicCommandParser(prog=\"mlnet\",\n description='Compiles and wrap a C# function into a Python function.\\n'\n 'Automatically adds ML.net dependencies.')\n parser.add_argument('name', type=str, help='function name')\n parser.add_argument('-i', '--idep', nargs='*', action='append',\n help='internal dependencies (like System, System.Linq)')\n parser.add_argument('-d', '--dep', nargs='*', action='append',\n help='dependencies (assembly name without extension)')\n parser.add_argument('-c', '--catch', action='store', default=False,\n help='catch exception')\n return parser", "def net_cmd(self):\n \n logging.debug(\"net_cmd called\")\n cmd = \"\"\n # FIXME should probably grab the PrefixLength from the network definition\n # calc my router\n \n # FIXME: Need to split this into separate files...\n # files will be a dictionary of {\"filename\":\"contents\"}\n files = {}\n \n cmd = \"rem cmd\\r\\n\"\n tmpl = \"\"\"netsh interface ip set address \"%(nic)s\" static %(v4_fixed_ip)s 255.255.255.0\\r\\n\"\"\"\n # FIXME: this should be read out of the configuration, probably\n nets = self.networks\n ips = self.ip_addresses \n my_router = \"\"\n for netname in nets:\n v4_fixed_ip = ips.get(netname)\n my_net = v4_fixed_ip.split(\".\")[:3]\n my_net.append(\"254\")\n my_router = \".\".join(my_net)\n nic = \"%s-%s\" % (self.footprint.footprint_name, netname)\n logging.debug(\"Creating %s for %s\" % (nic, nets))\n # net_id = self.networks.get(netname)\n cmd = cmd + tmpl % locals()\n \n cmd += \"\"\"route -p add 192.168.1.0 MASK 255.255.255.0 %(my_router)s\\r\\n\"\"\" % locals()\n cmd += \"\"\"route -p add 192.168.2.0 MASK 255.255.255.0 %(my_router)s\\r\\n\"\"\" % locals()\n cmd += \"\"\"route -p add 192.168.3.0 MASK 255.255.255.0 %(my_router)s\\r\\n\"\"\" % locals()\n logging.debug(\"cmdfile:\\n\" + cmd)\n \n # print 50 * \"x\"\n # print cmd\n return cmd", "def niw_mmml_mp(args):\n return niw_mmml(*args)", "def cmd_nlst(args):", "def command(self, cmd):\n self.lmp.command(cmd)", "def command(self,cmd):\n self.lib.lammps_command(self.lmp,cmd.encode('utf-8'))", "def _make_matlab_command(self):\n \n raise NotImplementedError", "def resnet110m(**kwargs):\r\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "def magic2python(cmd):\n\n if cmd.startswith('#@') or cmd.startswith('@'):\n if cmd[0]=='#':\n cmd = cmd[1:]\n # we need to return the proper line end later\n if cmd[-1] == '\\n':\n endl = '\\n'\n else:\n endl = ''\n try:\n func,args = cmd[1:].split(' ',1)\n except:\n func,args = cmd[1:].rstrip(),''\n args = args.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\").rstrip()\n return '__IP.magic_%s (\"%s\")%s' % (func,args,endl)\n else:\n return cmd", "def lncli(cmd, node_index):\n node = Node.from_index(node_index)\n run_lncli(node, cmd)", "def cmd_mt_irc(word, word_eol, userdata):\n\tif len(word) > 1:\n\t\tsubcmd = word[1]\n\t\tif subcmd in subcommands:\n\t\t\tsubcmd = subcommands[subcmd]\n\t\t\tsubcmd(word[1:], word_eol[1:])\n\t\telse:\n\t\t\tprint('[mt_irc] Unknown subcommand \"%s\". Try \"/mt_irc help\".')\n\telse:\n\t\tprint('Usage: /mt_irc SUBCOMMAND')\n\t\tprint('Try \"/mt_irc help\".')\n\treturn xchat.EAT_XCHAT", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def _magic_n(self, args):\n if idapy._d is None:\n print \"Please select a dump first. 
Example:\"\n print \"sel t2i\"\n return\n args = args.split(\" \")\n s,n = string.join(args[:-1], \" \"), args[-1]\n \n a = addr_from_magic_string(s, rounded_32bit = False)\n print \"NSTUB( 0x%X, %s )\" % (a, n)\n idapy._d.MakeName(a, n)", "def resnet110(**kwargs):\r\n return ResNet(BasicBlock, 110, **kwargs)", "def cg_label(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(f\"({label})\")", "def __getFullCommandName(self, command, type):\n return 'cmd_%s_%s' % (type, command)", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def test_shortopt(self):\n pp = ParlaiParser(False, False)\n pp.add_argument(\"-m\", \"--model\")\n pp.add_argument(\"-mtw\", \"--multitask-weights\")\n opt = pp.parse_args([\"-m\", \"memnn\"])\n print(opt)", "def _mnasnet(arch, block, layers, expansions, kernel_sizes, SE, dropout=0,\n pretrained=False, progress=False, **kwargs):\n model = MnasNet(block, layers=layers, expansions=expansions, kernel_sizes=kernel_sizes,\n SE=SE, dropout=dropout, **kwargs)\n if pretrained:\n if arch in model_urls.keys():\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model", "def label(cmd):\n cmd = cmd.replace('make][.DP', 'make1][.NP')\n cmd = cmd.replace('make][.SC', 'make2][.SC')\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\n cmd = '[result ' + cmd + ']' #dummy function for plop\n return cmd", "def _gen_cmd(cmd, address):\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args", "def netz(self, mask, target, args):\n print('{mask} netz {args}'.format(mask=mask, args=args))\n nick = args['<nick>']\n if nick is None:\n nick = mask.nick\n yield self.bot.db.get(nick, {}).get('netz', None) or 'Nichts bekannt über {nick}'.format(nick=nick)", "def cmd(name: str) -> Callable:\n return g.new_cmd_decorator(name, ['c', 'spellCommands',])", "def meinnetz(self, mask, target, args):\n print('{mask} meinnetz {args}'.format(mask=mask, args=args))\n netz = args['<netz>'] or None\n if netz is not None:\n netz = ' '.join(netz)\n self.bot.db.set(mask.nick, netz=netz)\n yield 'ack'", "def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in 
onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def addMulticast(command, controlList):\n _addMulticast(command, controlList)", "def get_name():\n\n return 'nettools'", "def cmd(self, cmd):\n return cmd", "def train_mann_multi(args):\n\n src = args.src\n tgt = args.tgt\n base_model = args.base_model\n num_cls = args.num_cls\n tgt_list = args.tgt_list\n num_epoch = args.mann_num_epoch\n batch = args.batch\n datadir = args.datadir\n outdir = args.outdir_mann\n src_weights = args.src_net_file\n lr = args.mann_lr\n betas = tuple(args.betas)\n weight_decay = args.weight_decay\n centroids_path = args.centroids_src_file\n feat_dim = 512\n\n ###########################\n # Setup cuda and networks #\n ###########################\n\n # setup cuda\n if torch.cuda.is_available():\n kwargs = {'num_workers': 8, 'pin_memory': True}\n else:\n kwargs = {}\n\n # setup network \n net = get_model('MannNet', model=base_model, num_cls=num_cls,\n src_weights_init=src_weights,\n centroids_path=centroids_path, feat_dim=feat_dim)\n \n # print network and arguments\n print(net)\n print('Training Mann {} model for {}->{}'.format(base_model, src, tgt))\n\n #######################################\n # Setup data for training and testing #\n #######################################\n train_src_data = load_data_multi(src, 'train', batch=batch,\n rootdir=join(datadir, src), num_channels=net.num_channels,\n image_size=net.image_size, download=True, kwargs=kwargs)\n\n train_tgt_data = load_data_multi(tgt_list, 'train', batch=batch, \n rootdir=datadir, num_channels=net.num_channels,\n image_size=net.image_size, download=True, kwargs=kwargs)\n\n ######################\n # Optimization setup #\n ######################\n opt_net = optim.Adam(net.tgt_net.parameters(), lr=lr, \n weight_decay=weight_decay, betas=betas)\n opt_dis = optim.Adam(net.discriminator.parameters(), lr=lr, \n weight_decay=weight_decay, betas=betas)\n opt_selector = optim.Adam(net.fc_selector.parameters(), lr=lr*0.1, \n weight_decay=weight_decay, betas=betas)\n opt_classifier = optim.Adam(net.classifier.parameters(), lr=lr*0.1, \n weight_decay=weight_decay, betas=betas)\n\n ##############\n # Train mann #\n ##############\n for epoch in range(num_epoch):\n err = train_epoch(train_src_data, train_tgt_data, net, opt_net, opt_dis, opt_selector, opt_classifier, epoch) \n if err == -1:\n print(\"No suitable discriminator\")\n break\n \n ##############\n # Save Model #\n ##############\n os.makedirs(outdir, exist_ok=True)\n outfile = join(outdir, 'mann_{:s}_net_{:s}_{:s}.pth'.format(base_model, src, tgt))\n 
print('Saving to', outfile)\n net.save(outfile)", "def normal(command_strn):\n return Cmd('execute \"normal! {}\"'.format(command_strn))" ]
[ "0.6607906", "0.5527357", "0.54824287", "0.5431624", "0.54184914", "0.54072374", "0.53655416", "0.52886176", "0.5091649", "0.49965236", "0.4974869", "0.49429768", "0.49062353", "0.48281196", "0.48139057", "0.4807876", "0.47813764", "0.47685274", "0.47484493", "0.47436157", "0.47424507", "0.4737015", "0.47182307", "0.46940708", "0.46901074", "0.4607513", "0.46017912", "0.45931214", "0.45905006", "0.45865294" ]
0.5684712
1
Deletes the local configuration for a container.
async def delete_local_configuration_routine(self, name: str): plat = get_local_platform_routines() user = LocalUserRoutines(plat) manager = LocalContainerConfigurationManager(user) cont = self.GetItemByName(name) manager.DeleteByID(cont.GetID())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_local_config(self):\n with ignored(OSError):\n os.remove(os.path.join(self.rundir, const.LOCAL_CONFIG_FILE))", "def delete_container(self, container: Container):", "def delete_container(self, account, container):\n \n pass", "def delete_container(ContainerName=None):\n pass", "async def before_cleanup(self, invoker: PluginInvoker):\n config_file = invoker.files[\"config\"]\n try:\n config_file.unlink()\n except FileNotFoundError:\n pass\n logging.debug(f\"Deleted configuration at {config_file}\")", "def kubeconfig_delete(self):\n\n self._client.delete(\n \"{}/kubeconfig\".format(LKECluster.api_endpoint), model=self\n )", "def clean():\n Log.d(DEBUG_TAG, \"Delete config file...\")\n try:\n os.remove(CONFIG_FILE)\n except os.error as e:\n Log.e(DEBUG_TAG, \"Delete config file%s error, reason:%s\"%(CONFIG_FILE, e))", "def keystone_departed():\n with charm.provide_charm_instance() as charm_instance:\n charm_instance.remove_config()", "def delete(self):\n try:\n self._client.delete_launch_configuration(LaunchConfigurationName=self._name)\n except botocore.exceptions.ClientError as e:\n if \"not found\" in e.response[\"Error\"][\"Message\"]:\n logger.warn(\"Launch configuration %s not found\", self._name)\n else:\n raise", "def remove_config(name):\n db = dbm.open(config_file, 'c')\n del db[name]\n db.close()", "def do_DELETE(self): # pylint: disable=C0103\r\n if self.path == \"/del_config\" or self.path == \"/del_config/\":\r\n self.server.config = dict()\r\n self.log_message(\"Reset Server Configuration.\")\r\n self.send_response(200)\r\n else:\r\n self.send_response(404)", "def delete_configuration(self, node, ports):\n return hpssa_manager.delete_configuration()", "def delete(configsetname):\n cnfset = configsetPath(configsetname)\n files = os.listdir(cnfset)\n for f in files: os.remove(os.path.join(cnfset, f))\n os.rmdir(cnfset)\n return None", "def remove(cls, config: Dict) -> None:\n node = get_node_by_id(cls.cluster, config[\"node\"])\n id_ = config[\"id\"]\n\n cls.shell(\n args=[\"ceph\", \"auth\", \"del\", id_],\n )\n\n if config.get(\"remove_admin_keyring\"):\n node.exec_command(\n cmd=\"rm -rf /etc/ceph/ceph.client.admin.keyring\",\n sudo=True,\n )\n\n node.exec_command(\n sudo=True, cmd=f\"rm -rf /etc/ceph/ceph.{id_}.keyring\", check_ec=False\n )\n\n out, _ = node.exec_command(cmd=\"ls -ltrh /etc/ceph/\", sudo=True)\n log.info(out)\n\n # Remove packages like ceph-common\n # Be-careful it may remove entire /etc/ceph directory\n if config.get(\"remove_packages\"):\n for pkg in config.get(\"remove_packages\"):\n node.exec_command(\n cmd=f\"yum remove -y {pkg}\",\n sudo=True,\n )", "def delete(self):\r\n return self.connection.delete_launch_configuration(self.name)", "def delete_endpoint_config(EndpointConfigName=None):\n pass", "def reset_server_configuration_fixture():\n config_instance = Configuration()\n Path(config_instance.server_config_dir, 'test-gigantum-com.json').unlink()\n Path(config_instance.server_config_dir, 'CURRENT').unlink()\n config_instance._get_redis_client().delete(config_instance.SERVER_CONFIG_CACHE_KEY,\n config_instance.AUTH_CONFIG_CACHE_KEY)\n Path(config_instance.server_data_dir, 'test-gigantum-com').rmdir()", "def _destroy(self):\r\n if self._client:\r\n self._client.returnNr(self._nr)\r\n self._client.unregisterContainer(self)\r\n self._client = None\r\n\r\n if self._confDir:\r\n shutil.rmtree(self._confDir, True)\r\n self._confDir = None\r\n\r\n if self._dataDir:\r\n shutil.rmtree(self._dataDir, True)\r\n self._dataDir = None", "def clean():\n 
local('rm -fr %s' % os.path.abspath(env.config['destination']))", "def config_exit(self):\n self._master.destroy()\n self._config_status = False # ensure the world wouldn't be built", "def remove_stored_config(self):\n stored_config_filename = self.stored_config_filename\n if stored_config_filename.exists():\n stored_config_filename.remove()\n self._stored_cmake_generator = self._stored_config.cmake_generator", "def remove_config_object() -> None:\n if G_CONFIG_OBJECT:\n G_CONFIG_OBJECT.clear()", "def delete_conf(src_ip):\n return delete_route(src_ip)", "def del_conf(self, path):\n\t\tself.monitor.removePath(path)\n\t\tself.cache.pop(path, None)", "def clear(self):\r\n del self.__config\r\n self.__config = {}\r\n self.save()", "def delete_k8s_configuration(client, resource_group_name, cluster_name, name, cluster_type):\n # Determine ClusterRP\n cluster_rp = __get_cluster_type(cluster_type)\n\n source_control_configuration_name = name\n\n return client.delete(resource_group_name, cluster_rp, cluster_type, cluster_name, source_control_configuration_name)", "def test_delete_namespaced_deployment_config(self):\n pass", "def removeConfigFile(alg):\r\n configPath = alg.getParameterValue('config')\r\n if isWindows():\r\n command = \"DEL {}\".format(os.path.join(rliPath(), configPath))\r\n else:\r\n command = \"rm {}\".format(os.path.join(rliPath(), configPath))\r\n alg.commands.append(command)", "def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)", "def DeleteConfig(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.68364567", "0.65952647", "0.65655595", "0.63887167", "0.6293753", "0.6252849", "0.6222041", "0.6218625", "0.6210771", "0.61385065", "0.6115384", "0.6097698", "0.60793775", "0.60496986", "0.6009285", "0.6003192", "0.59821314", "0.59776366", "0.5943736", "0.5928523", "0.5888046", "0.5878867", "0.5877662", "0.587197", "0.58602136", "0.58570594", "0.5850817", "0.5849261", "0.58488315", "0.5808771" ]
0.7014883
0
Returns a drop table or database SQL statement.
def drop_statement(self, object_type, object_name): drop_statement = "DROP %s %s" % (object_type, object_name) return drop_statement
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop(name):\n\t\treturn \"DROP DATABASE {0};\".format(name)", "def _get_sql_drop_table(table_attr):\n template = 'DROP TABLE IF EXISTS \"%s\" CASCADE;' % (table_attr['name'])\n return template", "def drop_statement(self, objecttype, objectname):\n statement = Engine.drop_statement(self, objecttype, objectname)\n statement += \" CASCADE;\"\n return statement.replace(\" DATABASE \", \" SCHEMA \")", "def drop_table(self):\n\n return \"\"\"DROP TABLE \"users\\\"\"\"\"", "def drop_table(self, schema, table):\n sql = f'set role {self.write_role}; ' \\\n + f'DROP TABLE IF EXISTS {schema}.{table};'\n return sql", "def drop_table(conn, drop_table_sql):\n try:\n c = conn.cursor()\n c.execute(drop_table_sql)\n except Error as e:\n print(e)", "def drop_table(database, table):\n sql = \"\"\"DROP TABLE %s\"\"\" % table\n print \"Dropping Table %s from the Database %s\" % (table, database)\n execute_and_commit_sql(database, sql)\n return None", "def drop_table(self):\n sql = 'DROP TABLE {}'.format(self.TABLE_NAME)\n yield self._pool.execute(sql)", "def drop_table(self, table_name):\n drop_command = \"DROP TABLE {}\".format(table_name)\n try:\n self.cursor.execute(drop_command)\n status = 'Table {} dropped'.format(table_name)\n except p.Error as exception:\n status = 'Exception occured in drop_table()'\n print(exception.pgerror)", "def dropTable(self,table):\n query = \"DROP TABLE \"+table\n\tcur = self.db.cursor()\n\ttry:\n\t iQuery = self.updateLog(query)\n\t cur.execute(iQuery)\n# self.cursor.execute(iQuery)\n\texcept:\n\t self.log.write(\"No %s table found\\n\"%table)\n\tcur.close()", "def get_drop(s):\n s = match_token(s, 'DROP')\n s = match_token(s, 'ROLLUP')\n s = match_token(s, 'VIEW')\n s, rollup_view_name = get_token(s)\n t = drop_rollup_str(rollup_view_name)\n return (s, t)", "def drop_created_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Dropping created table %s\" % (opts.table_name, ))\n print(\"--------------------------------------\")\n print(timestamp())\n sql = \"DROP TABLE %s\" % (opts.table_name, )\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (sql, opts.impalad_address)\n run_command(opts, cmd)", "def as_sql(self):\r\n assert len(self.query.tables) == 1, \\\r\n \"Can only delete from one table at a time.\"\r\n qn = self.quote_name_unless_alias\r\n #=======================================================================\r\n # self._hasConstraints(self.query.tables[0])\r\n #=======================================================================\r\n \r\n result = ['DELETE FROM %s' % qn(self.query.tables[0])]\r\n where, params = self.query.where.as_sql(qn=qn, connection=self.connection)\r\n if where:\r\n result.append('WHERE %s' % where)\r\n ##DOTO: Delete after test\r\n #=======================================================================\r\n # print '>>>',result,params\r\n # if result[0] == 'DELETE FROM \"django_flatpage_sites\"' :\r\n # import pdb; pdb.set_trace()\r\n #=======================================================================\r\n return ' '.join(result), tuple(params)", "def drop_tables(session):\n\n for query in drop_table_queries:\n session.execute(query)", "def drop(self, alter_table=True):\n if self.name == 'PRIMARY':\n return \"DROP PRIMARY KEY\"\n elif alter_table:\n return \"DROP INDEX `%s`\" % (self.name)\n else:\n return \"DROP INDEX `%s` ON `%s`\" % (self.name, self.parent.name)", "async def _delete_stmt(self):\n return \"DELETE FROM {} WHERE {}=$1\".format(self.table,\n self.identifier_key)", "def 
drop_table(cls):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n sql = u'DROP TABLE IF EXISTS %s' % cls.table()\n connection.execute(sql)", "def table_drop(self, table):\n\n stmt = 'DROP TABLE %s' % table\n\n curs = self.cursor()\n try:\n curs.execute(stmt)\n except sqlite3.OperationalError:\n pass\n finally:\n curs.close()", "def test_drop(self):\n my_conn = MySQL(*self.conn_params)\n sql = \"CREATE TABLE table1 (id integer, column1 varchar(100), \" \\\n \"column2 double)\"\n my_conn.execute(sql)\n my_conn.get_table('table1')\n my_conn.drop('table1') # DROP example\n with self.assertRaises(InvalidRequestError):\n my_conn.get_table('table1')", "def drop_tables(session):\n for query in drop_table_queries:\n session.execute(query)", "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def drop_table_if_exists(self):\n\n return \"\"\"DROP TABLE IF EXISTS \"users\\\"\"\"\"", "def drop_tables():\n commands = (\n \"\"\"\n DROP TABLE utilizador_partilha CASCADE\n \"\"\",\n \"\"\" \n DROP TABLE album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE compositores CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupo CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE editora CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE genero_musical CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE album_genero_musical CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica_playlist CASCADE \n \"\"\")\n\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"SoundBox\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n # DROP table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def drop_tables(session):\n \n for query in drop_table_queries:\n try:\n rows = session.execute(query)\n except Exception as e:\n print(e)", "def drop_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'DROP SCHEMA IF EXISTS {schema};'\n return sql", "def drop_database(cursor: Cursor, target: Union[Owner, str]) -> Result[str]:\n name = target if isinstance(target, str) else owner_name(target)\n result = pgsql.drop_database(cursor, name)\n result.value = name\n return result", "def drop_table():\n\n try:\n sql = \"DROP TABLE IF EXISTS movies\"\n conn = psycopg2.connect(dsn=DB_DSN)\n cur = conn.cursor()\n cur.execute(sql)\n conn.commit()\n except psycopg2.Error as e:\n print e.message\n else:\n cur.close()\n conn.close()", "def drop(self):\n cursor = self.connect.create_cursor()\n queries = (\n (\"USE dbPurBeurre\"),\n (\"SET foreign_key_checks = 0\"),\n (\"DROP TABLE IF EXISTS Asso_Prod_Cat\"),\n (\"DROP TABLE IF EXISTS Categories\"),\n (\"DROP TABLE IF EXISTS Produits\")\n )\n\n for query in queries:\n cursor.execute(query)", "def pg_drop(ctx):\n ctx.obj = ConfigDBConnector()\n ctx.obj.connect()", "def dropall_cmd():\n drop_all()\n print(\"all tables dropped\")" ]
[ "0.7439682", "0.7192483", "0.7143962", "0.7049561", "0.67682046", "0.6763672", "0.6577111", "0.64398366", "0.6311948", "0.63029295", "0.6295627", "0.62305164", "0.62251985", "0.6221728", "0.6211469", "0.6206424", "0.6188908", "0.61786735", "0.6153751", "0.61287767", "0.6127251", "0.6079706", "0.6077293", "0.6073789", "0.60670793", "0.6065751", "0.6057594", "0.6040607", "0.60100734", "0.5984584" ]
0.7213489
1
This function connects to the device provided when called (dev) in the instantiated testbed (testbed_obj) and executes the provided show command (if none was provided, 'show version' is executed by default. If the Save option = True (s in the command line) was provided then the output will be saved to a JSON file in the current working directory with the name .json. The default behavior is NOT to save the output.
def device_info(dev, testbed_obj, showcmd='show version', save_to_json=False, logstdout=True): device = testbed_obj.devices[dev] device.connect(log_stdout=logstdout) response = device.parse(showcmd) print(f"Response from {dev} is of type {type(response)} and length {len(response)}") print(f"RAW response: \n{response}\n") print(f"FORMATTED response:\n{json.dumps(response, indent=4)}") print(response.keys()) if save_to_json: json_filename = f"{dev}.json" with open(json_filename, 'w', encoding='utf-8') as f: json.dump(response, f, ensure_ascii=False, indent=4) print(f"\nFILE SAVED: Saved Response to JSON file {json_filename}") return device, response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def executeShow(self,\n rsrcType,\n showAdditionalParams=[],\n rsrcAdditionalParams=[]):\n\n args = [\"show\",\n \"--wavefrontHost\", util.wavefrontHostName,\n \"--apiToken\", util.wavefrontApiToken] \\\n + showAdditionalParams \\\n + [rsrcType] \\\n + rsrcAdditionalParams\n wc = wavectl.Wavectl(designForTestArgv=args)\n\n with util.StdoutCapture() as captOut:\n wc.runCmd()\n\n return captOut.str()", "def test_show(self):\n _help = \"[Usage: show <class name> <id>] or \"\\\n \"[Usage: <class name>.show(<id>)]\\n\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help show\")\n self.assertEqual(f.getvalue(), _help)", "def snapshot(device):\n cmds = \"\"\n if device.model in ['9500', '4500']:\n cmds += \"\"\"\nshow users\nshow version\nshow vlan brief\nshow interfaces status\nshow ip interface brief\nshow ip route summary\nshow ipv6 interface brief\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow bgp ipv4 unicast summary\nshow bgp ipv6 unicast summary\nshow ip arp\nshow mac address-table\nshow running-config\n\"\"\"\n elif device.model == '3850':\n cmds += \"\"\"\nshow users\nshow version\nshow vlan brief\nshow interfaces status\nshow ip interface brief\nshow ip route summary\nshow ip arp\nshow mac address-table\nshow running-config\n\"\"\"\n elif device.model == 'E6000':\n cmds += \"\"\"\nshow users\nshow version\nshow linecard status\nshow ip interface brief\nshow ip route summary\nshow ipv6 interface brief\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbor\nshow bgp ipv4 summary\nshow bgp ipv6 summary\nshow ip route rip\nshow cable modem summary mac\nshow cable modem\nshow cable modem detail\nshow video sessions\nshow video sessions summary\nshow running-config verbose\n\"\"\"\n elif device.model == 'C100G':\n cmds += \"\"\"\nshow user current\nshow version\nshow chassis status\nshow ip interface brief\nshow ip route summary\nshow ipv6 interface brief\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow ip bgp summary\nshow ipv6 bgp summary\nshow ip route rip\nshow cable modem docsis-mac summary\nshow cable modem verbose\nshow cable modem cpe\nshow video session all brief\nshow video session all summary\nshow running-config\n\"\"\"\n elif device.model == 'CBR8':\n cmds += \"\"\"\nshow users\nshow version\nshow platform\nshow ip interface brief | exclude unset\nshow ip route summary\nshow ipv6 interface brief | exclude unass|^Cable|^Video|^In|^Wideband|^Dti|^VirtualPortGroup\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow bgp ipv4 unicast summary\nshow bgp ipv6 unicast summary\nshow ip route rip\nshow cable modem summary total\nshow cable modem verbose\nshow cable video session all\nshow cable video session all summary\nshow running-config\n\"\"\"\n elif device.model in ['9504N', '9516']:\n cmds += \"\"\"\nshow users\nshow version\nshow ip interface brief | exclude unass\nshow ip route summary\nshow ipv6 interface brief\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow bgp ipv4 unicast summary\nshow bgp ipv6 unicast summary\nshow ip route rip\nshow cable modem summary\nshow cable modem\nshow running-config\n\"\"\"\n elif device.model == '7360':\n cmds += \"\"\"\nshow session\nshow software-mngt oswp\nshow router interface summary\nshow router ospf neighbor\nshow router isis interface\nshow router bgp summary family ipv4\nshow router bgp summary family ipv6\nshow router status\nshow router 
route-table ipv4 summary\nshow router route-table ipv6 summary\nshow cable modem summary total\nshow cable modem\ninfo configure\n\"\"\"\n elif device.model == 'GAC':\n cmds += \"\"\"\nshow users | nomore\nshow version | tab | nomore\nshow router ospf 0 neighbor | nomore\nshow router isis 0 interface | nomore\nshow router bgp summary | nomore\nshow router rip database | nomore\nshow router route-table ipv4 summary | nomore\nshow router route-table ipv6 summary | nomore\nshow cable modem brief | tab | nomore\nshow cable modem cpe | tab | nomore\nshow configuration | display set | nomore\n\"\"\"\n return device.get_response(p.string_to_array(cmds), timeout=300, expect=r\"(?m)^(\\r)?(\\x1b\\[(\\?7h|K))?(\\*)?([)?([\\w\\-/]+[@:])?[\\w\\-]+(\\[A/U\\])?(\\([ \\w\\-]+\\))?([ :]~)?(])?(>([\\w\\-]+)+>)?(>)?([\\w\\-]+>)*[$#>%]\")", "def do_show(self, args):\n args = args.split(\" \")\n if args[0] == '':\n print(\"Incorrect command.\")\n return\n elif args[0] == 'device':\n if len(args) < 2:\n if len(self.topology.devices) == 0:\n print(\"No device in this topology.\")\n return\n for index, device in enumerate(self.topology.devices):\n print(\"[{}] {}\".format(index, device.infomation_text()))\n return\n device_ip = args[1]\n device = self.topology.get_device_by_ip(device_ip)\n if device is None:\n print(\"Not found device IP {}\".format(device_ip))\n return\n if len(args) < 3:\n # Todo show device info\n print(device.infomation_text())\n return\n if args[2] == 'route':\n routes = device.get_routes()\n self.print_pretty_routes(routes, device.get_interfaces())\n return\n if 'interface'.startswith(args[2]):\n interfaces = device.get_interfaces()\n self.print_interfaces(interfaces)\n return\n elif args[0] == 'flow':\n # print(len(self.topology.get_flows()))\n for flow in self.topology.get_flows():\n print(flow)\n return\n elif args[0] == 'route':\n return self.show_route(args[1:])\n elif args[0] == 'graph':\n G = self.topology.create_graph()\n # edge_labels = nx.get_edge_attributes(G,'state')\n nx.draw_networkx_edge_labels(G, pos=nx.spring_layout(G))\n # nx.draw_networkx_edge_labels(G, pos, labels = edge_labels)\n plt.rcParams[\"figure.figsize\"] = [30, 30]\n nx.draw_circular(G, with_labels=True)\n filename = \"imgs/topo-{}.png\".format(time.time())\n plt.savefig(filename)\n plt.show(block=False)\n elif args[0] == 'topology':\n self.topology.print_matrix()\n elif args[0] == 'version':\n print(\"SDN Handmade: 0.0.1\")", "def command_display_cli(data):\n\n command.actino_invoke('show-init')\n bigdb = command.bigsh.bigdb\n bigdb_show = command.bigdb_show\n\n modes = bigsh.command_dict.keys() + bigsh.command_nested_dict.keys()\n\n entry = {\n 'version' : ', '.join(command.command_syntax_version.keys()),\n 'desc' : ', '.join(sorted(command.command_added_modules.keys())),\n 'modes' : ', '.join(sorted(utif.unique_list_from_list(modes))),\n }\n basic = bigsh.pp.format_entry(entry, 'cli')\n\n table = 'display-cli'\n bigdb_show.tables[table].append(entry)\n\n bigdb_show.columns[table] = [\n 'version',\n 'desc',\n 'modes',\n ]\n bigdb_show.column_headers[table] = {}\n\n command.actino_invoke('show-print')\n\n return", "def main():\n # Set up the command line options\n creds = Credentials(['apic', 'nosnapshotfiles'],\n description=(\"This application replicates the switch \"\n \"CLI command 'show interface fex'\"))\n creds.add_argument('-s', '--switch',\n type=str,\n default=None,\n help='Specify a particular switch id, e.g. 
\"101\"')\n creds.add_argument('-i', '--interface',\n type=str,\n default=None,\n help='Specify a particular interface id, e.g. \"eth1/10\"')\n creds.add_argument('-b', '--brief',\n action='store_true',\n help='Display a brief summary')\n args = creds.get()\n\n interface_collector = InterfaceCollector(args.url, args.login, args.password)\n\n if args.brief:\n interface_collector.show_brief(node=args.switch, intf_id=args.interface)\n else:\n print 'detailed view is still under development...try brief view instead'", "def step_show(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console show \"\n \"--resource-group {resourceGroup} --virtual-machine-name {virtualMachineName}\",\n checks=checks,\n )", "def test_card_show(mock_card, capsys):\n mock_card.show()\n captured = capsys.readouterr()\n assert captured.out == \"SPADE, 1\\n\"", "def showDevs(repo):\n clientSocket = connectToServer() \n \n if repo == \".\":\n repo = getCurrentRepoName() \n \n userData = authenticate()\n \n PACKET = { \"CMD\" : \"SHOW_DEVS\" }\n PACKET[\"repo\"] = repo\n PACKET[\"userData\"] = userData \n\n SERIALIZED_PACKET = pickle.dumps(PACKET) \n clientSocket.send(SERIALIZED_PACKET) \n \n msg = clientSocket.recv(1024).decode() \n\n if msg == \"OK\":\n pass\n elif msg == \"NO_REPO\":\n click.secho(\"Repository does not exist.\",fg=\"red\")\n sys.exit()\n elif msg == \"NO_ACCOUNT\":\n click.secho(\"You do not have an account.\",fg=\"red\")\n sys.exit()\n elif msg == \"WRONG_PASSWORD\":\n click.secho(\"Incorrect password.\",fg=\"red\")\n sys.exit() \n else:\n click.secho(\"Invalid operation.Try again.\",fg=\"red\")\n sys.exit()\n\n SERIALIZED_DEV = clientSocket.recv(4096) \n DEV = pickle.loads(SERIALIZED_DEV) \n \n if not DEV.items():\n click.secho(f\"\\nNo developer other than you working on this repository.\\n\",fg=\"blue\")\n return\n\n click.secho(f\" Developer Permissions\",bg=\"blue\",blink=True,bold=True)\n for key,value in DEV.items():\n click.secho(f\" {key} : {value} \")", "def _send_xml_cli_show(self, command):\n assert isinstance(command, str)\n self.logger.debug(\"Sending show command {} to {}\".format(str(command), self.host))\n return self._ncc.nxoscli(command)", "def cmd( self, *args, **kwargs ):\n verbose = kwargs.get( 'verbose', False )\n log = info if verbose else debug\n log( '*** %s : %s\\n' % ( self.name, args ) )\n self.sendCmd( *args, **kwargs )\n return self.waitOutput( verbose )", "def show(dut, cli_type=\"\"):\n cli_type = st.get_ui_type(dut, cli_type=cli_type)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n result = dict()\n if cli_type == \"klish\" or cli_type == \"click\":\n command = \"show sflow\"\n output = st.show(dut, command, type=cli_type)\n if output:\n result[\"collectors\"] = list()\n for data in output:\n for key, value in data.items():\n if value != \"\":\n if key not in [\"collector_ip\", \"collector_port\", \"collector_name\"]:\n result[key] = value\n else:\n result[\"collectors\"].append(\n {\"collector_name\": data[\"collector_name\"],\n \"collector_ip\": data[\"collector_ip\"], \"port\": data[\"collector_port\"]})\n if result:\n result[\"collectors\"] = utils_obj.remove_duplicate_dicts_from_list(result[\"collectors\"])\n else:\n return False\n elif cli_type == \"rest\":\n output = st.rest_read(dut, REST_URI)\n if output and output.get(\"status\") == 200 and output.get(\"output\"):\n if YANG_MODULE in output[\"output\"]:\n data = output[\"output\"][YANG_MODULE]\n if \"SFLOW\" in data:\n 
for key, value in data[\"SFLOW\"].items():\n if isinstance(value, list):\n for attributes in value:\n result.update({\"state\": attributes.get(\"admin_state\")})\n result.update({\"agent_id\": attributes.get(\"agent_id\")})\n result.update({\"polling_interval\": attributes.get(\"polling_interval\")})\n result.update({\"sflow_key\": attributes.get(\"sflow_key\")})\n if attributes.get(\"agent_id\"):\n ip_address = get_interface_ip_address(dut, attributes.get(\"agent_id\"))\n if ip_address:\n ip, _ = ip_address[0]['ipaddr'].split('/')\n result.update({\"agent_ip\": ip})\n if \"SFLOW_COLLECTOR\" in data:\n result.update({\"collectors_cnt\": len(data[\"SFLOW_COLLECTOR\"][\"SFLOW_COLLECTOR_LIST\"])})\n result.update({\"collectors\":list()})\n for value in data[\"SFLOW_COLLECTOR\"][\"SFLOW_COLLECTOR_LIST\"]:\n collector_data = dict()\n collector_data.update({\"port\":value.get(\"collector_port\", DEFAULT_COLLECTOR_PORT)})\n collector_data.update({\"collector_ip\":value.get(\"collector_ip\")})\n collector_data.update({\"collector_name\":value.get(\"collector_name\")})\n st.log(\"COLLECTORS {}\".format(collector_data))\n result[\"collectors\"].append(collector_data)\n else:\n st.log(\"{} not observed in ouput\".format(YANG_MODULE))\n else:\n st.log(\"REST show GET CALL --- {}\".format(output))\n else:\n st.log(\"UNSUPPORTED CLI TYPE -- {}\".format(cli_type))\n return result", "def run_show(self):\n self.update_files()\n self.check_config()\n child = subprocess.Popen([\"feh\", \"-FY\", \"-Sfilename\", \"-D\",\n str(self.config.delay()),\n self.local_directory])\n while True:\n try:\n if self.dbc.poll(self.remote_directory):\n child.kill()\n self.config.reload(self.local_directory + \"/\" + \"config.txt\")\n child = subprocess.Popen([\"feh\", \"-FY\", \"-Sfilename\", \"-D\",\n str(self.config.delay()),\n self.local_directory])\n except MaxRetryError as e:\n pass\n\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + \": \" + str(e)\n\n except Exception as e:\n print str(datetime.datetime.now()) + \": \" + str(e)", "def show_commands(net_connect, hostname, password, command, data_file):\n # check if in enable mode\n print('\\n' + sep)\n print('==> Sending commands...')\n print(sep)\n # apply the command\n res = net_connect.send_command(command)\n print('\\n' + sep)\n print(res)\n print('\\n' + sep + '\\n')\n # write config to file\n print('\\n' + sep + '\\n')\n print('==> Appending command output data to file...')\n content = '\\n' + sep + '\\n' + hostname + ' : '+ command + '\\n' + sep + '\\n' + res + '\\n' + sep + '\\n'\n append_data_to_file(data_file, content, hostname)\n print('==> Exiting...')", "def show(self, command, expect_string=None):\n self.enable()\n log.debug(\"Host %s: Successfully executed command 'show' with responses.\", self.host)\n if isinstance(command, list):\n responses = []\n entered_commands = []\n for command_instance in command:\n entered_commands.append(command_instance)\n try:\n responses.append(self._send_command(command_instance))\n except CommandError as e:\n raise CommandListError(entered_commands, command_instance, e.cli_error_msg)\n return responses\n return self._send_command(command, expect_string=expect_string)", "def infoCommand(driverName=None):\n with importDriver(driverName) as driver:\n device = getDevice(driver)\n if device:\n if isinstance(device, SonyMtpAppInstaller):\n info = installApp(device)\n print('')\n props = [\n ('Model', info['deviceinfo']['name']),\n ('Product code', info['deviceinfo']['productcode']),\n ('Serial number', 
info['deviceinfo']['deviceid']),\n ('Firmware version', info['deviceinfo']['fwversion']),\n ]\n else:\n dev = SonyExtCmdCamera(device)\n info = dev.getCameraInfo()\n updater = SonyUpdaterCamera(device)\n updater.init()\n firmwareOld, firmwareNew = updater.getFirmwareVersion()\n props = [\n ('Model', info.modelName),\n ('Product code', info.modelCode),\n ('Serial number', info.serial),\n ('Firmware version', firmwareOld),\n ]\n try:\n lensInfo = dev.getLensInfo()\n if lensInfo.model != 0:\n props.append(('Lens', 'Model 0x%x (Firmware %s)' % (lensInfo.model, lensInfo.version)))\n except (InvalidCommandException, UnknownMscException):\n pass\n try:\n gpsInfo = dev.getGpsData()\n props.append(('GPS Data', '%s - %s' % gpsInfo))\n except (InvalidCommandException, UnknownMscException):\n pass\n for k, v in props:\n print('%-20s%s' % (k + ': ', v))", "def test_show_cmd():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.show_cmd'\n cmds = ['ls', 'cat hello kitty', 'abc', 'def', 'touch me', 'grep from me']\n\n def format_hist_line(idx, cmd):\n \"\"\"Construct a history output line.\"\"\"\n return ' {:d}: {:s}\\n'.format(idx, cmd)\n\n def run_show_cmd(hist_args, commands, base_idx=0, step=1):\n \"\"\"Run and evaluate the output of the given show command.\"\"\"\n stdout.seek(0, io.SEEK_SET)\n stdout.truncate()\n history._hist_main(hist, hist_args)\n stdout.seek(0, io.SEEK_SET)\n hist_lines = stdout.readlines()\n yield assert_equal, len(commands), len(hist_lines)\n for idx, (cmd, actual) in enumerate(zip(commands, hist_lines)):\n expected = format_hist_line(base_idx + idx * step, cmd)\n yield assert_equal, expected, actual\n\n hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n stdout = io.StringIO()\n saved_stdout = sys.stdout\n sys.stdout = stdout\n\n with mock_xonsh_env({'HISTCONTROL': set()}):\n for ts,cmd in enumerate(cmds): # populate the shell history\n hist.append({'inp': cmd, 'rtn': 0, 'ts':(ts+1, ts+1.5)})\n\n # Verify an implicit \"show\" emits show history\n for x in run_show_cmd([], cmds):\n yield x\n\n # Verify an explicit \"show\" with no qualifiers emits\n # show history.\n for x in run_show_cmd(['show'], cmds):\n yield x\n\n # Verify an explicit \"show\" with a reversed qualifier\n # emits show history in reverse order.\n for x in run_show_cmd(['show', '-r'], list(reversed(cmds)),\n len(cmds) - 1, -1):\n yield x\n\n # Verify that showing a specific history entry relative to\n # the start of the history works.\n for x in run_show_cmd(['show', '0'], [cmds[0]], 0):\n yield x\n for x in run_show_cmd(['show', '1'], [cmds[1]], 1):\n yield x\n\n # Verify that showing a specific history entry relative to\n # the end of the history works.\n for x in run_show_cmd(['show', '-2'], [cmds[-2]],\n len(cmds) - 2):\n yield x\n\n # Verify that showing a history range relative to the start of the\n # history works.\n for x in run_show_cmd(['show', '0:2'], cmds[0:2], 0):\n yield x\n for x in run_show_cmd(['show', '1::2'], cmds[1::2], 1, 2):\n yield x\n\n # Verify that showing a history range relative to the end of the\n # history works.\n for x in run_show_cmd(['show', '-2:'], \n cmds[-2:], len(cmds) - 2):\n yield x\n for x in run_show_cmd(['show', '-4:-2'], \n cmds[-4:-2], len(cmds) - 4):\n yield x\n\n sys.stdout = saved_stdout\n os.remove(FNAME)", "def open(self):\n try:\n self.handle = self.rm.get_instrument(self.visaName)\n self.handle.write('*RST') #reset device to default\n time.sleep(.5)\n self.handle.write(':FORM:DATA ASC') #return ASCII\n except Exception:\n print('Dvm34411.open() 
failed !')\n raise\n return True", "def show_commands_and_config(device_data: Dict[str, Any]):\n dt_str = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n device_name = device_data[\"device_name\"]\n output_file_path = OUTPUT_PATH / f\"{device_name}_{dt_str}.txt\"\n conn_data = create_conn_data(device_data)\n cfg = CFG.format(device=device_name).splitlines()\n with ConnectHandler(**conn_data) as conn, open(output_file_path, \"w\") as f:\n for command in COMMANDS:\n command_output = conn.send_command(command)\n f.write(f\"===== {command} ===== \\n{command_output}\\n\")\n\n f.write(\"\\nSending configuration...\\n\")\n output = conn.send_config_set(cfg)\n f.write(output)", "def cli(ctx, host, device_id, api_key, inching, wait):\n if ctx.invoked_subcommand == \"discover\":\n return\n\n if host is None and device_id is None:\n logger.error(\"No host name or device_id given, see usage below:\")\n click.echo(ctx.get_help())\n sys.exit(1)\n\n ctx.obj = {\n \"host\": host,\n \"device_id\": device_id,\n \"api_key\": api_key,\n \"inching\": inching,\n \"wait\": wait,\n }", "def gui_preview(self): \r\n\r\n if self.Debug_checkbox.isChecked() == True :\r\n cmdString = 'pyuic5 -p -d ' + self.fileName\r\n else:\r\n cmdString = 'pyuic5 -p ' + self.fileName\r\n \r\n output = subprocess.getoutput(cmdString)\r\n self.plainTextEdit.insertPlainText( output )", "def dev():\n trio.run(_dev_main)", "def devtester(argv):\n try:\n intf = ShellInterface(argv)\n return intf.run()\n except UsageError:\n return 2", "def reports_cli():", "def main():\n import optparse\n import sys\n parser = optparse.OptionParser()\n parser.add_option('-c', '--controller', default='NewController',\n help='Controller class to instantiate.')\n parser.add_option('-o', '--outputs', type='int', default=5,\n help='Number of outputs to use on the hardware.')\n options, _arguments = parser.parse_args()\n try:\n Demo(options.controller, options.outputs)\n except controller.ConnectionError:\n sys.exit('ABORT: Could not find a suitable device.')", "def showStateOnOs(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n rc = a.sys.net.lnx.device.IpLink.showDevice(self._log, deviceName) \n if a.sys.net.lnx.common.Command.isReturnOk(rc):\n print rc[1] # stdout\n else:\n print rc[2] # stderr", "def main():\n uilist = {\n 'joyride':(\"Uses a joystick for steering and outputs console text\", joyride),\n 'curses':(\"A simple curses-based output UI with very basic arrow-key steering\", cursesui),\n 'framebuffer':(\"An output intenteded for the on-board computer, with no steering\", framebuffer),\n }\n\n parser = OptionParser()\n\n uigroup = OptionGroup(parser, \"UI options\")\n uigroup.add_option('-u', '--ui', action=\"store\", type=\"choice\", dest=\"ui\", default=\"joyride\", choices=uilist.keys(),\n help=\"Interact with this type of UI [Default: joyride]\")\n uigroup.add_option('-j', '--joystick', action=\"store\", type=\"string\", dest=\"joystick_device\", default=None,\n help=\"Path to the device file of the joystick (for joyride UI) [Default: None]\")\n uigroup.add_option('-s', '--disable-sound', action=\"store_false\", dest=\"sound\", default=True,\n help=\"Disable sound [Default: False]\")\n uigroup.add_option('-i', '--disable-input', action=\"store_false\", dest=\"allow_input\", default=True,\n help=\"Disable input [Default: False]\")\n uigroup.add_option('-c', '--become-controller', action=\"store_true\", dest=\"become_controller\", default=False,\n help=\"Become exclusive controlling connection [Default: False]\")\n 
uigroup.add_option('-n', '--no-control', action=\"store_false\", dest=\"allow_control\", default=True,\n help=\"Ignore all UI commands from this client [Default: False]\")\n uigroup.add_option(\"--list\", action=\"store_true\", dest=\"list\", default=False,\n help=\"List the available UIs and exit\")\n parser.add_option_group(uigroup)\n\n netgroup = OptionGroup(parser, \"Network options\")\n netgroup.add_option('-a', '--host', action=\"store\", type=\"string\", dest=\"host\", default=\"localhost\",\n help=\"Host/address to connect to [Default: localhost]\")\n netgroup.add_option('-p', '--port', action=\"store\", type=\"int\", dest=\"port\", default=9999,\n help=\"Port the server is listening on [Default: 9999]\")\n parser.add_option_group(netgroup)\n\n options, args = parser.parse_args()\n\n list_and_exit = False\n if options.list:\n list_and_exit = True\n\n if not options.ui or options.ui not in uilist:\n print \"You must pick one of the available UIs with --ui\"\n\n if list_and_exit:\n print \"Available UIs:\"\n for name, info in uilist.items():\n print \"%s %s\" % (name.ljust(30), info[0])\n return 0\n\n # create the robot\n robot = Robot(options.host, options.port)\n status = robot.get_status()\n\n # handle gracefully disconnecting the robot if anything else fails\n try:\n # create the ui\n uimod = uilist[options.ui][1]\n ui = uimod.get_ui(**vars(options))\n\n # create the steerer\n steerer = steering.SteeringModel(status)\n\n if options.sound:\n player = sound.SoundPlayer(status)\n player.play(player.SOUNDS['startup'])\n else:\n player = None\n\n # create the robot client\n client = RobotClient(robot, ui, steerer, player, options.allow_control, options.become_controller)\n\n # start up all the pieces in the right order\n if player: player.start()\n try:\n ui.init()\n ui.start()\n try:\n client.run()\n finally:\n ui.stop()\n finally:\n if player:\n player.stop(player.SOUNDS['crash'])\n finally:\n if not robot.disconnected:\n robot.disconnect()", "def show_command(self, args):\n self._validate_common(args)\n self._set_manifests(args)\n\n use_local = not args.available\n\n manifest = self._manager._local if use_local else self._manager._remote\n if not use_local:\n manifest.load()\n\n records = self._get_matching_records(args, manifest)\n print('- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -')\n\n print('Local manifest path: ', self._manager._local._manifest_path)\n print('Remote manifest path: ', self._manager._remote._manifest_path)\n\n if not len(records):\n sys.exit(\"No matching items found.\")\n\n for record in records:\n print('- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -')\n pp(record)", "def main():\n extremehandle = {\n \"device_type\": \"extreme\",\n \"ip\": \"10.54.116.175\",\n \"username\": \"admin\",\n \"password\": \"\",\n }\n net_connect = ConnectHandler(**extremehandle)\n output = net_connect.send_command(\"show config vlan\")\n print(output)", "def do_show(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n print(obj)" ]
[ "0.6001754", "0.56841195", "0.566696", "0.54662395", "0.54120153", "0.5365674", "0.5250371", "0.523401", "0.52098423", "0.5194957", "0.51602536", "0.51479226", "0.5107748", "0.5103756", "0.50766176", "0.5072563", "0.50322425", "0.5027172", "0.50200206", "0.5005128", "0.50011235", "0.49869746", "0.49676362", "0.4965029", "0.49595702", "0.4957898", "0.49542114", "0.49540994", "0.49532974", "0.49395207" ]
0.72180516
0
Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "expected_bucket_owner")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expected_bucket_owner(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"expected_bucket_owner\")", "def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")", "def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")", "def bucket_id(self):\n return self._bucket_id", "def account_id():\n return client.get_caller_identity()['Account']", "def accountId():\n # save the lookup if we set the account to the environment\n if \"AWS_ACCOUNT_ID\" in os.environ:\n return os.environ[\"AWS_ACCOUNT_ID\"]\n conn = iamConn()\n funcs = [\n lambda: conn.get_user().get('get_user_response')\\\n .get('get_user_result').get('user').get('arn'),\n lambda: conn.list_roles(max_items=1).get('list_roles_response')\\\n .get('list_roles_result').get('roles')[0].get('arn'),\n ]\n for func in funcs:\n try:\n arn = func()\n break\n except (boto.exception.BotoServerError, IndexError):\n pass\n return arn.split(':')[4]", "def get_bucket_acl(Bucket=None):\n pass", "def bucket_arn(self) -> str:\n ...", "def _get_billing_account_id():\n org_client = boto3.client(\"organizations\")\n response = org_client.describe_organization()\n return response[\"Organization\"][\"MasterAccountId\"]", "def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")", "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "def account_id(self):\n return self.get('/accounts')[0]['Id']", "def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()", "def set_object_owner(self, bucket_name, object_name, uid, gid):\n\n return h3lib.set_object_owner(self._handle, bucket_name, object_name, uid, gid, self._user_id)", "def get_owner(self, obj):\n return obj.user.username", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def has_bucket_access(self, bucket, user_id):\n msg = \"has_bucket_access not implemented\"\n raise NotImplementedError(msg)", "def bucket_name(self) -> str:\n ...", "def storage_account_id(self) -> str:\n return pulumi.get(self, \"storage_account_id\")", "def test_owner_id(api: API):\n owner_id = 123456\n api.candlepin.get_owners.return_value = [{\"key\": owner_id}]\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n\n assert account._owner_id is None\n account.owner_id\n assert account.owner_id == owner_id\n api.candlepin.get_owners.assert_called_once()", "def getOwnerIdFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def bucket_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_arn\")", "def bucket_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_arn\")", "def test_request_for_a_bucket_has_integer_id(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def owner_id(self) -> Optional[str]:\n return pulumi.get(self, 
\"owner_id\")", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def bucket_arn(self) -> typing.Optional[str]:\n return self._values.get('bucket_arn')", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")" ]
[ "0.7146499", "0.6446139", "0.6384591", "0.6065642", "0.5860553", "0.5854814", "0.58411705", "0.581899", "0.57273", "0.57180434", "0.5700793", "0.5700793", "0.5690769", "0.5674292", "0.5650694", "0.56504285", "0.5646915", "0.5642213", "0.5625961", "0.5623262", "0.56102705", "0.55813515", "0.55725735", "0.55725735", "0.5571697", "0.55683136", "0.5536886", "0.55337965", "0.55186343", "0.55186343" ]
0.70394754
1
Get an existing BucketLifecycleConfigurationV2 resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, bucket: Optional[pulumi.Input[str]] = None, expected_bucket_owner: Optional[pulumi.Input[str]] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLifecycleConfigurationV2RuleArgs']]]]] = None) -> 'BucketLifecycleConfigurationV2': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _BucketLifecycleConfigurationV2State.__new__(_BucketLifecycleConfigurationV2State) __props__.__dict__["bucket"] = bucket __props__.__dict__["expected_bucket_owner"] = expected_bucket_owner __props__.__dict__["rules"] = rules return BucketLifecycleConfigurationV2(resource_name, opts=opts, __props__=__props__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)", "def get_bucket_lifecycle_configuration(Bucket=None):\n pass", "def get_bucket_inventory_configuration(Bucket=None, Id=None):\n pass", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Canary':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CanaryArgs.__new__(CanaryArgs)\n\n __props__.__dict__[\"artifact_config\"] = None\n __props__.__dict__[\"artifact_s3_location\"] = None\n __props__.__dict__[\"code\"] = None\n __props__.__dict__[\"delete_lambda_resources_on_canary_deletion\"] = None\n __props__.__dict__[\"execution_role_arn\"] = None\n __props__.__dict__[\"failure_retention_period\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"run_config\"] = None\n __props__.__dict__[\"runtime_version\"] = None\n __props__.__dict__[\"schedule\"] = None\n __props__.__dict__[\"start_canary_after_creation\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"success_retention_period\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"visual_reference\"] = None\n __props__.__dict__[\"vpc_config\"] = None\n return Canary(resource_name, opts=opts, __props__=__props__)", "def get_bucket(name):\n bucket_obj = boto3.resource(\"s3\").Bucket(name)\n if is_control_panel_bucket(name, bucket_obj):\n versioning = bucket_obj.Versioning()\n try:\n lifecycle_conf = bucket_obj.LifecycleConfiguration()\n lifecycle = lifecycle_conf.rules\n except ClientError as ex:\n lifecycle = \"\"\n return {\n \"bucket\": bucket_obj,\n \"versioning\": versioning.status,\n \"lifecycle\": lifecycle,\n }\n return None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n default_acl: Optional[pulumi.Input[str]] = None,\n predefined_acl: Optional[pulumi.Input[str]] = None,\n role_entities: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'BucketACL':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BucketACLState.__new__(_BucketACLState)\n\n 
__props__.__dict__[\"bucket\"] = bucket\n __props__.__dict__[\"default_acl\"] = default_acl\n __props__.__dict__[\"predefined_acl\"] = predefined_acl\n __props__.__dict__[\"role_entities\"] = role_entities\n return BucketACL(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_id: Optional[pulumi.Input[str]] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None) -> 'AccessConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AccessConfigurationState.__new__(_AccessConfigurationState)\n\n __props__.__dict__[\"access_configuration_id\"] = access_configuration_id\n __props__.__dict__[\"access_configuration_name\"] = access_configuration_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"directory_id\"] = directory_id\n __props__.__dict__[\"force_remove_permission_policies\"] = force_remove_permission_policies\n __props__.__dict__[\"permission_policies\"] = permission_policies\n __props__.__dict__[\"relay_state\"] = relay_state\n __props__.__dict__[\"session_duration\"] = session_duration\n return AccessConfiguration(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)", "def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())", "def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)", "def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SyntheticsPrivateLocation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SyntheticsPrivateLocationState.__new__(_SyntheticsPrivateLocationState)\n\n __props__.__dict__[\"config\"] = 
config\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"tags\"] = tags\n return SyntheticsPrivateLocation(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ResolverConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResolverConfigArgs.__new__(ResolverConfigArgs)\n\n __props__.__dict__[\"autodefined_reverse\"] = None\n __props__.__dict__[\"autodefined_reverse_flag\"] = None\n __props__.__dict__[\"owner_id\"] = None\n __props__.__dict__[\"resource_id\"] = None\n return ResolverConfig(resource_name, opts=opts, __props__=__props__)", "def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200", "def get(resource_name, id, opts=None, arn=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, thumbnail_config_permissions=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"arn\"] = arn\n __props__[\"aws_kms_key_arn\"] = aws_kms_key_arn\n __props__[\"content_config\"] = content_config\n __props__[\"content_config_permissions\"] = content_config_permissions\n __props__[\"input_bucket\"] = input_bucket\n __props__[\"name\"] = name\n __props__[\"notifications\"] = notifications\n __props__[\"output_bucket\"] = output_bucket\n __props__[\"role\"] = role\n __props__[\"thumbnail_config\"] = thumbnail_config\n __props__[\"thumbnail_config_permissions\"] = thumbnail_config_permissions\n return Pipeline(resource_name, opts=opts, __props__=__props__)", "def get(resource_name, id, opts=None, name=None, s3_destination=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"name\"] = name\n __props__[\"s3_destination\"] = s3_destination\n return ResourceDataSync(resource_name, opts=opts, __props__=__props__)", "def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'BlobServiceProperties':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = BlobServicePropertiesArgs.__new__(BlobServicePropertiesArgs)\n\n __props__.__dict__[\"automatic_snapshot_policy_enabled\"] = None\n __props__.__dict__[\"change_feed\"] = None\n __props__.__dict__[\"container_delete_retention_policy\"] = None\n __props__.__dict__[\"cors\"] = None\n __props__.__dict__[\"default_service_version\"] = None\n __props__.__dict__[\"delete_retention_policy\"] = None\n __props__.__dict__[\"is_versioning_enabled\"] = None\n __props__.__dict__[\"last_access_time_tracking_policy\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"restore_policy\"] = None\n __props__.__dict__[\"sku\"] = None\n __props__.__dict__[\"type\"] = None\n return BlobServiceProperties(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n description: 
Optional[pulumi.Input[str]] = None,\n license_count: Optional[pulumi.Input[int]] = None,\n license_count_hard_limit: Optional[pulumi.Input[bool]] = None,\n license_counting_type: Optional[pulumi.Input[str]] = None,\n license_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'LicenseConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _LicenseConfigurationState.__new__(_LicenseConfigurationState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"license_count\"] = license_count\n __props__.__dict__[\"license_count_hard_limit\"] = license_count_hard_limit\n __props__.__dict__[\"license_counting_type\"] = license_counting_type\n __props__.__dict__[\"license_rules\"] = license_rules\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"owner_account_id\"] = owner_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return LicenseConfiguration(resource_name, opts=opts, __props__=__props__)", "def properties(self):\n if not self.resource_id:\n return\n resource = None\n try:\n resource = self.client.get_pipeline_state(\n name=self.resource_id)\n except (ParamValidationError, ClientError):\n pass\n return resource", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'KafkaConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = KafkaConfigurationArgs.__new__(KafkaConfigurationArgs)\n\n __props__.__dict__[\"consumer_group\"] = None\n __props__.__dict__[\"credentials\"] = None\n __props__.__dict__[\"event_hub_partition_id\"] = None\n __props__.__dict__[\"event_hub_resource_id\"] = None\n __props__.__dict__[\"event_hub_type\"] = None\n __props__.__dict__[\"event_streaming_state\"] = None\n __props__.__dict__[\"event_streaming_type\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"type\"] = None\n return KafkaConfiguration(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n inter_region_traffic_qos_policy_description: Optional[pulumi.Input[str]] = None,\n inter_region_traffic_qos_policy_name: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_attachment_id: Optional[pulumi.Input[str]] = None,\n transit_router_id: Optional[pulumi.Input[str]] = None) -> 'InterRegionTrafficQosPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InterRegionTrafficQosPolicyState.__new__(_InterRegionTrafficQosPolicyState)\n\n __props__.__dict__[\"inter_region_traffic_qos_policy_description\"] = inter_region_traffic_qos_policy_description\n __props__.__dict__[\"inter_region_traffic_qos_policy_name\"] = inter_region_traffic_qos_policy_name\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"transit_router_attachment_id\"] = transit_router_attachment_id\n __props__.__dict__[\"transit_router_id\"] = transit_router_id\n return InterRegionTrafficQosPolicy(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: 
str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ConfigurationAssociation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ConfigurationAssociationArgs.__new__(ConfigurationAssociationArgs)\n\n __props__.__dict__[\"broker\"] = None\n __props__.__dict__[\"configuration\"] = None\n return ConfigurationAssociation(resource_name, opts=opts, __props__=__props__)", "def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n hcxes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateCloudHcxArgs']]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n management_cluster: Optional[pulumi.Input[pulumi.InputType['PrivateCloudManagementClusterArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n network_config: Optional[pulumi.Input[pulumi.InputType['PrivateCloudNetworkConfigArgs']]] = None,\n nsxes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateCloudNsxArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n vcenters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateCloudVcenterArgs']]]]] = None) -> 'PrivateCloud':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _PrivateCloudState.__new__(_PrivateCloudState)\n\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"hcxes\"] = hcxes\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"management_cluster\"] = management_cluster\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"network_config\"] = network_config\n __props__.__dict__[\"nsxes\"] = nsxes\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"vcenters\"] = vcenters\n return PrivateCloud(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n event_bus_name: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[str]] = None) -> 'EventBusPolicy':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _EventBusPolicyState.__new__(_EventBusPolicyState)\n\n __props__.__dict__[\"event_bus_name\"] = event_bus_name\n __props__.__dict__[\"policy\"] = policy\n return EventBusPolicy(resource_name, opts=opts, __props__=__props__)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Workflow':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = WorkflowArgs.__new__(WorkflowArgs)\n\n __props__.__dict__[\"acr\"] = None\n __props__.__dict__[\"aks_resource_id\"] = None\n __props__.__dict__[\"app_name\"] = None\n __props__.__dict__[\"auth_status\"] = None\n __props__.__dict__[\"branch_name\"] = None\n __props__.__dict__[\"builder_version\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"docker_build_context\"] = None\n __props__.__dict__[\"dockerfile\"] = None\n __props__.__dict__[\"dockerfile_generation_mode\"] = None\n __props__.__dict__[\"dockerfile_output_directory\"] = None\n __props__.__dict__[\"generation_language\"] 
= None\n __props__.__dict__[\"image_name\"] = None\n __props__.__dict__[\"image_tag\"] = None\n __props__.__dict__[\"language_version\"] = None\n __props__.__dict__[\"last_workflow_run\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"manifest_generation_mode\"] = None\n __props__.__dict__[\"manifest_output_directory\"] = None\n __props__.__dict__[\"manifest_type\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"namespace\"] = None\n __props__.__dict__[\"oidc_credentials\"] = None\n __props__.__dict__[\"port\"] = None\n __props__.__dict__[\"pr_status\"] = None\n __props__.__dict__[\"pr_url\"] = None\n __props__.__dict__[\"pull_number\"] = None\n __props__.__dict__[\"repository_name\"] = None\n __props__.__dict__[\"repository_owner\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Workflow(resource_name, opts=opts, __props__=__props__)", "def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n consumer_id: Optional[pulumi.Input[str]] = None,\n hash_secret: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ConsumerOauth2':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ConsumerOauth2State.__new__(_ConsumerOauth2State)\n\n __props__.__dict__[\"client_id\"] = client_id\n __props__.__dict__[\"client_secret\"] = client_secret\n __props__.__dict__[\"consumer_id\"] = consumer_id\n __props__.__dict__[\"hash_secret\"] = hash_secret\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"redirect_uris\"] = redirect_uris\n __props__.__dict__[\"tags\"] = tags\n return ConsumerOauth2(resource_name, opts=opts, __props__=__props__)" ]
[ "0.58018064", "0.5353829", "0.5117951", "0.51060236", "0.50312877", "0.49301392", "0.48632613", "0.4778881", "0.47655228", "0.47473097", "0.4707761", "0.4704642", "0.46908763", "0.46863323", "0.4678712", "0.46439952", "0.46264488", "0.46231508", "0.46105427", "0.455522", "0.4554489", "0.45515102", "0.45423943", "0.45013872", "0.44960016", "0.4484474", "0.44455725", "0.4436387", "0.4410921", "0.44036308" ]
0.7716009
0
Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
def expected_bucket_owner(self) -> pulumi.Output[Optional[str]]:
    return pulumi.get(self, "expected_bucket_owner")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expected_bucket_owner\")", "def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expected_bucket_owner\")", "def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")", "def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")", "def bucket_id(self):\n return self._bucket_id", "def account_id():\n return client.get_caller_identity()['Account']", "def accountId():\n # save the lookup if we set the account to the environment\n if \"AWS_ACCOUNT_ID\" in os.environ:\n return os.environ[\"AWS_ACCOUNT_ID\"]\n conn = iamConn()\n funcs = [\n lambda: conn.get_user().get('get_user_response')\\\n .get('get_user_result').get('user').get('arn'),\n lambda: conn.list_roles(max_items=1).get('list_roles_response')\\\n .get('list_roles_result').get('roles')[0].get('arn'),\n ]\n for func in funcs:\n try:\n arn = func()\n break\n except (boto.exception.BotoServerError, IndexError):\n pass\n return arn.split(':')[4]", "def get_bucket_acl(Bucket=None):\n pass", "def bucket_arn(self) -> str:\n ...", "def _get_billing_account_id():\n org_client = boto3.client(\"organizations\")\n response = org_client.describe_organization()\n return response[\"Organization\"][\"MasterAccountId\"]", "def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")", "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "def bucket_arn(self) -> str:\n return jsii.get(self, \"bucketArn\")", "def account_id(self):\n return self.get('/accounts')[0]['Id']", "def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()", "def set_object_owner(self, bucket_name, object_name, uid, gid):\n\n return h3lib.set_object_owner(self._handle, bucket_name, object_name, uid, gid, self._user_id)", "def get_owner(self, obj):\n return obj.user.username", "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def has_bucket_access(self, bucket, user_id):\n msg = \"has_bucket_access not implemented\"\n raise NotImplementedError(msg)", "def bucket_name(self) -> str:\n ...", "def storage_account_id(self) -> str:\n return pulumi.get(self, \"storage_account_id\")", "def test_owner_id(api: API):\n owner_id = 123456\n api.candlepin.get_owners.return_value = [{\"key\": owner_id}]\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n\n assert account._owner_id is None\n account.owner_id\n assert account.owner_id == owner_id\n api.candlepin.get_owners.assert_called_once()", "def getOwnerIdFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def bucket_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_arn\")", "def bucket_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket_arn\")", "def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")", "def test_request_for_a_bucket_has_integer_id(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/dsfgsdsg',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 
400)\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'Please provide a valid Bucket Id')", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def bucket_arn(self) -> typing.Optional[str]:\n return self._values.get('bucket_arn')", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")" ]
[ "0.7040738", "0.7040738", "0.6445131", "0.63837475", "0.6061159", "0.5858455", "0.5852843", "0.583966", "0.5817529", "0.5723972", "0.5716804", "0.5698139", "0.5698139", "0.5686788", "0.5675481", "0.5651604", "0.565157", "0.564837", "0.5641776", "0.562609", "0.5620628", "0.5610478", "0.55805695", "0.5569882", "0.5569882", "0.556741", "0.5567377", "0.5536349", "0.5531607", "0.5514971" ]
0.7147683
0
Get original model if the input model is a model wrapper.
def get_ori_model(model: nn.Module) -> nn.Module:
    if is_model_wrapper(model):
        return model.module
    else:
        return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_non_wrapped_model(model: nn.Module) -> nn.Module:\n from torch.nn import DataParallel\n from torch.nn.parallel import DistributedDataParallel\n\n if not isinstance(model, nn.Module):\n raise RuntimeError(\"Input model must be a subclass of nn.Module.\")\n\n if isinstance(model, (DataParallel, DistributedDataParallel)):\n model = model.module\n\n return model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self._model", "def get_model(self):\n return self._model", "def get_model_obj(self):\n if hasattr(self, 'model_obj'): return self.model_obj\n model_ct = ContentType.objects.get(\n app_label=self.kwargs.get('app_label'), model=self.kwargs.get('model'))\n self.model_obj = model_ct.model_class()\n return self.model_obj", "def get_model(self):\n return self.chain.model", "def get_default_model():\n models = PluginLoader.get_available_models()\n return 'original' if 'original' in models else models[0]", "def get_model(self):\n return self.model.module if isinstance(self.model, DDP) else self.model", "def model(self):\n return self.model_", "def get_model(self):\n\t\treturn self.object.__class__", "def _getModel(self):\r\n \r\n return self._model", "def getModel(self):\n return self.model", "def get_parent_model(self):\n return self._model", "def get_model(model=gin.REQUIRED):\n return model", "def model(self) -> Optional[CameraModel]:\n return self._model", "def get_model(self) -> torch.nn.Module:\n\n check.check_not_none(self.model)\n return cast(torch.nn.Module, self.model)", "def get_form_model(cls) -> BaseModel:\n # Get form model\n sig = inspect.signature(cls.item_to_form)\n form_model = sig.return_annotation\n if form_model is BaseModel:\n raise ValueError(\"item_to_form has incorrect return type annotation\")\n return form_model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model", "def model(self):\n return self._model" ]
[ "0.6501197", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.6491106", "0.64641833", "0.64641833", "0.6462358", "0.64084584", "0.63638914", "0.6319201", "0.6311763", "0.6279905", "0.6277195", "0.6266046", "0.62572783", "0.6228601", "0.62056893", "0.620196", "0.61914873", "0.6185557", "0.6185557", "0.6185557", "0.6185557" ]
0.7013369
0
Load annotation from annotations.json file
def _load_annotations(self):
    annotation_file = self._filepath(self.ANNOTATION_FILE)
    with open(annotation_file) as f:
        json_data = json.load(f)
    return json_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_annotation(json_path):\n # Open the file containing the annotation\n with open(json_path) as annotation_file:\n\n # Parse the AI2D annotation from the JSON file into a dictionary\n annotation = json.load(annotation_file)\n\n # Return the annotation\n return annotation", "def load_annotations(path):\n annotations = joblib.load(path);\n return annotations", "def load_annotations(self, index):\n anns_file = open(os.path.join(self.folder_path, self.image_ids[index] + '.json'))\n labels = json.load(anns_file)\n labels = labels[\"shapes\"]\n anns_file.close()\n return labels.copy()", "def from_local(self):\n if self.local_annotations_filename is not None:\n with open(self.local_annotations_filename, 'r') as f:\n data = json.load(f)\n self.video_annotations = dl.AnnotationCollection.from_json(data['annotations'])", "def load_annotations(self):\n fname, aux = QFileDialog.getOpenFileName(self, 'Open file', '', \"(*.csv)\")\n if fname != '':\n self.model.AnnotationLoad(fname=fname)", "def load_annotation(file, klass, annotations):\n with open(file, \"rt\") as ann_file:\n for line in ann_file:\n if \"#\" in line:\n continue\n parsed_line = klass(line)\n if parsed_line.query_name in annotations:\n annotations[parsed_line.query_name].append(parsed_line)\n else:\n annotations[parsed_line.query_name] = [parsed_line]", "def load_annotations(self):\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(dict(filename=filename, label=label))\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos", "def load_gene_annotation(self, file_path):\n\t\tpass", "def load_annos(self):\n data = None\n with open(self.anno_path, 'r') as file:\n if self.ext == '.json':\n data = json.load(file)\n\n # Label start at index 0\n if data is not None:\n for anno in data['annotations']:\n anno['category_id'] -= 1\n\n for anno in data['categories']:\n anno['id'] -= 1\n\n return data", "def get_annotations(data_folder):\n annotations_files = os.listdir('data_annotations')\n\n annotations = {}\n for file_name in annotations_files:\n annotation = json.load(\n open(f'{data_folder}/annotations/{file_name}', 'r')\n )\n key = int(annotation['entity_id'])\n annotations[key] = annotation['value']['value']\n\n return annotations", "def load_annotations(self, image_index):\n filename = self.image_names[image_index] + '.xml'\n try:\n tree = ET.parse(os.path.join(self.data_dir, 'Annotations', filename))\n return self.__parse_annotations(tree.getroot())\n except ET.ParseError as e:\n raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)\n except ValueError as e:\n raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)", "def _load_annotations(self):\n if self._raw_annotations is not None:\n return self._raw_annotations\n\n dataset_file = os.path.join(self._annotation_path, 'complete_dataset_v{}.pkl'.format(self._version))\n idx_file = os.path.join(self._annotation_path, 
'splits_indices_v{}.pkl'.format(self._version))\n\n def get_split_from_ds(ds, idx):\n split = {}\n keys = sorted(ds.keys())\n for j in xrange(len(idx)):\n k = keys[idx[j]]\n split[k] = ds[k]\n return split\n\n with open(idx_file, 'rb') as fid:\n indices = cPickle.load(fid)[self._image_set]\n with open(dataset_file, 'rb') as fid:\n ds = cPickle.load(fid)\n self._raw_annotations = get_split_from_ds(ds, indices)\n\n return self._raw_annotations", "def load_annotations(self):\n for fp in self.ris_widget.flipbook_pages:\n annotations = getattr(fp, 'annotations', {})\n pose = annotations.get('pose')\n if pose in (None, (None, None)):\n annotation_path = pathlib.Path(fp[0].name).with_suffix('.pickle')\n #if there are no annotation pickle files do nothing\n if annotation_path.exists():\n with annotation_path.open('rb') as f:\n annotations = pickle.load(f)\n fp.annotations = annotations\n self.ris_widget.annotator.update_fields()", "def load_annotations_from_file_in_mtr_format(filepath: str) -> List[Union[str, int, float]]:\n with open(filepath, 'r') as f:\n json_obj = json.load(f)\n # print(json_obj)\n bounding_boxes = json_obj['bounding_boxes']\n \n # filter out noisy annotations\n # and convert the data to kitti MOTS data format\n \n # []\n annotation_list = []\n track_id = -1\n for bboxes in bounding_boxes:\n if(bboxes['center']['z'] is None or bboxes['height'] is None or bboxes['height'] < 0.01 \\\n or bboxes['width'] < 0.01 or bboxes['length'] < 0.01):\n continue\n # annotation = [frame_id, -1]\n annotation = []\n # print(\"type: \", str2id(bboxes['object_id']))\n # object_type = bboxes['object_id'] # suppress as 'pedestrian'\n object_type = 'pedestrian'\n # truncated = -1\n # occluded = -1\n # alpha = -1\n # bbox2d = [-1, -1, -1, -1]\n dimensions = [bboxes['height'], bboxes['width'], bboxes['length']]\n # dimensions = [bboxes['height'], bboxes['length'], bboxes['width']]\n location = [bboxes['center']['x'], bboxes['center']['y'], bboxes['center']['z']]\n rotation_y = bboxes['angle']\n\n annotation.append(object_type)\n # annotation.append(truncated)\n # annotation.append(occluded)\n # annotation.append(alpha)\n # annotation += bbox2d\n annotation += dimensions\n annotation += location\n annotation.append(rotation_y)\n annotation_list.append(annotation)\n return annotation_list\n\n \"\"\"\n \"\"\"", "def import_annotated_reads(infile):\n deserialized = None\n try:\n with open(infile, 'r') as file_handle:\n deserialized = json.load(file_handle, object_hook=decode_reads)\n except FileNotFoundError:\n deserialized = {}\n return deserialized", "def load_raw_annot(filename):\n with open(filename, 'r') as fp:\n data = json.loads(fp.read())\n\n mapping = _create_mapping()\n\n for k in data.keys():\n for i in xrange(len(data[k])):\n data[k][i] = eval_utils.revise_sentiment(data[k][i], mapping)\n return data", "def load_annotation(self,index):\n return self._load_derived_cls_annotation(index)", "def _get_annotations(self) -> List[Dict[int, Dict[str, Any]]]:\n annotations = []\n for item in self.collector:\n data_file_type = os.path.basename(item).split(\".\")[-1]\n annotations.append(\n load_annotation_file(\n os.path.join(\n self.annotation_folder,\n os.path.basename(item).replace(data_file_type, \"json\"),\n )\n )\n )\n\n return annotations", "def save_annotations(self):\n r = requests.get(\n f'{self.api_host}/v1/entity-annotations?'\n f'annotation_type=Source reliability (binary)&size=100',\n headers=self.get_request_headers()\n )\n\n entity_annotations = r.json().get('entity_annotations')\n\n for 
annotation in entity_annotations:\n annotation_id = annotation.get('entity_id')\n with open(\n f'{self.data_folder}/annotations/{annotation_id}.json',\n 'w'\n ) as f:\n json.dump(annotation, f)", "def _load_nimble_annotation(self, index):\n filename = os.path.join(self._data_path, 'Annotations_Python', index + '.json')\n #currently only one bbox is considered.\n assert os.path.exists(cache_file),'Annotation {} has to be here'.format(filename)\n \n num_objs = 1\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n f = open(filename,'r')\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n cls = 1 \n gtboxes_1[ix, :] = obj.bbox\n gtboxes_2[ix,:] = obj.gtbbox\n gt_classes_1[ix] = cls\n overlaps_1[ix, cls] = 1.0\n seg_areas_1[ix] = 0\n gt_classes_1[ix] = cls\n overlaps_1[ix, cls] = 1.0\n seg_areas_1[ix] = 0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'donor_file' : donor_file,\n 'boxes_1' : gtboxes_1,\n 'boxes_2' : gtboxes_2,\n 'gt_classes_1': gt_classes_1,\n 'gt_overlaps_1' : overlaps_1,\n 'gt_classes_2': gt_classes_2,\n 'gt_overlaps_2' : overlaps_2,\n 'flipped' : False,\n 'seg_areas_1' : seg_areas_1,\n 'seg_areas_2' : seg_areas_2}", "def get_annotation_object(annots_path):\n\n lines = annots_path.read_text().split('\\n')\n \n annots = []\n for line in lines:\n if not line:\n continue\n \n annot = {}\n splot = line.split(' ')\n annot['class_id'] = int(splot[0])\n annot['center_x'] = float(splot[1])\n annot['center_y'] = float(splot[2])\n annot['width'] = float(splot[3])\n annot['height'] = float(splot[4])\n annot['class_name'] = splot[-1]\n \n if splot[5].startswith('px:'):\n px = splot[5].strip('px:')\n py = splot[6].strip('py:')\n \n if not (px == 'auto'):\n px = px.split(',')\n py = py.split(',')\n annot['px'] = [float(x) for x in px]\n annot['py'] = [float(x) for x in py]\n else:\n annot['px'] = 'auto'\n annot['py'] = 'auto'\n \n elif splot[5].startswith('conf:'):\n annot['conf'] = float(splot[5].split(':')[1])\n\n annots.append(annot)\n \n return annots", "def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)", "def get_annotations_dict(dataset_name: str, train_val_test: str):\n annotations_dict_path = constants.ANNOTATIONS_DICT_PATH.format(dataset_name, train_val_test)\n\n if exists(annotations_dict_path):\n with open(annotations_dict_path, \"r\") as ann_dict_file:\n annotations_dict = json.load(ann_dict_file)\n return annotations_dict\n return None", "def load_annotations(self):\n # get keys\n with open(self.ann_file, 'r') as fin:\n keys = [line.strip().split(' ')[0] for line in fin]\n # get frame index list for LQ frames\n frame_index_list = []\n for i in range(self.num_input_frames):\n # Each clip of Vimeo90K has 7 frames starting from 1. 
So we use 9\n # for generating frame_index_list:\n # N | frame_index_list\n # 1 | 4\n # 3 | 3,4,5\n # 5 | 2,3,4,5,6\n # 7 | 1,2,3,4,5,6,7\n frame_index_list.append(i + (9 - self.num_input_frames) // 2)\n\n data_infos = []\n for key in keys:\n folder, subfolder = key.split('/')\n lq_paths = []\n for i in frame_index_list:\n lq_paths.append(\n osp.join(self.lq_folder, folder, subfolder, f'im{i}.png'))\n gt_paths = [osp.join(self.gt_folder, folder, subfolder, 'im4.png')]\n\n data_infos.append(\n dict(lq_path=lq_paths, gt_path=gt_paths, key=key))\n\n return data_infos", "def dump_annotations(self):\n fname = 'annotations'\n if self.split is not None:\n fname = 'annotations_{}'.format(self.split)\n fname = os.path.join(self.dest_folder, '{}.json'.format(fname))\n self.save(self.dataset, fname, \"annotations\")", "def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })", "def load_annotations_from_file_in_kittimot_format(filepath: str, frame_id: int) -> List[Union[str, int, float]]:\n with open(filepath, 'r') as f:\n json_obj = json.load(f)\n # print(json_obj)\n bounding_boxes = json_obj['bounding_boxes']\n \n # filter out noisy annotations\n # and convert the data to kitti MOTS data format\n \n # []\n annotation_list = []\n track_id = -1\n for bboxes in bounding_boxes:\n if(bboxes['center']['z'] is None or bboxes['height'] is None or bboxes['height'] < 0.01 \\\n or bboxes['width'] < 0.01 or bboxes['length'] < 0.01):\n continue\n annotation = [frame_id, -1]\n # print(\"type: \", str2id(bboxes['object_id']))\n # object_type = bboxes['object_id'] # suppress as 'pedestrian'\n object_type = 'pedestrian'\n truncated = -1\n occluded = -1\n alpha = -1\n bbox2d = [-1, -1, -1, -1]\n dimensions = [bboxes['height'], bboxes['width'], bboxes['length']]\n # dimensions = [bboxes['height'], bboxes['length'], bboxes['width']]\n location = [bboxes['center']['x'], bboxes['center']['y'], bboxes['center']['z']]\n rotation_y = bboxes['angle']\n\n annotation.append(object_type)\n annotation.append(truncated)\n annotation.append(occluded)\n annotation.append(alpha)\n annotation += bbox2d\n annotation += dimensions\n annotation += location\n annotation.append(rotation_y)\n annotation_list.append(annotation)\n return annotation_list\n\n\n\n \"\"\"\n filepath is the absolute path to the annotations\n\n \"\"\"", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def _read_annotations(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n print('Loading annotations...')\n \n if self.caching:\n # Get last modified timestamp of the cache path\n cache_path = pathlib.Path(self.cache_path)\n cache_timestamp = cache_path.stat().st_mtime\n\n is_empty = not any(cache_path.iterdir())\n\n data_path = 
pathlib.Path(os.path.join(self.dataset_dir, self.dataset_name + '_unroll.json'))\n data_timestamp = data_path.stat().st_mtime\n\n # If the data is newer than the cached data, we have to update it. Else we take the cached data\n if is_empty or (data_timestamp > cache_timestamp):\n with open(os.path.join(self.dataset_dir, self.dataset_name + '_unroll.json')) as f:\n # Read file by line: persons, objects, actions\n for i, json_obj in enumerate(f):\n json_data = json.loads(json_obj)\n\n # Process and separate data into persons, objects and activities\n if i == 0:\n persons, person_ids = self._process_persons(json_data['persons'])\n elif i == 1:\n objects, object_ids = self._process_objects(json_data['objects'])\n elif i == 2:\n activities, activity_names = self._process_activities(json_data['actions'])\n else:\n print('Incorrect format in annotation file')\n exit()\n \n # Update cached files\n np.save(os.path.join(cache_path, 'persons.npy'), persons)\n np.save(os.path.join(cache_path, 'person_ids.npy'), person_ids)\n np.save(os.path.join(cache_path, 'objects.npy'), objects)\n np.save(os.path.join(cache_path, 'object_ids.npy'), object_ids)\n np.save(os.path.join(cache_path, 'activities.npy'), activities)\n np.save(os.path.join(cache_path, 'activity_names.npy'), activity_names)\n else:\n persons = np.load(os.path.join(cache_path, 'persons.npy'), allow_pickle=True)\n person_ids = np.load(os.path.join(cache_path, 'person_ids.npy'), allow_pickle=True)\n objects = np.load(os.path.join(cache_path, 'objects.npy'), allow_pickle=True)\n object_ids = np.load(os.path.join(cache_path, 'object_ids.npy'), allow_pickle=True)\n activities = np.load(os.path.join(cache_path, 'activities.npy'), allow_pickle=True)\n activity_names = np.load(os.path.join(cache_path, 'activity_names.npy'), allow_pickle=True)\n else:\n with open(os.path.join(self.dataset_dir, self.dataset_name + '_unroll.json')) as f:\n # Read file by line: persons, objects, actions\n for i, json_obj in enumerate(f):\n json_data = json.loads(json_obj)\n\n # Process and separate data into persons, objects and activities\n if i == 0:\n persons, person_ids = self._process_persons(json_data['persons'])\n elif i == 1:\n objects, object_ids = self._process_objects(json_data['objects'])\n elif i == 2:\n activities, activity_names = self._process_activities(json_data['actions'])\n else:\n print('Incorrect format in annotation file')\n exit()\n\n return persons, objects, activities, person_ids, object_ids, activity_names", "def load_data(self, annotation_json, images_dir):\r\n # Load json from file\r\n json_file = open(annotation_json)\r\n coco_json = json.load(json_file)\r\n json_file.close()\r\n \r\n # Add the class names using the base method from utils.Dataset\r\n source_name = \"coco_like\"\r\n ids={}\r\n i=0\r\n for category in coco_json['categories']:\r\n i+=1\r\n class_id = category['id']\r\n ids[class_id]=i\r\n class_name = category['name']\r\n if class_id < 1:\r\n print('Error: Class id for \"{}\" cannot be less than one. 
(0 is reserved for the background)'.format(class_name))\r\n return\r\n \r\n self.add_class(source_name, class_id, class_name)\r\n for annotation in coco_json['annotations']:\r\n annotation[\"category_id\"]=ids[annotation[\"category_id\"]]\r\n \r\n # Get all annotations\r\n \r\n annotations = {}\r\n for annotation in coco_json['annotations']:\r\n image_id = annotation['image_id']\r\n if image_id not in annotations:\r\n annotations[image_id] = []\r\n annotations[image_id].append(annotation)\r\n \r\n # Get all images and add them to the dataset\r\n seen_images = {}\r\n for image in coco_json['images']:\r\n image_id = image['id']\r\n if image_id in seen_images:\r\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\r\n else:\r\n seen_images[image_id] = image\r\n try:\r\n image_file_name = image['file_name']\r\n image_width = image['width']\r\n image_height = image['height']\r\n except KeyError as key:\r\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\r\n \r\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\r\n image_annotations = annotations[image_id]\r\n \r\n # Add the image using the base method from utils.Dataset\r\n self.add_image(\r\n source=source_name,\r\n image_id=image_id,\r\n path=image_path,\r\n width=image_width,\r\n height=image_height,\r\n annotations=image_annotations\r\n )" ]
[ "0.7888858", "0.7325585", "0.7084303", "0.6923192", "0.687097", "0.68155825", "0.66877896", "0.6673177", "0.65827876", "0.6560132", "0.6559868", "0.6558796", "0.65527225", "0.64227253", "0.63591856", "0.6338805", "0.6284784", "0.6264161", "0.62268883", "0.62000585", "0.61847454", "0.6140274", "0.612206", "0.60980636", "0.60980326", "0.6082721", "0.60780305", "0.6075901", "0.60513926", "0.60361415" ]
0.8038193
0
Load the data indices txt file.
def _load_split_indices(self): split_file = self.SPLITS.get(self.split) indices_file = self._filepath(split_file) with open(indices_file) as txt_file: idx_data = [int(i) for i in txt_file.readline().split()] return idx_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_index_data(data_path):\n index_keywords = []\n with open(data_path) as data:\n for line in data:\n index_keywords.append(line.rstrip())\n return index_keywords", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def load_labels_index_map(self, file_path):\n with open(file_path) as handle:\n self._labels_2_index = json.loads(handle.read())\n self._index_2_labels = {i: label.lower() for label, i in self._labels_2_index.items()}\n self._labels_dim = len(self._labels_2_index)", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data", "def load_indices(mode='char', words=None, counts=None):\n if os.path.exists(mode+'indices.p'):\n indices = pickle.load(open(mode+'indices.p', 'rb'), encoding='latin1')\n else:\n indices = {}\n i = 0\n for word in counts.keys():\n indices[word] = int(i)\n indices[i] = str(word)\n i += 1\n print(\"i is: \" + str(i))\n print(\"len is: \" + str(len(indices.keys())))\n pickle.dump(indices, open(mode+'indices.p', 'wb'))\n return indices", "def load_input(path):\n counts = defaultdict(int)\n if not os.path.exists(mode+'indices.p'):\n root = '/'.join(path.split('/')[0:-1])\n all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n else:\n all_paths = [path]\n \n for path in all_paths:\n print(path)\n with open(path) as f:\n if mode == 'word':\n words = tokenize(f.read())\n else:\n words = f.read()\n\n for word in words:\n counts[word] += 1 \n\n words = [x for x in words if len(x) > 0]\n return words, counts", "def get_data_as_indices(self, file_name):\n X, Y = [],[]\n org_X, org_Y = [], []\n\n for (words, tags) in read_conll_file(file_name):\n word_indices, word_char_indices = self.get_features(words)\n tag_indices = [self.tag2idx.get(tag) for tag in tags]\n X.append((word_indices,word_char_indices))\n Y.append(tag_indices)\n org_X.append(words)\n org_Y.append(tags)\n return X, Y #, org_X, org_Y - for now don't use", "def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def data_index(self):\n if not self._data_index:\n self._data_index = self.index_terms(self.get_files(config[\"data_subdir\"], ['.csv', '.txt']))\n return self._data_index", "def _read_indices(path):\n paths = sorted(tf.io.gfile.glob('%s-*-of-*_index.json' % path))\n all_indices = []\n for path in paths:\n json_str = epath.Path(path).read_text()\n # parse it back into a proto.\n shard_index = json.loads(json_str)\n all_indices.append(list(shard_index['index']))\n return [os.path.basename(p) for p in paths], all_indices", "def read_dataset(path_to_dataset_folder, index_filename):\n ###############################################################\n # Fill your code in this function\n ###############################################################\n # Hint: open(path_to_dataset_folder+'/'+index_filename,'r')\n with open(path_to_dataset_folder + '/' + index_filename, 'r') as index_file:\n lines = index_file.readlines()\n txt_paths 
= {}\n for line in lines:\n txt_path = line.split()\n txt_paths[txt_path[1]] = txt_path[0]\n\n A = []\n T = []\n for sample_file, label in txt_paths.items():\n A_16dim = []\n with open(path_to_dataset_folder + '/' + sample_file, 'r') as dim_values:\n lines = dim_values.readlines()\n for line in lines:\n dim_value = line.split()\n A_helper = [1]\n for element in dim_value:\n A_helper.append(float(element))\n A.append(A_helper)\n label = int(label)\n T_helper = [label]\n T.append(T_helper)\n A = np.array(A)\n T = np.array(T)\n return A, T", "def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def load_data_and_labels(data_file):\n # Load data from files\n obj = open(data_file, \"r\")\n y, x_text, query= [],[],[]\n for ele in obj:\n ele = ele.strip().split(\"\\t\")\n if len(ele) !=5 or ele[0].strip() not in [\"1\", \"-1\"]:\n #print ele\n continue\n if (ele[0].strip() == \"1\"):\n y.append([0])\n else:\n y.append([1])\n\n query_text = ele[1].strip().decode(\"utf8\")\n doc_text = ele[2].strip().decode(\"utf8\")\n x_text.append( \" \".join( jieba.cut(doc_text) ) )\n query.append( \" \".join( jieba.cut(query_text) ) )\n return [x_text, np.array(y), np.array(query)]", "def load_index(index_file):\n index_dict = {}\n with open(index_file) as f:\n for line in f:\n title, path = line.strip().split()\n index_dict[title] = path\n return index_dict", "def load_from_planetoid_files(dataset_name, path):\n\n def _sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\n def _parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n def _load_file(name):\n \"\"\"Load from data file.\"\"\"\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)\n\n x = _load_file('x')\n y = _load_file('y')\n tx = _load_file('tx')\n ty = _load_file('ty')\n allx = _load_file('allx')\n ally = _load_file('ally')\n graph = _load_file('graph')\n\n filename = 'ind.{}.test.index'.format(dataset_name)\n filename = os.path.join(path, filename)\n test_idx_reorder = _parse_index_file(filename)\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_name == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph).\n # Find isolated nodes, add them as zero-vecs into the right position.\n test_idx_range_full = range(\n min(test_idx_reorder),\n max(test_idx_reorder) + 1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range - min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range - min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n 
features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y) + 500)\n\n train_mask = _sample_mask(idx_train, labels.shape[0])\n val_mask = _sample_mask(idx_val, labels.shape[0])\n test_mask = _sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return (adj, features, y_train, y_val, y_test, train_mask, val_mask,\n test_mask, labels)", "def loadDataFile(self, filename):\n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'start', 'end', 'parent' \n ])", "def load_data():\n with open('../data/dataset.txt', 'r') as data_file:\n return data_file.read().split('\\n')", "def load_word2index(self):\n word2index = {}\n with open(self.nodes_file, 'r') as reader:\n for index, line in enumerate(reader):\n node = line.strip()\n word2index[node] = index\n\n return word2index", "def load_data(path_to):\n Y = np.load(path_to + \"Y.npy\")\n path_source_token_idxs = np.load(path_to + \"path_source_token_idxs.npy\")\n path_idxs = np.load(path_to + \"path_idxs.npy\")\n path_target_token_idxs = np.load(path_to + \"path_target_token_idxs.npy\")\n context_valid_masks = np.load(path_to + \"context_valid_masks.npy\")\n X = path_source_token_idxs, path_idxs, path_target_token_idxs, context_valid_masks\n\n return X, Y", "def read_data(data_path):\n tr = data_path + 'train_vectors.txt'\n v = data_path + 'val_vectors.txt'\n tst = data_path + 'test_vectors.txt'\n return tr, v, tst", "def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];", "def load_data(filename):\n\n with open(filename) as f_obj: # Open file to read & assign file object\n for line in f_obj: # Read each line as text\n print(int(line)) # Convert to int & display", "def read_data(feature_file, label_file):", "def main_load_ind(indfile):\n print \"- Loading the following file with attainment indicator values:\"\n print \" *\", indfile\n print\n \n point_ind = datasets.load_ind(indfile, flat=True)\n return point_ind", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n X = loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n return fast_sparse_matrix(X)", "def load_index(self, fn):\n # print('Load ', fn)\n # if fn[len(fn)-4:] == '.pkl':\n # fn = fn[0:len(fn)-4]\n fn = 'idx_bench'\n inverted_index = utils.load_obj(fn)\n return inverted_index", "def load(self):\n self.index = nmslib.init(method='hnsw', space='cosinesimil')\n self.index.loadIndex(c.index_path('hnsw.index'))\n self.ys = joblib.load(\"%s.ys\" % self.index_file_prefix)" ]
[ "0.67228645", "0.64503235", "0.6270136", "0.62359387", "0.62125915", "0.6206149", "0.61973083", "0.6185728", "0.61774135", "0.61695915", "0.61617666", "0.61599284", "0.61492556", "0.61449534", "0.61161757", "0.6100025", "0.60692877", "0.602712", "0.6014525", "0.60091174", "0.59976465", "0.59890383", "0.5982772", "0.5970109", "0.59659106", "0.595895", "0.59501845", "0.5940496", "0.59277993", "0.5918126" ]
0.68765545
0
Convert the bbox record to BBox2D objects.
def _convert_to_bbox2d(single_bbox):
    label = single_bbox["label_id"]
    bbox = single_bbox["bbox"]
    canonical_bbox = BBox2D(
        x=bbox[0], y=bbox[1], w=bbox[2], h=bbox[3], label=label
    )
    return canonical_bbox
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bbox2fields():\n bbox2label = {\n 'gt_bboxes': 'gt_labels',\n 'gt_bboxes_ignore': 'gt_labels_ignore'\n }\n bbox2mask = {\n 'gt_bboxes': 'gt_masks',\n 'gt_bboxes_ignore': 'gt_masks_ignore'\n }\n bbox2seg = {\n 'gt_bboxes': 'gt_semantic_seg',\n }\n return bbox2label, bbox2mask, bbox2seg", "def _decode_bbox(self, normalized_bbox):\n #apply the inverse of transformation\n y1,x1,y2,x2 = preprocess.apply_transformation(normalized_bbox,\n np.linalg.inv(self.transformation))\n\n w,h = self.image_size\n y1,x1,y2,x2 = y1*h,x1*w,y2*h,x2*w\n return vot.Rectangle(x1,y1,x2-x1,y2-y1)", "def transform_bbox(box, format=BBox.LTWH, to=BBox.XYWH):\n if format == BBox.LTWH:\n if to == BBox.LTWH:\n return list(box)\n elif to == BBox.LTRB:\n return box_ltwh_to_ltrb(box)\n else:\n return box_ltwh_to_xywh(box)\n elif format == BBox.LTRB:\n if to == BBox.LTWH:\n return box_ltrb_to_ltwh(box)\n elif to == BBox.LTRB:\n return list(box)\n else:\n return box_ltrb_to_xywh(box)\n else:\n if to == BBox.LTWH:\n return box_xywh_to_ltwh(box)\n elif to == BBox.LTRB:\n return box_xywh_to_ltrb(box)\n else:\n return list(box)", "def convert_bbox(bbox, width, height):\n min_x, min_y, max_x, max_y = bbox\n # scale X axis\n min_x *= width\n max_x *= width\n # invert Y axis and scale\n min_y = (1 - min_y) * height\n max_y = (1 - max_y) * height\n\n return min_x, min_y, max_x, max_y", "def roi2vertex2d(self,bboxes):\n if not bboxes.fix_vertex_to_bb:\n return (None,None,None)\n\n return ( larcv.Pixel2D( int(bboxes.vertices[0][\"pos\"][0]), int(bboxes.vertices[0][\"pos\"][1]) ),\n larcv.Pixel2D( int(bboxes.vertices[1][\"pos\"][0]), int(bboxes.vertices[1][\"pos\"][1]) ),\n larcv.Pixel2D( int(bboxes.vertices[2][\"pos\"][0]), int(bboxes.vertices[2][\"pos\"][1]) ) )", "def to_bounding_box(self):\n if self.bbox is not None:\n return self.bbox\n from .bbox import BBox\n\n xx = self.xx\n yy = self.yy\n self.bbox = BBox(xmin=min(xx), xmax=max(xx), ymin=min(yy), ymax=max(yy), label=self.label, **self.fields)\n return self.bbox", "def bbox(bbox = [(-1, -1), (3, 4)], layer = 0):\n D = Device(name = 'bbox')\n (a,b), (c,d) = bbox\n points = ((a,b), (c,b), (c,d), (a,d))\n D.add_polygon(points, layer = layer)\n return D", "def transform_bboxes(boxes, format=BBox.LTWH, to=BBox.XYWH, inplace=False):\n if format == BBox.LTWH:\n if to == BBox.LTWH:\n return boxes\n elif to == BBox.LTRB:\n return boxes_ltwh_to_ltrb(boxes, inplace=inplace)\n else:\n return boxes_ltwh_to_xywh(boxes, inplace=inplace)\n elif format == BBox.LTRB:\n if to == BBox.LTWH:\n return boxes_ltrb_to_ltwh(boxes, inplace=inplace)\n elif to == BBox.LTRB:\n return boxes\n else:\n return boxes_ltrb_to_xywh(boxes, inplace=inplace)\n else:\n if to == BBox.LTWH:\n return boxes_xywh_to_ltwh(boxes, inplace=inplace)\n elif to == BBox.LTRB:\n return boxes_xywh_to_ltrb(boxes, inplace=inplace)\n else:\n return boxes", "def get_2d_bb_points(bbox):\n\n cords = np.zeros((4, 4))\n extent = bbox.extent\n # arrange in clockwise to satisify the requirement in PyVisiLibity\n cords[0, :] = np.array([extent.x, extent.y, 0, 1])\n cords[1, :] = np.array([extent.x, -extent.y, 0, 1])\n cords[2, :] = np.array([-extent.x, -extent.y, 0, 1])\n cords[3, :] = np.array([-extent.x, extent.y, 0, 1])\n\n T_world = BboxUtils.get_matrix(carla.Transform(bbox.location, bbox.rotation))\n world_cords = np.dot(T_world, np.transpose(cords))[:2, :]\n return world_cords", "def bbox_transform(bbox):\n with tf.variable_scope('bbox_transform') as scope:\n cx, cy, w, h = bbox\n out_box = [[]]*4\n out_box[0] = cx-w/2\n out_box[1] = 
cy-h/2\n out_box[2] = cx+w/2\n out_box[3] = cy+h/2\n\n return out_box", "def convert_bbox(bboxes, old_shape, new_shape):\n bboxes = np.array(bboxes, np.float32)\n if bboxes.shape[0] == 0:\n return bboxes\n\n oh, ow = old_shape\n nh, nw = new_shape\n\n bboxes[:, 0] = bboxes[:, 0] * nh / oh\n bboxes[:, 1] = bboxes[:, 1] * nw / ow\n bboxes[:, 2] = bboxes[:, 2] * nh / oh\n bboxes[:, 3] = bboxes[:, 3] * nw / ow\n\n bboxes = rectify_bbox(bboxes, new_shape)\n return bboxes", "def __bbox2square(self, bboxes):\n height = bboxes[:, 2] - bboxes[:, 0] + 1\n width = bboxes[:, 3] - bboxes[:, 1] + 1\n side = np.maximum(width, height).T\n bboxes[:, 0] += (height - side) * 0.5\n bboxes[:, 1] += (width - side) * 0.5\n bboxes[:, 2] = np.around(bboxes[:, 0] + side - 1);\n bboxes[:, 3] = np.around(bboxes[:, 1] + side - 1);\n bboxes[:, :2] = np.around(bboxes[:, :2])\n return bboxes", "def bbox_transform(bbox):\n with tf.variable_scope('bbox_transform') as scope:\n cx = bbox[..., 0]\n cy = bbox[..., 1]\n w = bbox[..., 2]\n h = bbox[..., 3]\n out_box = np.stack(\n [cx-w/2, cy-h/2, cx+w/2, cy+h/2],\n axis=-1\n )\n return out_box", "def denormalize_bbox(bbox: TBox, rows: int, cols: int) -> TBox:\n tail: Tuple[Any, ...]\n (x_min, y_min, x_max, y_max), tail = bbox[:4], tuple(bbox[4:])\n\n if rows <= 0:\n raise ValueError(\"Argument rows must be positive integer\")\n if cols <= 0:\n raise ValueError(\"Argument cols must be positive integer\")\n\n x_min, x_max = x_min * cols, x_max * cols\n y_min, y_max = y_min * rows, y_max * rows\n\n return cast(BoxType, (x_min, y_min, x_max, y_max) + tail) # type: ignore", "def astype(self, newtype) -> 'BoundingBox2D':\n return BoundingBox2D(newtype(self.xmin), newtype(self.ymin), newtype(self.xmax), newtype(self.ymax))", "def _box2d_to_bbox(pg_box2d: str) -> Tuple[float, float, float, float]:\n m = _BOX2D_PATTERN.match(pg_box2d)\n if m is None:\n raise RuntimeError(f\"Unexpected postgis box syntax {pg_box2d!r}\")\n\n # We know there's exactly four groups, but type checker doesn't...\n # noinspection PyTypeChecker\n return tuple(float(m) for m in m.groups())", "def bbox_transpose(\n bbox: KeypointInternalType, axis: int, rows: int, cols: int\n) -> KeypointInternalType: # skipcq: PYL-W0613\n x_min, y_min, x_max, y_max = bbox[:4]\n if axis not in {0, 1}:\n raise ValueError(\"Axis must be either 0 or 1.\")\n if axis == 0:\n bbox = (y_min, x_min, y_max, x_max)\n if axis == 1:\n bbox = (1 - y_max, 1 - x_max, 1 - y_min, 1 - x_min)\n return bbox", "def _conv_bbox_to_array(bbox):\n array = list()\n for r in range(bbox[0, 1], bbox[1, 1] + 1):\n row = list()\n for c in range(bbox[0, 0], bbox[1, 0] + 1):\n row.append([c, r])\n array.append(row)\n return np.array(array)", "def bbox2points(bbox):\r\n l, x, y, w, h = bbox\r\n xmin = int(round(x - (w / 2)))\r\n xmax = int(round(x + (w / 2)))\r\n ymin = int(round(y - (h / 2)))\r\n ymax = int(round(y + (h / 2)))\r\n return (l, xmin, ymin, xmax, ymax)", "def coco_box_to_bbox(box):\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],\n dtype=np.float32)\n return bbox.reshape(1,4)", "def bboxtransform(bbox):\n gta = np.zeros((len(bbox),4))\n for i in range(len(bbox)):\n cx = bbox[i,0]\n cy = bbox[i,1]\n w = bbox[i,2]\n h = bbox[i,3]\n gta[i,0] = cx - (w / 2.)\n gta[i,1] = cy - (h / 2.)\n gta[i,2] = cx + (w / 2.)\n gta[i,3] = cy + (h / 2.)\n return gta", "def read_bounding_box_2d(annotation, label_mappings=None):\n bboxes = []\n for b in annotation:\n label_id = b[\"label_id\"]\n x = b[\"x\"]\n y = b[\"y\"]\n w = b[\"width\"]\n h = 
b[\"height\"]\n if label_mappings and label_id not in label_mappings:\n continue\n box = BBox2D(label=label_id, x=x, y=y, w=w, h=h)\n bboxes.append(box)\n\n return bboxes", "def bbox_to_geom(bbox: Tuple[float, float, float, float]) -> Dict:\n # TODO: Handle dateline crossing geometry\n return {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bbox[0], bbox[3]],\n [bbox[0], bbox[1]],\n [bbox[2], bbox[1]],\n [bbox[2], bbox[3]],\n [bbox[0], bbox[3]],\n ]\n ],\n }", "def coco_box_to_bbox(box):\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n return bbox", "def bbox_format(self) -> bbox_utils.BBoxFormat:\n raise NotImplementedError", "def denormalize_bbox(bbox, rows, cols):\n x_min, y_min, x_max, y_max = bbox[:4]\n denormalized_bbox = [x_min * cols, y_min * rows, x_max * cols, y_max * rows]\n return denormalized_bbox + list(bbox[4:])", "def bbox(\n bbox: Tuple[Coordinate, Coordinate] = ((-1.0, -1.0), (3.0, 4.0)),\n layer: Tuple[int, int] = (1, 0),\n top: float = 0,\n bottom: float = 0,\n left: float = 0,\n right: float = 0,\n) -> gf.Component:\n D = gf.Component()\n (xmin, ymin), (xmax, ymax) = bbox\n points = [\n [xmin - left, ymin - bottom],\n [xmax + right, ymin - bottom],\n [xmax + right, ymax + top],\n [xmin - left, ymax + top],\n ]\n D.add_polygon(points, layer=layer)\n return D", "def bbox(self):\n return np.array(\n [[self.position[0], self.position[1]], [self.position[0], self.position[1]]]\n )", "def bbox(self, obj):\n return self.phy2abs.bbox(obj)", "def bounding_box_2_polygon(bbox, bbox_type='ltrb'):\n\n # convert bbox type to ltrb\n if bbox_type != 'ltrb':\n bbox = Box.change_bounding_box_type(bbox, bbox_type, 'ltrb')\n\n # extract bounding box parameters\n left = bbox[0]\n top = bbox[1]\n right = bbox[2]\n bottom = bbox[3]\n\n # set bounding box standard form:\n # traverse boundary clock-wise starting - and ending - at the top-left corner\n bbox_standard_form = [(left, top), (right, top), (right, bottom), (left, bottom), (left, top)] # closed line\n\n # define polygon\n polygon = Polygon(bbox_standard_form)\n\n return polygon" ]
[ "0.6832664", "0.6783777", "0.6691991", "0.63218105", "0.6319875", "0.6284736", "0.6246083", "0.6223461", "0.61668444", "0.6120401", "0.6079261", "0.6019865", "0.6007134", "0.59077746", "0.5902924", "0.5898657", "0.58965296", "0.5879593", "0.5847342", "0.58367205", "0.58200914", "0.5819641", "0.58146614", "0.57987005", "0.5797426", "0.5790555", "0.5790374", "0.5744547", "0.57439256", "0.5729902" ]
0.75232357
0
Download dataset from GCS
def download(self):
    cloud_path = f"gs://{const.GCS_BUCKET}/{self.GCS_PATH}"
    # download label file
    label_zip = download_file_from_gcs(
        cloud_path, self.root, self.LABEL_ZIP
    )
    with zipfile.ZipFile(label_zip, "r") as zip_dir:
        zip_dir.extractall(self.root)
    # download tfexamples for a dataset split
    tfexamples_zip = download_file_from_gcs(
        cloud_path, self.root, self.SPLITS_ZIP.get(self.split)
    )
    with zipfile.ZipFile(tfexamples_zip, "r") as zip_dir:
        zip_dir.extractall(self.root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_dataset(self):\n raise NotImplementedError", "def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")", "def download_compressed_dataset(url):\n raise NotImplementedError", "def download_model_from_gcs(self):\n # download model\n download_file_from_gcs(self.config.model_bucket_name,\n self.config.model_gcs_path,\n self.config.model_local_path)\n\n # download lable columns\n download_file_from_gcs(self.config.model_bucket_name,\n self.config.labels_gcs_path,\n self.config.labels_local_path)", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def download(args):\n with_dataset(args, Dataset._download)", "def download_from_gcs(gcs_uri, target_path):\n bucket, blob_name = gcs_uri.replace('gs://', '').split('/', 1)\n client = storage.Client(project='embdatalab')\n bucket = client.get_bucket(bucket)\n prefix = blob_name.split('*')[0]\n unzipped = open(target_path, 'w')\n cmd = \"gunzip -c -f %s >> %s\"\n for blob in bucket.list_blobs(prefix=prefix):\n with tempfile.NamedTemporaryFile(mode='rb+') as f:\n logger.info(\"Downloading %s to %s\" % (blob.path, f.name))\n blob.chunk_size = 2 ** 30\n blob.download_to_file(f)\n f.flush()\n f.seek(0)\n subprocess.check_call(\n cmd % (f.name, unzipped.name), shell=True)\n return unzipped.name", "def download_dataset(target_dataset, comet):\n data_paths = list(get_data_paths().values())[0]\n data_store = StoreManager(path=data_paths)\n\n logging.info('STARTING tar download')\n comet.log_dataset_info(name=target_dataset, version=None, path=data_paths)\n start = time.time()\n data_store.download_file(target_dataset)\n end = time.time()\n logging.info('DOWNLOAD time taken: ' + str(end - start))\n comet.log_dataset_hash(target_dataset)\n if target_dataset.endswith('.tar.gz'):\n logging.info('STARTING untarring')\n tf = tarfile.open(target_dataset)\n tf.extractall()\n logging.info('COMPLETING untarring')", "def gcs_download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = client #storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)", "def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n 
os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")", "def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)", "def download_entire_dataset(dataset_name, num_data, labels, method, cache_dir):\n\n print('Downloading {}...'.format(dataset_name))\n preprocessor = preprocess_method_dict[method]()\n\n # Select the first `num_data` samples from the dataset.\n target_index = numpy.arange(num_data) if num_data >= 0 else None\n dataset_parts = D.molnet.get_molnet_dataset(dataset_name, preprocessor,\n labels=labels,\n target_index=target_index)\n dataset_parts = dataset_parts['dataset']\n\n # Cache the downloaded dataset.\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n for i, part in enumerate(['train', 'valid', 'test']):\n filename = dataset_part_filename(part, num_data)\n path = os.path.join(cache_dir, filename)\n NumpyTupleDataset.save(path, dataset_parts[i])\n return dataset_parts", "def _DownloadData(data_dir, data_path, vm):\n\n vm.Install('google_cloud_sdk')\n vm.RemoteCommand(\n 'if [ ! 
-d \\\"{data_path}\\\" ]; then '\n ' sudo mkdir -p {data_path} && '\n ' sudo chmod a+w {data_path} && '\n ' {gsutil_path} -m cp -r {data_dir}/* {data_path} ;'\n 'fi'.format(\n data_dir=data_dir,\n gsutil_path=google_cloud_sdk.GSUTIL_PATH,\n data_path=data_path))", "def download_dataset(dataset, subset='train', format='bow', root='./data', verbose=False):\n dataset_meta = _get_data_meta(dataset, subset=subset, format=format)\n dataset_dest = path.join(root, dataset.lower() + '_' + format + \".zip\")\n data_dir = path.join(root, dataset_meta['dir'])\n file_path = dataset_meta[subset]\n\n if isinstance(file_path, str):\n file_path = [file_path]\n elif isinstance(file_path, dict):\n file_path = file_path.values()\n if not all(path.exists(path.join(data_dir, f)) for f in file_path):\n if 'drive.google.com' in dataset_meta['url']:\n _download_file_from_google_drive(dataset_meta['url'], dataset_dest, unzip=True, overwrite=True, delete_zip=True, verbose=verbose)", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def download_dataset(urls, path):\n\n # check if the path exist or not\n os.makedirs(os.path.normpath(path), exist_ok=True)\n\n # Download the dataset\n for key in urls:\n _L(\"Downloading \" + _P(urls[key]) + \" in \" + _S(path))\n # if (urls[key].split('.')[-1] != 'tar'):\n os.system(\"wget {} -P {}\".format(urls[key], path))", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n 
download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def download_and_extract_data(tmp_dir, dataset):\n url = dataset[0]\n print(dataset)\n compressed_filename = os.path.basename(url)\n compressed_file = generator_utils.maybe_download(\n tmp_dir, compressed_filename, url)\n\n for file in dataset[1]:\n tf.logging.info(\"Reading file: %s\" % file)\n filepath = os.path.join(tmp_dir, file)\n\n # Extract from tar if needed.\n if not tf.gfile.Exists(filepath):\n with tarfile.open(compressed_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n\n documents_filename, labels_filename = dataset[1]\n documents_filepath = os.path.join(tmp_dir, documents_filename)\n labels_filepath = os.path.join(tmp_dir, labels_filename)\n return documents_filepath, labels_filepath", "def download_dataset(url=DATASET_URL):\n df = pd.read_csv(url, index_col=0)\n \n # ディレクトリが無ければ,作成する\n if not os.path.isdir(BASE_DIR):\n os.makedirs(BASE_DIR)\n \n df.to_csv(LOCAL_FILE_NAME)", "def download_dataset(dataset, destination):\n\n # Get images belonging to the requested dataset from cache\n cache_df = pd.read_csv(cache_file)\n df = cache_df.loc[cache_df['dataset.name'] == dataset]\n assert (df.shape[0] > 0), \"Dataset {0} does not exist\".format(dataset)\n\n # Create metadata for dataset that includes the file image paths\n print(\"Preprocessing metadata.\")\n files = []\n for _, row in df.iterrows():\n\n if type(row[\"meta.clinical.diagnosis\"]) == str:\n path = os.path.join(row[\"dataset.name\"], slugify(row[\"meta.clinical.diagnosis\"]))\n elif type(row[\"meta.clinical.diagnosis\"]) == str:\n path = os.path.join(row[\"dataset.name\"], slugify(row[\"meta.clinical.benign_malignant\"]))\n else:\n path = os.path.join(row[\"dataset.name\"], \"unknown\")\n\n files.append(os.path.join(path, \"{}.jpg\".format(row[\"_id\"])))\n df[\"file\"] = files\n df.to_csv(os.path.join(destination, \"{0}.csv\".format(dataset)), index=False)\n\n # Download images\n print(\"Downloading images from dataset: {}\".format(dataset))\n for _, row in tqdm(df.iterrows(), total=df.shape[0], desc=\"Downloading images\", unit=\"img\"):\n isic.download_image(row[\"_id\"], os.path.join(destination,row[\"file\"]))", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def cli(ctx, dataset_collection_id, file_path):\n return ctx.gi.dataset_collections.download_dataset_collection(dataset_collection_id, file_path)", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def blob_download(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobc = blob.download()\n return blobc", "def _download(self, url, output_dir, dataset, chunk_size=1024):\n r = self.session.get(url, stream=True, allow_redirects=True)\n if not r.ok:\n r = 
self.session.get(r.url, stream=True, allow_redirects=True, auth=(self._username, self._password))\n file_size = int(r.headers['Content-Length'])\n\n with tqdm(total=file_size, unit_scale=True, unit='B', unit_divisor=1024) as pbar:\n ### GET FILE NAME ###\n if \"Content-Disposition\" in r.headers.keys():\n local_filename = re.findall(\"filename=(.+)\", r.headers[\"Content-Disposition\"])[0]\n else:\n local_filename = url.split(\"/\")[-3]\n local_filename = self.api.lookup(dataset, local_filename)[0]\n local_filename = local_filename + util.convert_to_extension(r.headers['content-type'])\n print(\"*** FNAME\", local_filename)\n\n local_filename = os.path.join(output_dir, local_filename)\n\n ### WRITE FILE ###\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n if chunk:\n f.write(chunk)\n pbar.update(chunk_size)\n return local_filename", "def downloadDataset(datasetName, url):\n\n baseFolder = os.path.dirname(os.path.abspath(__file__))\n destinationFolder = os.path.join(baseFolder, \"DataSets\", datasetName)\n testFolder = os.path.join(destinationFolder, \"test\")\n trainFolder = os.path.join(destinationFolder, \"train\")\n\n if not os.path.exists(os.path.join(destinationFolder, \"test\")):\n filename = os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS.zip\")\n if not os.path.exists(filename):\n print(\"Downloading data from \" + url + \"...\")\n urlretrieve(url, filename)\n\n try:\n print(\"Extracting \" + filename + \"...\")\n with zipfile.ZipFile(filename) as myzip:\n myzip.extractall(destinationFolder)\n print(\"Distributing the Dataset...\")\n distributeDataset(destinationFolder, testFolder, trainFolder)\n print(\"Renaming the files...\")\n renameFiles(testFolder)\n renameFiles(trainFolder)\n finally:\n os.remove(filename)\n print(\"Done.\")\n else:\n print(\"Data already available at \" + baseFolder + \"/\" + datasetName)", "def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def _download_http(source_uri, dest_path, version):\n\n try:\n logger.info(\"Downloading the dataset.\")\n download_file(source_uri=source_uri, dest_path=dest_path)\n except DownloadError as e:\n logger.info(\n f\"The request download from {source_uri} -> {dest_path} can't \"\n f\"be completed.\"\n )\n raise e\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError as e:\n logger.info(\"Checksum mismatch. 
Delete the downloaded files.\")\n os.remove(dest_path)\n raise e", "def _download_and_uncompress_dataset(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)", "def download():\n raise NotImplementedError" ]
[ "0.7185856", "0.68503934", "0.6714282", "0.67088765", "0.66851914", "0.6538648", "0.6525635", "0.6497461", "0.64432293", "0.6433408", "0.6422011", "0.639159", "0.63663715", "0.6344865", "0.6338407", "0.62408376", "0.6234367", "0.62312293", "0.6227992", "0.6183774", "0.6181906", "0.6168266", "0.61625284", "0.61549693", "0.6123016", "0.60875237", "0.603908", "0.6037315", "0.5989672", "0.5964318" ]
0.75893414
0
Finds number of documents in the Tweet collection matching a given search_term (and location, if provided).
def count_tweets(search_term, location=None):
    if location:
        return len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location)))
    else:
        return len(Tweet.objects(Q(keyword_search_term=search_term)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n\n return self._total_results", "def count_term_in_document(self, term, document):\n doc = self.get_document(document)\n for docterm, value in doc.get_terms():\n if docterm == term:\n return value\n return 0", "async def count(\n self, *,\n where: t.Mapping[str, t.Any] = None,\n limit: int = None,\n offset: int = None,\n ) -> int:\n\n extra = {}\n\n if limit:\n extra['limit'] = limit\n\n if offset:\n extra['offset'] = offset\n\n return await self.collection.count_documents(filter=where or {}, **extra)", "def count_term(self, term):\n term_entry = self.get_term(term)\n if term_entry:\n return term_entry.count\n else:\n return 0", "def count(self, ngram, options):\n return len(self.find_docs(ngram, options))", "def count_term_distinct_documents(self, term):\n term_entry = self.get_term(term)\n if term_entry:\n return term_entry.distinct_docs\n else:\n return 0", "def __query_tf(query, term):\n count = 0\n for q_term in query.split():\n if term == q_term:\n count += 1\n return count", "def track_count(search_string):\n track_results = sp.search(search_string, type='track', limit=1, offset=0)\n return track_results['tracks']['total']", "def search_tweets(q, count=100, result_type=\"recent\"):\n\n return t.search.tweets(q=q, result_type=result_type, count=count)", "def search(term, location, search_limit):\n\n payload = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': search_limit\n }\n\n return request(SEARCH_PATH, payload)", "def get_documents_count(self, index, **kwargs):\n return self._build_search(index, **kwargs).count()", "def search(self, query):\n if query is None:\n return -1\n\n count = 0\n\n for field in [self.key, self.name] + self.aliases + self.lines:\n count += field.lower().count(query.lower())\n\n return count", "def book_count(self):\n\n try:\n cur = self._db.cursor()\n cur.execute('SELECT COUNT(*) FROM books')\n return cur.fetchone()[0]\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e", "def tf_query(self, term: str, query_words: List) -> int:\n return query_words.count(term)", "def search(term, location):\n \n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)", "def print_num_search_results(driver, keyword, location):\n # scroll to top of page so first result is in view\n driver.execute_script(\"window.scrollTo(0, 0);\")\n selector = \"div.results-context div strong\"\n try:\n num_results = driver.find_element_by_css_selector(selector).text\n except Exception as e:\n num_results = ''\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearching {} results for '{}' jobs in '{}' \" \\\n \"\\n\\n\\n\\n\\n\".format(num_results, keyword, location))", "def count(cls, **kwargs):\n 
kwargs.setdefault('params', {})\n kwargs['params'].update({'search_type': 'count'})\n res = cls.search(raw_result=True, **kwargs)\n return res['hits']['total']", "def search(term, location):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)", "def get_number_of_document_word_occurs_in(self, word):\n return len(self.dictionary[word]['docs'])", "def count(self, query=None):\n return self.create_search(query).count()", "def count_go_term(self, query_term=\"growth\"):\n count = 0\n for go_term in self.dict_go.values():\n if query_term in go_term.name:\n count += 1\n return count", "def count_documents(self):\n return self.count_term_distinct_documents(ANY)", "def search(bearer_token, term, location, offset = None, SEARCH_LIMIT = 3):\n #'limit': SEARCH_LIMIT,\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': None,\n 'offset':offset\n }\n return request(API_HOST, SEARCH_PATH, bearer_token, url_params=url_params)", "def keyword_count(searches, doc):\n for search in searches:\n print \"\\\"{0}\\\": {1}\".format(search, len(re.findall(searches[search], doc)))", "def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results", "async def count(self) -> int:\n return (\n await self.document_model.get_motor_collection().count_documents(\n self.get_filter_query()\n )\n )", "def run_and_get_word_count(self) -> int:\n r = requests.get(self.url)\n if r.status_code != status.HTTP_200_OK:\n raise ScraperException\n soup = BeautifulSoup(r.content, \"html.parser\")\n matches = soup(text=re.compile(f\"{self.word}\"))\n count = 0\n for match in matches:\n words = re.findall(fr\"\\b{self.word}\\b\", match)\n count = count + len(words)\n return count", "def get_word_frequency(client, search_term=''):\n\tif not search_term:\n\t\tprint \"Enter a word to count its frequency:\"\n\t\tsearch_term = raw_input()\n\n\tlists = client.get_item_lists()\n\n\tprimary_text = ''\n\t# Iterate through only the personal item list\n\tfor l in lists['own']:\n\t\titem_list = client.get_item_list(l['item_list_url'])\n\t\tfor i in item_list:\n\t\t\t# get item object from the item_url\n\t\t\titem = client.get_item(i)\n\t\t\tprimary_text = primary_text + item.get_primary_text()\n\n\twords = word_tokenize(primary_text)\n\n\tword_frequency = words.count(search_term)\n\tprint word_frequency\n\treturn word_frequency", "def search(api_key, term, location):\r\n\r\n url_params = {\r\n 'term': term.replace(' ', '+'),\r\n 'location': location.replace(' ', '+'),\r\n 'limit': SEARCH_LIMIT\r\n }\r\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)" ]
[ "0.67774284", "0.67774284", "0.64773935", "0.6338761", "0.63320786", "0.6253776", "0.61553955", "0.6143456", "0.6119857", "0.605183", "0.60442805", "0.6041794", "0.6023299", "0.60119075", "0.59792614", "0.59583217", "0.5956584", "0.59333336", "0.59300566", "0.5897961", "0.585787", "0.58092713", "0.5773547", "0.577101", "0.57684046", "0.57635385", "0.5760088", "0.57101226", "0.57050276", "0.56854296" ]
0.81229687
0
Calculates a keyword's historical sentiment (restricted within a location, if provided).
def get_historical_sentiment(search_term, location=None):
    if location:
        positive = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type="positive")))
        negative = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type="negative")))
        neutral = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type="neutral")))
    else:
        positive = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(sentiment_type="positive")))
        negative = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(sentiment_type="negative")))
        neutral = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(sentiment_type="neutral")))
    result = [["Positive", positive], ["Neutral", neutral], ["Negative", negative]]
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_historical_sentiment_avg(search_term, location=None):\r\n\r\n total = 0\r\n\r\n if location:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))\r\n count = len(tweets)\r\n else:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term))\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment", "def get_sentiment_overtime(keyword, location=None):\r\n\r\n # Get date 10 days ago\r\n ten_days_ago = datetime.now() - timedelta(days=10)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n if location:\r\n match = {\r\n \"$match\":\r\n {\r\n \"keyword_search_term\": keyword,\r\n \"location_address\": location,\r\n \"tweet_time\": {\"$gt\": ten_days_ago}\r\n }\r\n }\r\n else:\r\n match = {\r\n \"$match\":\r\n {\r\n \"keyword_search_term\": keyword,\r\n \"tweet_time\": {\"$gt\": ten_days_ago}\r\n }\r\n }\r\n\r\n project = {\r\n \"$project\":\r\n {\r\n \"sentiment_score\": 1,\r\n \"day\":\r\n {\r\n \"$substr\": [\"$tweet_time\", 0, 10]\r\n }\r\n }\r\n }\r\n\r\n group = {\r\n \"$group\":\r\n {\r\n \"_id\": \"$day\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n }\r\n\r\n limit = {\"$limit\": 10}\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([match, project, group, limit])\r\n\r\n # Add query results to list\r\n l = []\r\n for i in result['result']:\r\n average = \"{0:.2f}\".format(i['average'])\r\n t = [i['_id'], average]\r\n l.append(t)\r\n\r\n return l", "def sentiment(self) -> Dict[str, float]:", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. 
Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "def get_sentiment():\n # USER REQUEST PARAMETERS\n hashtag = request.args.get('hashtag', '')\n if hashtag == \"\":\n return \"Please specify a non null hashtag\"\n nb_days = request.args.get('nb_days', 7,type=int)\n nb_days = int(min(max(nb_days, 1), 7))\n nb_tweets = max(request.args.get('nb_tweets', nb_days * 10), nb_days,type=int)\n get_topic_words = bool(int(request.args.get('get_topic_words',\"1\")))\n n_topics = request.args.get('n_topics', 1,type=int)\n n_words_per_topic = request.args.get('n_words_per_topic', 10,type=int)\n lda_passes = request.args.get('lda_passes', 4,type=int)\n return_tweets = bool(int(request.args.get('return_tweets', \"0\")))\n language = request.args.get('language', \"en\")\n\n # TWITTER REQUEST PARAMETERS\n days_offsets = range(-nb_days + 1, 1)\n query_key_value = \" -is:retweet -is:quote lang:\" + language\n tweet_fields = \"created_at,public_metrics,author_id\"\n max_nb_tweets_per_day = nb_tweets // len(days_offsets)\n query_string = \"#\" + hashtag.strip() + query_key_value\n\n # COMPUTE RESULTS\n tweets = get_tweets(query_string, days_offsets, tweet_fields,\n max_nb_tweets_per_day, nb_tweets, search_tweets_args)\n sentiments_df, cleaned_tweets_texts, filtered_tweets_df = compute_sentiment(\n tweets, model, tokenizer)\n\n if get_topic_words:\n top_topics = get_topics_from_tweets(NLTK_DATA_PATH, cleaned_tweets_texts, n_topics=n_topics,\n n_words_per_topic=n_words_per_topic, n_passes=lda_passes,\n force_download=False)\n\n if return_tweets:\n sentiments_tweets_df = pd.concat(\n (sentiments_df, filtered_tweets_df.reset_index(drop=True)), axis=1)\n\n results = {\"sentiments_json\": sentiments_tweets_df.to_json()}\n else:\n results = {\"sentiments_json\": sentiments_df.to_json()}\n\n if get_topic_words:\n results[\"top_topics_json\"] = top_topics.to_json()\n\n return json.dumps(results)", "def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process 
sentiment for article. Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment", "def get_overall_sentiment(text):\n return alchemy_language.sentiment(text=text)", "def get_sentiment(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\treturn blob.sentiment[0]", "def stockSentiment(stockName, numTweets=100):\n\n listOfTweets = user.search(stockName, count=numTweets)\n threshold = posSentTweet = negSentTweet = 0\n\n for tweet in listOfTweets:\n analysis = TextBlob(tweet.text)\n if analysis.sentiment.polarity >= threshold:\n posSentTweet = posSentTweet + 1\n else:\n negSentTweet = negSentTweet + 1\n\n if posSentTweet > negSentTweet:\n print(\"Overall Positive\")\n return True\n else:\n print(\"Overall Negative\")\n return False", "def get_sentiment(phrase):\n print('Getting the sentiment...')\n try:\n model = get_ml_model()\n prediction = model.predict([phrase])\n return int(prediction)\n except RedisError as e:\n print('Getting the sentiment was not successful and threw an error.')\n print(e)", "def add_sentiment(self):\n self.record = 0\n letter_series = self.dataframe.letter \n sentiment_call = lambda letter_text: self._evaluate_sentiment(letter_text)\n sentiment_data = letter_series.map(sentiment_call)\n self.dataframe['sentiment'] = sentiment_data\n self._unpack_sentiment_data()", "def searchByKeywordPro(self, query, since=\"\", until=\"\", maxResults=None):\n\n tweetsList = []\n if(not maxResults):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n while(next_token):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n else:\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n while(next_token and maxResults > 0):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n for status in tweetsList:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': query,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"text\"],\n 'hashtags': status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n 'quote': {},\n }\n if hasattr(status, \"quoted_status\"):\n if \"extended_tweet\" in status._json[\"quoted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"text\"]\n status_refined['quote'] = {\n 'original_retweet_id': status._json[\"quoted_status\"][\"id\"],\n 'origUserLoc': status._json[\"quoted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"quoted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"quoted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"quoted_status\"][\"coordinates\"],\n 
'origHashtags': status._json[\"quoted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"quote_count\"],\n }\n elif hasattr(status, \"retweeted_status\"):\n print(status._json[\"retweeted_status\"])\n if \"extended_tweet\" in status._json[\"retweeted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n elif hasattr(status, \"extended_tweet\"):\n if \"extended_tweet\" in status._json.keys():\n status_refined['tweetText'] = status._json[\"extended_tweet\"][\"full_text\"]\n self.tweets.append(status_refined)\n return self.tweets", "def getSentiment(s):\n headers = {\"Ocp-Apim-Subscription-Key\" : \"4c28d3a67a12442cad6666a3200c49f5\",\n \"Content-Type\" : \"application/json\", \"Accept\" : \"application/json\"}\n url = \"https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment\"\n json = {\"documents\": [{\"language\": \"en\", \"id\" : \"1\"}]}\n json['documents'][0]['text'] = s\n sentiment = r.post(url, headers = headers, json = json)\n sentiment = j.loads(sentiment.text)\n return sentiment['documents'][0]['score']", "def perform_bing_sentiment_lexicon_lookup(tweets):\n words = []\n for t in tweets:\n for phrase in t.tagged_words:\n for word in phrase:\n try:\n if word[\"pos\"] in TYPECRAFT_SENTIWORDNET:\n words.append(word['word'])\n except KeyError:\n continue \n \n \n lex = Lexicon(BingTranslater(words), SentiWordNetLexicon())\n words_with_sentimentvalues=[]#list of dicts\n print \"Getting sentiment values\"\n for t in tweets:\n sentiwords =[]\n sentiwords_with_values={}\n for phrase in t.tagged_words:\n for word in phrase:\n try:\n if word[\"pos\"] in TYPECRAFT_SENTIWORDNET:\n sentiwords.append(word['word'])\n except KeyError:\n continue\n for sentiword in sentiwords:\n sentivalues = lex.translate_and_get_lexicon_sentiment(sentiword)\n if sentivalues!=None:\n print \"Adding sentivalues: \",sentivalues\n sentiwords_with_values[sentiword] = sentivalues\n words_with_sentimentvalues.append(sentiwords_with_values)\n \n return words_with_sentimentvalues", "def perform_google_sentiment_lexicon_lookup(tweets):\n \n lex = Lexicon(GoogleTranslater(), SentiWordNetLexicon())\n print \"Getting sentiment values\"\n tweet_sentiments = []\n for t in tweets:\n tweet_sentiments.append(lex.translate_sentence_and_get_lexicon_sentiment(t.text))\n \n print tweet_sentiments\n reduced_tweet_sentiments = []\n for sentiments in tweet_sentiments:\n polar_sum = sum([s[0] for s in sentiments])\n negative_sum = sum([s[1] for s in sentiments])\n objective_sum = sum([s[2] for s in sentiments])\n reduced_tweet_sentiments.append((polar_sum, negative_sum, objective_sum))\n print reduced_tweet_sentiments\n return reduced_tweet_sentiments", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n 
raise NotImplementedError('Method not implemented!')", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)", "def AnalyzeSentiment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def calculate_sentiment(positive, negative):\n denominator = (positive - negative)\n numerator = (positive + negative)\n if numerator == 0:\n return 0\n return 0.268 * (denominator / numerator)", "def count_tweets_based_on_words(word, positve_sentiment, negative_sentiment):\n tweet_counter = dict()\n index_db = couch[config.get(\"database\", \"DB_INDEX\")]\n items = index_db.view(view_text_index)\n try:\n tweet_id_holder = items[word].rows[0].value\n except Exception as e:\n return tweet_counter\n\n tweets_all = tweet_database.view(view_tweet_info)\n\n for tweet_id in tweet_id_holder:\n # check redundancy\n if tweet_id not in tweet_index:\n tweet = tweets_all[tweet_id]\n tweet_index.add(tweet_id)\n try:\n # set polarity value\n if negative_sentiment:\n if tweet.rows[0].value[3] < 0:\n tweet_counter = add_state_count(tweet, tweet_counter)\n elif positve_sentiment:\n if tweet.rows[0].value[3] > 0:\n tweet_counter = add_state_count(tweet, tweet_counter)\n else:\n tweet_counter = add_state_count(tweet, tweet_counter)\n except:\n return tweet_counter\n return tweet_counter", "def feature_sentiment(sentence: str, lemmatize_text: bool = False\n ) -> Union[collections.Counter, Tuple[collections.Counter, str]]:\n sent_dict = collections.Counter()\n sentence = _NLP(sentence)\n debug = 0\n for token in sentence:\n # check if the word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n if (token.dep_ == \"advmod\"):\n continue\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n # for opinion words that are adjectives, adverbs, verbs...\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) 
add more weight to sentiment\n # This could be better updated for modifiers that either positively or negatively emphasize\n if ((child.dep_ == \"amod\") or (child.dep_ == \"advmod\")) and (child.text.lower() in _OPINION_WORDS):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n # if verb, check if there's a direct object\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\":\n conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if ((child.dep_ == \"amod\") or (child.dep_ == \"advmod\")) and (child.text.lower() in _OPINION_WORDS):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if (child.dep_ == \"neg\"):\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n debug += 1\n if lemmatize_text:\n # Lemmatize using spaCy\n text = \" \".join([word.lemma_ if word.lemma_ != '-PRON-' else word.text\n for word in sentence])\n # Leave only letter characters\n text = re.sub(\"[^a-zA-z\\s]\", \" \", text)\n # Substitute any white space character with a single space\n text = \" \".join(text.split())\n return sent_dict, text.lower()\n return sent_dict", "def sents(path):\n\n data = pd.read_csv( path , sep = \"\\t\", index_col=False, encoding='latin-1', low_memory=False)\n df = DataFrame(data)\n# print(df['Sentiment'])\n labelCount = df.groupby(df['Sentiment']).count()\n #print(labelCount)\n x = df['SentimentText'].str.replace('http\\S+|www.\\S+', '', case=False)\n y = df['Sentiment']\n x = x.str.replace('[^a-zA-Z]', ' ') #\n x_check = [\" \".join([lemmatize(word) for word in sentence.split(\" \")]) for sentence in x]\n stopset = set(stopwords.words('English'))\n x_check = [' '.join(w for w in sentence.split() if w.lower() not in stopset)\n for sentence in x\n ]\n #print(x_check)\n return x_check, y", "def analyze_trending_keyword(keyword=\"pokemon\", count=100, keep_all=False, debug=False):\n print('analyzing keyword: {}'.format(keyword))\n tweets = get_search_tweets(query=keyword, count=count, debug=debug)\n\n return process_tweets(tweets, keep_all=keep_all, debug=debug)", "def update_pos_neg_neutral_cache(sentiment_term, df):\n THRESHOLD=0.3\n pos = len(list([x for x in df[\"sentiment\"] if float(x)>=THRESHOLD]))\n neg = len(list([x for x in df[\"sentiment\"] if float(x)<=-THRESHOLD]))\n neutral = len(list([x for x in df[\"sentiment\"] if float(x)<THRESHOLD and float(x)>-THRESHOLD]))\n old_pos = cache.get(\"positive_count_{}\".format(sentiment_term))\n old_neg = cache.get(\"negative_count_{}\".format(sentiment_term))\n old_neu = cache.get(\"neutral_count_{}\".format(sentiment_term))\n if old_pos:\n cache.client.incr(\"positive_count_{}\".format(sentiment_term), pos)\n else:\n cache.set(\"positive_count_{}\".format(sentiment_term), pos)\n \n if 
old_neg:\n cache.client.incr(\"negative_count_{}\".format(sentiment_term), neg)\n else:\n cache.set(\"negative_count_{}\".format(sentiment_term), neg)\n \n if old_neu:\n cache.client.incr(\"neutral_count_{}\".format(sentiment_term), neutral)\n else:\n cache.set(\"neutral_count_{}\".format(sentiment_term), neutral)\n return (pos, neg, neutral)", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def sentimentAnalysis(fileName, city, outFileName):\n tweetTokenizer = TweetTokenizer()\n punct = list(string.punctuation)\n stopwordList = stopwords.words('english') + punct + ['rt', 'via', '...']\n vaderSent = vaderSentimentAnalysis(fileName, tweetTokenizer, stopwordList)\n vaderSent['city'] = city\n vaderSent = vaderSent[vaderSent['sentiment'] < 0]\n vaderSent.to_csv(outFileName)", "def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment", "def analyze(self, text):\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n \n tokens = tokenizer.tokenize(text)\n \n sentiment = 0\n \n for word in tokens:\n if word in self.__positives:\n sentiment += 1\n elif word in self.__negatives:\n sentiment -= 1\n \n return sentiment" ]
[ "0.67188215", "0.67036396", "0.6334149", "0.6167096", "0.59021384", "0.5850361", "0.5808837", "0.57456166", "0.55691725", "0.55315197", "0.55021304", "0.5500344", "0.5474109", "0.54653317", "0.5465105", "0.5422208", "0.5408632", "0.5408632", "0.54046506", "0.5381399", "0.5356791", "0.5338209", "0.53111", "0.5310745", "0.5304526", "0.53033805", "0.53006715", "0.52953094", "0.5284263", "0.5278341" ]
0.704126
0
Calculates the average sentiment score for a given keyword (restricted within a location, if provided).
def get_historical_sentiment_avg(search_term, location=None):
    total = 0
    if location:
        tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))
        count = len(tweets)
    else:
        tweets = Tweet.objects(Q(keyword_search_term=search_term))
        count = len(tweets)
    for tweet in tweets:
        total += tweet.sentiment_score
    # Calculate average
    avg = total / count
    avg = float("{0:.2f}".format((float(avg))))
    return avg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sentiment_overtime(keyword, location=None):\r\n\r\n # Get date 10 days ago\r\n ten_days_ago = datetime.now() - timedelta(days=10)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n if location:\r\n match = {\r\n \"$match\":\r\n {\r\n \"keyword_search_term\": keyword,\r\n \"location_address\": location,\r\n \"tweet_time\": {\"$gt\": ten_days_ago}\r\n }\r\n }\r\n else:\r\n match = {\r\n \"$match\":\r\n {\r\n \"keyword_search_term\": keyword,\r\n \"tweet_time\": {\"$gt\": ten_days_ago}\r\n }\r\n }\r\n\r\n project = {\r\n \"$project\":\r\n {\r\n \"sentiment_score\": 1,\r\n \"day\":\r\n {\r\n \"$substr\": [\"$tweet_time\", 0, 10]\r\n }\r\n }\r\n }\r\n\r\n group = {\r\n \"$group\":\r\n {\r\n \"_id\": \"$day\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n }\r\n\r\n limit = {\"$limit\": 10}\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([match, project, group, limit])\r\n\r\n # Add query results to list\r\n l = []\r\n for i in result['result']:\r\n average = \"{0:.2f}\".format(i['average'])\r\n t = [i['_id'], average]\r\n l.append(t)\r\n\r\n return l", "def get_query_sentiment_avg(tweets):\r\n\r\n total = 0\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def analyze_trending_keyword(keyword=\"pokemon\", count=100, keep_all=False, debug=False):\n print('analyzing keyword: {}'.format(keyword))\n tweets = get_search_tweets(query=keyword, count=count, debug=debug)\n\n return process_tweets(tweets, keep_all=keep_all, debug=debug)", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. 
Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average", "def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment", "def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average", "def get_sentiment_trends(order):\r\n\r\n # Get date seven days ago\r\n seven_days_ago = datetime.now() - timedelta(days=7)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([\r\n {\r\n \"$match\":\r\n {\r\n \"tweet_time\": {\"$gt\": seven_days_ago}\r\n }\r\n },\r\n {\r\n \"$group\":\r\n {\r\n \"_id\": \"$keyword_search_term\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n },\r\n {\r\n \"$sort\":\r\n {\r\n \"average\": order\r\n }\r\n },\r\n {\r\n \"$limit\": 10\r\n }\r\n ])\r\n\r\n return result", "def sentence_to_avg(sentence, word_to_vec_map):\n # Get a valid word contained in the word_to_vec_map. \n any_word = list(word_to_vec_map.keys())[0]\n \n ### START CODE HERE ###\n # Step 1: Split sentence into list of lower case words (≈ 1 line)\n words = sentence.lower().split()\n\n # Initialize the average word vector, should have the same shape as your word vectors.\n avg = np.zeros(word_to_vec_map[any_word].shape)\n \n # Initialize count to 0\n count = 0\n \n # Step 2: average the word vectors. You can loop over the words in the list \"words\".\n for w in words:\n # Check that word exists in word_to_vec_map\n if w in word_to_vec_map:\n avg += word_to_vec_map[w]\n # Increment count\n count +=1\n \n if count > 0:\n # Get the average. 
But only if count > 0\n avg = avg / count\n \n ### END CODE HERE ###\n \n return avg", "def score(self, sentence):\n # count each incremented word\n for word in sentence:\n if word not in self.unigramCounts:\n self.zeroCount += 1\n\n # apply laplace smoothing to unigram model\n score = 0.0\n for word in sentence:\n count = self.unigramCounts[word]\n score += math.log(count + 1)\n score -= math.log(self.totalCount + self.zeroCount)\n return score", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def mean_average_position():\n pass", "def analyze(content):\r\n client = language.LanguageServiceClient()\r\n\r\n document = types.Document(\r\n content=content,\r\n type=enums.Document.Type.PLAIN_TEXT)\r\n annotations = client.analyze_sentiment(document=document)\r\n\r\n # Write results to GCS \r\n return annotations.document_sentiment.score", "def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity", "def sentiment_score(review):\n return sum([sentence_score(sentence, None, 0.0) for sentence in review])", "def mean_avg_precision(top_k_results, relevance):\n map_score = 0.0\n for j, scores in relevance.items():\n precision, _ = calculate_precision_recall(top_k_results[j - 1], scores)\n relevant = set()\n for x in scores:\n relevant.add(x[0])\n \n precision_score, cnt = 0.0, 0\n for i in range(len(top_k_results[j - 1])):\n if top_k_results[j - 1][i] in relevant:\n precision_score += precision[i]\n cnt += 1\n \n map_score += precision_score if cnt == 0 else precision_score / cnt\n \n map_score /= len(relevance)\n \n return map_score", "def question_sentiment_analysis(self):\n sentiments = get_sentiments()\n student_data = self.responses\n question_text = 'In one word'\n\n # Set up data for calculations\n num_scores = 0\n sentiment_sum = 0\n score_list = list()\n\n for response in student_data:\n\n if question_text in response.question.text:\n words = response.response.lower().split()\n\n # Find the sentiment score for each word, and add it to our data\n for word in words:\n # Ignore the word if it's not in the sentiment dictionary\n if word in sentiments:\n sentiment_sum += sentiments[word]\n num_scores += 1\n score_list.append(sentiments[word])\n\n average = sentiment_sum / num_scores\n standard_dev = statistics.stdev(score_list)\n\n return average, standard_dev", "def analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score", "def analyze(self, text):\n #Check each word in text\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = 
tokenizer.tokenize(text)\n total_score = 0\n #Sum the total score\n for token in tokens:\n token = token.lower()\n if token in self.positives:\n total_score = total_score + 1\n elif token in self.negatives:\n total_score = total_score - 1\n else:\n total_score = total_score + 0\n \n return total_score", "def scorePetition(petition, *keywords):\n\n score = 0\n\n for key in keywords:\n\n # Normalize both key and strings to be searched\n key = key.lower()\n title = petition.title.lower()\n content = petition.content.lower()\n\n # Score eache part of the Petition depending on the number of occurancies\n titleScore = title.count(key)\n contentScore = content.count(key)\n\n # Give more weight to the keys found in title\n score += titleScore*50 + contentScore\n\n return score", "def get_overall_sentiment(text):\n return alchemy_language.sentiment(text=text)", "def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score", "def test_get_average_of_sentiment_scores():\n\n dict_of_avg_scores = get_average_of_sentiment_scores(\n 'politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print('average sentiment scores all comments')\n for key, value in dict_of_avg_scores.items():\n print(key, value)\n print()", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]", "def analyze(text):\n client = language_service_client.LanguageServiceClient()\n\n # with open(movie_review_filename, 'r') as review_file:\n # Instantiates a plain text document.\n \n # content = text.read()\n content=text\n document = language_v1.types.Document(\n content=content,\n type=enums.Document.Type.PLAIN_TEXT,\n language='en'\n )\n # type='PLAIN_TEXT',\n # )\n \n try:\n response = client.analyze_sentiment(\n document=document,\n encoding_type='UTF32',\n )\n sentiment = response.document_sentiment\n return (sentiment.score)\n except InvalidArgument:\n sentiment=0.0\n return sentiment", "def analyze(self, text):\n score =0\n token = 
TweetTokenizer()\n tokens = token.tokenize(text)\n for token in tokens:\n if token.lower() in self.pos_list:\n score+=1\n elif token.lower() in self.neg_list:\n score-=1\n\n return score", "def score(self, sentence):\n score = 0.0\n last_token = None\n for token in sentence:\n if not last_token:\n last_token = token\n continue\n tup = (last_token, token)\n if tup in self.counts:\n score += self.s[tup]\n else: # stupid backoff to add-one smoothed unigram\n if self.s[token]: score += self.s[token]\n else: score += math.log(1.0 * (self.counts[token] + 1) / (self.ntokens * 2))\n last_token = token\n return score", "def sentiment_analyzer(text):\n\n\tlower_text = text.lower()\n\t\t\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tsent_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tsent_index += lower_text.count(positive_words[x])\n\tfor x in range(len(negative_words)):\n\t\tsent_index -= lower_text.count(negative_words[x])\n\tif '!' in text:\n\t\tsent_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tsent_index *= hashtag_scaling * lower_text.count('#') + 1\n\tsent_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\t\t\n\treturn sent_index" ]
[ "0.70859677", "0.63445497", "0.615348", "0.6074", "0.607026", "0.6054127", "0.60071707", "0.59709895", "0.58449465", "0.58252454", "0.5823854", "0.58144957", "0.57648957", "0.57458204", "0.5664769", "0.5651309", "0.56352067", "0.56242144", "0.56193566", "0.5584524", "0.55805176", "0.55715257", "0.5557865", "0.55449075", "0.5499219", "0.54990834", "0.5484504", "0.5446697", "0.54220617", "0.5421664" ]
0.74165547
0
Calculates the average sentiment score in a given query set of Tweets.
def get_query_sentiment_avg(tweets):

    total = 0
    count = len(tweets)

    for tweet in tweets:
        total += tweet.sentiment_score

    # Calculate average
    avg = total / count
    avg = float("{0:.2f}".format((float(avg))))

    return avg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_query_statistics(tweets, sentiment_aggregate_list):\r\n\r\n total = len(tweets)\r\n positive_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[0][1]/total*100))))\r\n neutral_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[1][1]/total*100))))\r\n negative_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[2][1]/total*100))))\r\n\r\n result = {\"%Positive\": positive_percentage, \"%Neutral\": neutral_percentage, \"%Negative\": negative_percentage, \"Total\": total}\r\n return result", "def get_sentiment_trends(order):\r\n\r\n # Get date seven days ago\r\n seven_days_ago = datetime.now() - timedelta(days=7)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([\r\n {\r\n \"$match\":\r\n {\r\n \"tweet_time\": {\"$gt\": seven_days_ago}\r\n }\r\n },\r\n {\r\n \"$group\":\r\n {\r\n \"_id\": \"$keyword_search_term\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n },\r\n {\r\n \"$sort\":\r\n {\r\n \"average\": order\r\n }\r\n },\r\n {\r\n \"$limit\": 10\r\n }\r\n ])\r\n\r\n return result", "def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity", "def get_historical_sentiment_avg(search_term, location=None):\r\n\r\n total = 0\r\n\r\n if location:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))\r\n count = len(tweets)\r\n else:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term))\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def sentiment_score(review):\n return sum([sentence_score(sentence, None, 0.0) for sentence in review])", "def get_average_of_sentiment_scores(input_file_name):\n\n subreddit_name = input_file_name.split('_')[0]\n\n list_of_columns_to_be_graphed = ['vader_compound_score', 'vader_negative_score', 'vader_neutral_score',\n 'vader_positive_score', 'whole_comment_sentiment_flair']\n\n avg_scores = {'avg_vader_compound_score': 0, 'avg_vader_negative_score': 0, 'avg_vader_neutral_score': 0,\n 'avg_vader_positive_score': 0, 'avg_whole_comment_sentiment_flair': 0}\n\n # gets the dataframe\n df = get_df_from_csv(input_file_name)\n\n # creates date object column for matplotlib\n df['date'] = df['created_utc'].apply(lambda x: mdate.epoch2num(x))\n\n # sorts df according to created_utc\n df = df.sort_values(by=['date'])\n\n # get total number of comments\n num_comments = len(df)\n\n # avg_vader_compound_score = df['vader_compound_score'].mean()\n # avg_vader_negative_score = df['vader_negative_score'].mean()\n # avg_vader_neutral_score = df['vader_neutral_score'].mean()\n # avg_vader_positive_score = df['vader_positive_score'].mean()\n # avg_whole_comment_sentiment_flair = df['whole_comment_sentiment_flair'].mean()\n\n for col in list_of_columns_to_be_graphed:\n # print('Average ' + col + ':', df[col].mean())\n avg_scores['avg_' + col] = df[col].mean()\n\n return avg_scores", "def question_sentiment_analysis(self):\n sentiments = get_sentiments()\n student_data = self.responses\n question_text = 'In one word'\n\n # Set up data for calculations\n num_scores = 0\n sentiment_sum = 0\n score_list = list()\n\n 
for response in student_data:\n\n if question_text in response.question.text:\n words = response.response.lower().split()\n\n # Find the sentiment score for each word, and add it to our data\n for word in words:\n # Ignore the word if it's not in the sentiment dictionary\n if word in sentiments:\n sentiment_sum += sentiments[word]\n num_scores += 1\n score_list.append(sentiments[word])\n\n average = sentiment_sum / num_scores\n standard_dev = statistics.stdev(score_list)\n\n return average, standard_dev", "def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment", "def averages():\r\n totalsubs = 0\r\n for sub in subs:\r\n totalsubs += sub\r\n avgsubs = totalsubs / len(subs)\r\n\r\n totalsent = 0\r\n for sent in sentiments:\r\n totalsent += sent\r\n avgsent = totalsent / len(sentiments)\r\n print('The average subjectivity is: ' + str(avgsubs))\r\n print('The average sentiment is: ' + str(avgsent))", "def aggregate_sentiment(tweets):\r\n\r\n positive = 0\r\n negative = 0\r\n neutral = 0\r\n\r\n for tweet in tweets:\r\n if tweet.sentiment_type == \"positive\":\r\n positive += 1\r\n elif tweet.sentiment_type == \"negative\":\r\n negative += 1\r\n else:\r\n neutral += 1\r\n\r\n result = [[\"Positive\", positive], [\"Neutral\", neutral], [\"Negative\", negative]]\r\n return result", "def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment", "def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average", "def test_get_average_of_sentiment_scores():\n\n dict_of_avg_scores = get_average_of_sentiment_scores(\n 'politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print('average sentiment scores all comments')\n for key, value in dict_of_avg_scores.items():\n print(key, value)\n print()", "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very positive, -1 is very negative\n mean_sentiment = 0.0\n\n for 
tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def scoreTweet(tweet, scores):\n try:\n lang = tweet['lang']\n twext = tweet['text']\n except: \n lang = 'Not english'\n twext = ''\n # Initialise some data\n tweet_total_score = 0\n # Prepare the tweet\n clean_tweet = cleanTweetText(twext).split(' ')\n for word in clean_tweet: \n try: word_score = scores[word]\n except: word_score = 0 \n # Add the word's score to the overall tweet score\n tweet_total_score += word_score\n # Print some output\n # print clean_tweet\n return tweet_total_score", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def calculate_tweet_score(tweet,scores):\n\ttweet = tweet.split(\" \")\n\ttweet_score = 0\n\tfor word in tweet:\n\t\ttweet_score += scores.get(word,0)\n\treturn tweet_score", "def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average", "def average_result(set_):\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n set_results = card_data.search(where('set') == set_)\n print(set_results)\n c = r = e = l = g_c = g_r = g_e = g_l = total = 0\n # TODO: can revamp with some collections.counter usage, probably\n for entry in set_results:\n total += 1\n c += entry['commons']\n r += entry['rares']\n e += entry['epics']\n l += entry['legendaries']\n g_c += entry['g_commons']\n g_r += entry['g_rares']\n g_e += entry['g_epics']\n g_l += entry['g_legendaries']\n\n print('Average of: {} commons, {} rares, {} epics, {} legendaries \\n'\n ' {} golden commons, {} golden rares, {} golden epics, {} '\n 'golden legendaries'.format(c/total, r/total, e/total, l/total, g_c/total, g_r/total, g_e/total, g_l/total))\n\n pass", "def process_batch_tweets(tweets, sentiment):\n from collections import Counter\n scoreboard = Counter()\n region_count = Counter()\n \n for tweet in tweets:\n score, region = process_one_tweet(tweet, sentiment)\n if region != \"None\":\n scoreboard[region] += score\n region_count[region] += 1\n return [scoreboard, region_count]", "def get_scores(self, query):\n score = np.zeros(self.data['corpus_size'])\n doc_len = np.array(self.data['doc_len'])\n for q in query:\n 
q_freq = np.array([(doc.get(q) or 0) for doc in self.data['doc_freqs']])\n score += (self.data['idf'].get(q) or 0) * (q_freq * (self.data['k1'] + 1) /\n (q_freq + self.data['k1'] * (\n 1 - self.data['b'] + self.data['b'] * doc_len /\n self.data['average_docs_len'])))\n return score", "def analyze(self, tweet):\n \n # keeping track of the score\n score = 0\n \n # filtering though tweets exstracting the useful words\n # preserve_case = false maks them lowercase\n tokenizer = nltk.tokenize.TweetTokenizer(preserve_case = False)\n tokens = tokenizer.tokenize(tweet)\n \n # checking word for word the intension and keeping score\n for word in tokens:\n if word in self.dic:\n if self.dic[word] == 1:\n score += 1\n else:\n score -= 1\n# score += self.dic[word]\n return score", "def get_score(self, query: list, index: int, e: int = 0.5) -> float:\n score = 0.0\n total = sum(self.counts[index].values())\n for token in query:\n if token not in self.counts[index]:\n continue\n idf = math.log((self.document_count + e) / (self.token_docs[token] + e))\n score += (self.counts[index][token] / total) * idf\n\n return score", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def average(entry):\n return entry['total time (s)'] / float(entry['correct answers'] + entry['wrong answers'])", "def createReport(query):\n sentiments = get_sentiments(query)\n print(\"Based on the query, %s has an average sentiment value of %d\", query, sentiments)", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "def test(self, tweets, without_neutral=True):\n correct = 0\n total = 0\n for tweet in tweets:\n assert tweet.polarity is not None\n if tweet.is_neutral() and without_neutral:\n continue\n\n if tweet.polarity == self.predict_sentiment_enum(tweet, without_neutral):\n correct += 1\n\n total += 1\n\n print(\"correct = \", correct, \"total = \", total)\n return correct / total", "def get_sentiment(self, sentances):\n sentiment_total = 0\n # Add each sentances combined sentiment to a total tally\n for sentance in sentances:\n sentiment = self.sentiment_analyzer.polarity_scores(sentance)\n sentiment_total += sentiment['compound']\n return sentiment_total / len(sentances)" ]
[ "0.67024827", "0.66712636", "0.66350305", "0.66138715", "0.65283984", "0.6518941", "0.6491312", "0.6487166", "0.64857775", "0.6405299", "0.62509996", "0.6194273", "0.6162189", "0.6128368", "0.61244524", "0.6088544", "0.6022574", "0.6017512", "0.60000277", "0.59923315", "0.59785146", "0.59769034", "0.59565276", "0.5955905", "0.595546", "0.5893742", "0.5870898", "0.5866351", "0.5863289", "0.58492804" ]
0.854272
0
Generates basic statistics for a given query set of Tweets.
def get_query_statistics(tweets, sentiment_aggregate_list):

    total = len(tweets)
    positive_percentage = float("{0:.2f}".format((float(sentiment_aggregate_list[0][1]/total*100))))
    neutral_percentage = float("{0:.2f}".format((float(sentiment_aggregate_list[1][1]/total*100))))
    negative_percentage = float("{0:.2f}".format((float(sentiment_aggregate_list[2][1]/total*100))))

    result = {"%Positive": positive_percentage, "%Neutral": neutral_percentage, "%Negative": negative_percentage, "Total": total}
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very positive, -1 is very negative\n mean_sentiment = 0.0\n\n for tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def query_all_tweets(query):\n year = 2006\n month = 3\n\n limits = []\n while date(year=year, month=month, day=1) < date.today():\n nextmonth = month + 1 if month < 12 else 1\n nextyear = year + 1 if nextmonth == 1 else year\n\n limits.append(\n (date(year=year, month=month, day=1),\n date(year=year, month=month, day=10))\n )\n limits.append(\n (date(year=year, month=month, day=10),\n date(year=year, month=month, day=20))\n )\n limits.append(\n (date(year=year, month=month, day=20),\n date(year=nextyear, month=nextmonth, day=1))\n )\n year, month = nextyear, nextmonth\n\n queries = ['{} since:{} until:{}'.format(query, since, until)\n for since, until in reversed(limits)]\n\n pool = Pool(20)\n all_tweets = []\n try:\n for new_tweets in pool.imap_unordered(query_tweets_once, queries):\n all_tweets.extend(new_tweets)\n logging.info(\"Got {} tweets ({} new).\".format(\n len(all_tweets), len(new_tweets)))\n except KeyboardInterrupt:\n logging.info(\"Program interrupted by user. 
Returning all tweets \"\n \"gathered so far.\")\n\n return sorted(all_tweets)", "def get_tweets(query, pages=25):\n\n logger = Logger()\n after_part = 'include_available_features=1&include_entities=1&include_new_items_bar=true'\n if query.startswith('#'):\n query = quote(query)\n url = 'https://twitter.com/i/search/timeline?f=tweets&vertical=default&q={}&src=tyah&reset_error_state=false&'.format(query)\n else:\n url = 'https://twitter.com/i/profiles/show/{}/timeline/tweets?'.format(query)\n url += after_part\n \n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Referer': 'https://twitter.com/{}'.format(query),\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8',\n 'X-Twitter-Active-User': 'yes',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Accept-Language': 'en-US'\n }\n\n def gen_tweets(pages):\n logger.add(\"MSG: Sending request to url '{}'...\".format(url))\n r = session.get(url, headers=headers)\n\n logger.add(\"MSG: Parsing result...\".format(url))\n while pages > 0:\n try:\n html = BeautifulSoup(r.json()['items_html'], parser='html', features=\"lxml\")\n except KeyError:\n raise ValueError(\n 'Oops! Either \"{}\" does not exist or is private.'.format(query))\n\n comma = \",\"\n dot = \".\"\n tweets = []\n for tweet in html.select('.stream-item'):\n # 10~11 html elements have `.stream-item` class and also their `data-item-type` is `tweet`\n # but their content doesn't look like a tweet's content\n try:\n text = tweet.select('.tweet-text')[0].get_text()\n except IndexError: # issue #50\n continue\n\n tweet_id = tweet['data-item-id']\n\n time = datetime.fromtimestamp(int(tweet.select('._timestamp')[0]['data-time-ms']) / 1000.0)\n\n interactions = [\n x.get_text()\n for x in tweet.select('.ProfileTweet-actionCount')\n ]\n\n replies = int(\n interactions[0].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[3]\n )\n\n retweets = int(\n interactions[1].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[4]\n or interactions[5]\n )\n\n likes = int(\n interactions[2].split(' ')[0].replace(comma, '').replace(dot, '')\n or interactions[6]\n or interactions[7]\n )\n\n hashtags = [\n hashtag_node.get_text()\n for hashtag_node in tweet.select('.twitter-hashtag')\n ]\n urls = [\n url_node['data-expanded-url']\n for url_node in tweet.select('a.twitter-timeline-link:not(.u-hidden)')\n ]\n photos = [\n photo_node['data-image-url']\n for photo_node in tweet.select('.AdaptiveMedia-photoContainer')\n ]\n\n is_retweet = False\n if tweet.select('.js-stream-tweet')[0].has_attr('data-retweet-id'):\n is_retweet = True\n\n is_pinned = False\n if tweet.select(\".pinned\"):\n is_pinned = True\n\n videos = []\n video_nodes = tweet.select(\".PlayableMedia-player\")\n for node in video_nodes:\n styles = node['style'].split()\n for style in styles:\n if style.startswith('background'):\n tmp = style.split('/')[-1]\n video_id = tmp[:tmp.index('.jpg')]\n videos.append({'id': video_id})\n\n tweets.append({\n 'tweetId': tweet_id,\n 'isRetweet': is_retweet,\n 'time': time,\n 'text': text,\n 'replies': replies,\n 'retweets': retweets,\n 'likes': likes,\n 'isPinned': is_pinned,\n 'entries': {\n 'hashtags': hashtags, 'urls': urls,\n 'photos': photos, 'videos': videos\n }\n })\n\n\n last_tweet = html.select('.stream-item')[-1]['data-item-id']\n\n for tweet in tweets:\n if tweet:\n tweet['text'] = re.sub(r'\\Shttp', ' http', tweet['text'], 1)\n tweet['text'] = 
re.sub(r'\\Spic\\.twitter', ' pic.twitter', tweet['text'], 1)\n yield tweet\n\n r = session.get(url, params={'max_position': last_tweet}, headers=headers)\n pages += -1\n yield from gen_tweets(pages)", "def createReport(query):\n sentiments = get_sentiments(query)\n print(\"Based on the query, %s has an average sentiment value of %d\", query, sentiments)", "def scrape_all():\n for query in queries:\n for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query + 'lang:en' + 'since:2019-11-06 '\n 'until:2019-12-13').get_items()):\n tweet_data = write_tweet(tweet)\n try:\n all_writer.writerow(tweet_data)\n except UnicodeEncodeError:\n pass", "def get_tweets_count_times(twitter, count, query=None):\n # get id to start from\n oldest_id, newest_id = _get_oldest_id(query=query)\n newest_id = newest_id or oldest_id\n\n all_tweets = []\n i = 0\n while i < count:\n i += 1\n # use search api to request 100 tweets. Twitter returns the most recent (max_id) first\n if oldest_id <= newest_id:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)\n else:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id, count=TWEETS_PER_SEARCH, twitter=twitter)\n rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')\n rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')\n\n if not len(tweets):\n # not rate limitted, just no tweets returned by query\n oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000\n break\n elif isinstance(tweets, dict):\n # rate limit hit, or other twython response error\n print(tweets)\n break\n\n all_tweets.extend(tweets)\n\n # determine new oldest id\n tweet_ids = {t['id'] for t in tweets}\n if oldest_id:\n tweet_ids.add(oldest_id)\n oldest_id, newest_id = min(tweet_ids), max(tweet_ids)\n if rate_limit_remaining == 1:\n time.sleep(rate_limit_reset - time.time())\n\n save_tweets(all_tweets, query=query)\n\n # set id to start from for next time\n _set_oldest_id(oldest_id, newest_id, query=query)\n\n if len(all_tweets) == 0:\n os.remove(make_oldest_id_path(query))\n\n return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')", "def process_tweet(tweet):\n global start_date\n global end_date\n global geo_enabled_tweets\n global retweets\n\n # Check for filters before processing any further\n if args.filter and tweet.source:\n if not args.filter.lower() in tweet.source.lower():\n return\n\n tw_date = tweet.created_at\n\n # Updating most recent tweet\n end_date = end_date or tw_date\n start_date = tw_date\n\n # Handling retweets\n try:\n # We use id to get unique accounts (screen_name can be changed)\n rt_id_user = tweet.retweeted_status.user.id_str\n retweeted_users[rt_id_user] += 1\n\n if tweet.retweeted_status.user.screen_name not in id_screen_names:\n id_screen_names[rt_id_user] = \"@%s\" % tweet.retweeted_status.user.screen_name\n\n retweets += 1\n except:\n pass\n\n # Adding timezone from profile offset to set to local hours\n if tweet.user.utc_offset and not args.no_timezone:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=tweet.user.utc_offset))\n\n if args.utc_offset:\n tw_date = (tweet.created_at + datetime.timedelta(seconds=args.utc_offset))\n\n # Updating our activity datasets (distribution maps)\n activity_hourly[\"%s:00\" % str(tw_date.hour).zfill(2)] += 1\n activity_weekly[str(tw_date.weekday())] += 1\n\n # Updating langs\n detected_langs[tweet.lang] += 1\n\n # Updating sources\n detected_sources[tweet.source] 
+= 1\n\n # Detecting geolocation\n if tweet.place:\n geo_enabled_tweets += 1\n tweet.place.name = tweet.place.name\n detected_places[tweet.place.name] += 1\n\n # Updating hashtags list\n if tweet.entities['hashtags']:\n for ht in tweet.entities['hashtags']:\n ht['text'] = \"#%s\" % ht['text']\n detected_hashtags[ht['text']] += 1\n\n # Updating domains list\n if tweet.entities['urls']:\n for url in tweet.entities['urls']:\n domain = urlparse(url['expanded_url']).netloc\n if domain != \"twitter.com\": # removing twitter.com from domains (not very relevant)\n detected_domains[domain] += 1\n\n # Updating mentioned users list\n if tweet.entities['user_mentions']:\n for ht in tweet.entities['user_mentions']:\n mentioned_users[ht['id_str']] += 1\n if not ht['screen_name'] in id_screen_names:\n id_screen_names[ht['id_str']] = \"@%s\" % ht['screen_name']", "def get_tweets():\r\n tweets = models.Tweet.query.all()\r\n output = []\r\n\r\n for tweet in tweets:\r\n tweet_data = {'id': tweet.id,\r\n 'content': tweet.text_content,\r\n 'username': tweet.username,\r\n 'timestamp': tweet.timestamp.isoformat(),\r\n 'likes_count': models.Like.query.filter(models.Like.post_id == tweet.id).count(),\r\n 'retweets_count': models.Retweet.query.filter(models.Retweet.post_id == tweet.id).count()}\r\n\r\n output.append(tweet_data)\r\n\r\n return {\"tweets\": output}", "def getTweets(self, query, start, end):\n gettweets = Twitter.GetTweets(self.rootpath, self.folderpath,\n start, end, query)\n gettweets.start_getTweets()", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results", "def print_query_results(top, ranked_docs, tweets_dict):\n print(\"\\n======================\\nTop {} results out of {} for the seached query:\\n\".format(top, len(ranked_docs)))\n for tweet_id in ranked_docs[:top]:\n tweet_object = tweets_dict[tweet_id]\n txt = tweet_object[\"text\"]\n usr = tweet_object[\"user\"][\"name\"]\n date = tweet_object[\"created_at\"]\n hashtags = tweet_object[\"entities\"][\"hashtags\"]\n favs = tweet_object[\"favorite_count\"]\n rt = tweet_object[\"retweet_count\"]\n urls = tweet_object[\"entities\"][\"urls\"]\n print(\"\\n==================================================================\\n\")\n print(\"Username %s | Tweet: %s\\n Date %s\\n Likes %s| Retweets %s\"%(usr, txt, date, favs, rt))\n if hashtags:\n print(\"Hashtags: \")\n for hashtag in hashtags:\n print(hashtag)\n if urls:\n print(\"URLs: \")\n for url in urls:\n print(url[\"url\"])", "def compute_query_summary_statistics(data):\n return {\n nameserver: _compute_summary_stats(entries)\n for nameserver, entries in data.items()\n }", "def 
analyze_tweets(tweets, model, w2v_model):\n # TODO DO EVERYTHING HERE\n #tweets = [(\"StarWars\", tc.query_tweets(\"StarWars\"))]\n \n #tweets = tc.query_tweets('starwars')\n df = pd.DataFrame(columns=['pos', 'neu', 'neg'])\n if not os.path.isdir('results'):\n os.mkdir('results')\n for topic, topic_tweets in tweets:\n tokenized_tweets = tp.process_raw_tweets(topic_tweets)\n df.loc[topic], dummy = classify_tweets(tokenized_tweets, model, w2v_model)\n vis.word_cloud_from_frequencies(tp.count_tokens(tokenized_tweets), f\"results/{topic}_cloud.png\", width=800, height=400,)\n \n vis.bar_plot_from_dataframe(df, 'results/results.png')\n print(\"\\n\")\n print(df)", "def write_stats(self, directory):\n\n target_dir = os.path.join(directory, 'tweet_stats')\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n # general stats\n self.stats_summary.append(\"%-30s\\t%12d\\n\" % ('Number of tweets', len(self)))\n self.stats_summary.append('%-30s\\t%-12s\\t%-12s' % ('Index', 'Type count', 'Token count'))\n\n for k in self.stats:\n k_stats = self.stats[k]\n\n rank = 0\n token_count = 0\n lines = []\n\n # Sort by frequency of words, pairs, triples, urls etc.\n k_stats_sorted = sorted(k_stats.iteritems(), key=operator.itemgetter(1), reverse=True)\n\n for val, card in k_stats_sorted:\n rank += 1\n token_count += card\n lines.append(\"%4d %-60s %5d\" % (rank, val, card))\n\n self.write_file(target_dir, \"%s.txt\" % k, \"\\n\".join(lines))\n\n # update summary with index name and corresponding type and token counts\n self.stats_summary.append('%-30s\\t%12d\\t%12d' % (k, len(k_stats), token_count))\n\n # write summary info\n self.write_file(target_dir, 'general.txt', \"\\n\".join(self.stats_summary))", "def extract_important(tweet_objects_list):\n # This section extracts important information such as most common hashtags\n hashtag_dictionary = {}\n for tweet in tweet_objects_list:\n if \"hashtags\" in tweet:\n for individual_hashtag in tweet[\"hashtags\"]:\n if not individual_hashtag[\"text\"].lower() in hashtag_dictionary:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] = 1\n else:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] += 1\n frequency = Counter(hashtag_dictionary)\n most_frequent_hashtags = frequency.most_common(50)\n\n user_dictionary = {}\n for tweet in tweet_objects_list:\n if \"user_mentions\" in tweet:\n for individual_user in tweet[\"user_mentions\"]:\n if not individual_user[\"screen_name\"] in user_dictionary:\n user_dictionary[individual_user[\"screen_name\"].lower()] = 1\n else:\n user_dictionary[individual_user[\"screen_name\"].lower()] += 1\n frequency = Counter(user_dictionary)\n most_frequent_users = frequency.most_common(50)\n symbol_dictionary = {}\n for tweet in tweet_objects_list:\n if \"symbols\" in tweet:\n for individual_symbol in tweet[\"symbols\"]:\n if not individual_symbol[\"text\"] in symbol_dictionary:\n symbol_dictionary[individual_symbol[\"text\"]] = 1\n else:\n symbol_dictionary[individual_symbol[\"text\"]] += 1\n frequency = Counter(symbol_dictionary)\n most_frequent_symbols = frequency.most_common(50)\n return most_frequent_hashtags, most_frequent_users, most_frequent_symbols", "def fetch_tweets(self, screen_name, count):\n return {}", "def printHashtagsAndMentions(searchText=None, filterTerms=False, tweetLimit=0):\n tweets = db.Tweet.select()\n if searchText is not None:\n tweets = tweets.filter(db.Tweet.q.message.contains(searchText))\n tweets = tweets.limit(tweetLimit)\n\n hashtags, mentions, plain = 
getHashtagsAndMentions(tweets)\n\n if searchText and filterTerms:\n hashtags = Counter(\n {k: v for k, v in hashtags.items() if searchText.lower() in k.lower()}\n )\n mentions = Counter(\n {k: v for k, v in mentions.items() if searchText.lower() in k.lower()}\n )\n plain = Counter(\n {k: v for k, v in plain.items() if searchText.lower() in k.lower()}\n )\n\n # Unique word count for each area.\n hashtagWC = len(hashtags)\n mentionWC = len(mentions)\n plainWC = len(plain)\n\n print(\"Summary\")\n print(\"==============\")\n # Count items in the sliced selection since .count() does not work with\n # a limit.\n count = len(list(tweets)) if tweetLimit else tweets.count()\n print(\"{0:7,d} tweets\".format(count))\n print(\"{0:7,d} unique words\".format(hashtagWC + mentionWC + plainWC))\n print(\"{0:7,d} unique hashtags\".format(hashtagWC))\n print(\"{0:7,d} unique mentions\".format(mentionWC))\n print(\"{0:7,d} unique plain words\".format(plainWC))\n print()\n\n print(\"Hashtags\")\n print(\"========\")\n printCounterByCount(hashtags)\n print()\n\n print(\"Mentions\")\n print(\"========\")\n printCounterByCount(mentions)\n\n \"\"\"\n # Removal of stopwords and handling of URIs is needed to make this\n # useful.\n print 'Plain'\n print '========'\n printCounterByCount(plain)\n \"\"\"", "def report(statistics, filename=None, template=\"report.mako\", **kwargs):\n\n class QueryGroup:\n def __init__(self):\n self.queries = []\n self.stacks = collections.defaultdict(int)\n self.callers = {}\n self.max = 0\n self.min = sys.maxsize\n self.sum = 0\n self.mean = 0\n self.median = 0\n\n def find_user_fn(self, stack):\n \"\"\" rough heuristic to try to figure out what user-defined func\n in the call stack (i.e. not sqlalchemy) issued the query\n \"\"\"\n for frame in reversed(stack):\n # frame[0] is the file path to the module\n if 'sqlalchemy' not in frame[0]:\n return frame\n\n def add(self, q):\n if not bool(self.queries):\n self.text = str(q.text)\n self.first_word = self.text.split()[0]\n self.queries.append(q)\n self.stacks[q.stack_text] += 1\n self.callers[q.stack_text] = self.find_user_fn(q.stack)\n\n self.max = max(self.max, q.duration)\n self.min = min(self.min, q.duration)\n self.sum = self.sum + q.duration\n self.mean = self.sum / len(self.queries)\n\n def calc_median(self):\n queries = sorted(self.queries, key=lambda q: q.duration, reverse=True)\n length = len(queries)\n if not length % 2:\n x1 = queries[length // 2].duration\n x2 = queries[length // 2 - 1].duration\n self.median = (x1 + x2) / 2\n else:\n self.median = queries[length // 2].duration\n\n query_groups = collections.defaultdict(QueryGroup)\n all_group = QueryGroup()\n\n # group together statistics for the same query\n for qstats in statistics:\n qstats.stack_text = \\\n ''.join(traceback.format_list(qstats.stack)).strip()\n\n group = query_groups[str(qstats.text)]\n group.add(qstats)\n all_group.add(qstats)\n\n query_groups = sorted(query_groups.values(), key=lambda g: g.sum, reverse=True)\n\n # calculate the median for each group\n for g in query_groups:\n g.calc_median()\n\n # create the template lookup\n # (we need this for extensions inheriting the base template)\n tmpl_dir = os.path.join(os.path.dirname(__file__), \"templates\")\n lookup = mako.lookup.TemplateLookup(tmpl_dir, default_filters=['unicode', 'h']) # mako fixes unicode -> str on py3k\n\n # render the template\n html = lookup.get_template(template).render(\n query_groups = query_groups,\n all_group = all_group,\n name = \"SQLTap Profiling Report\",\n **kwargs\n 
)\n\n # write it out to a file if you asked for it\n if filename:\n with open(filename, 'w') as f:\n f.write(html)\n \n return html", "def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def run_queries(self, query_list, random_command):\n output_dict = {'postingsList': {},\n 'postingsListSkip': {},\n 'daatAnd': {},\n 'daatAndSkip': {},\n 'daatAndTfIdf': {},\n 'daatAndSkipTfIdf': {},\n 'sanity': self.sanity_checker(random_command)}\n\n for query in tqdm(query_list):\n \"\"\" Run each query against the index. You should do the following for each query:\n 1. Pre-process & tokenize the query.\n 2. For each query token, get the postings list & postings list with skip pointers.\n 3. Get the DAAT AND query results & number of comparisons with & without skip pointers.\n 4. Get the DAAT AND query results & number of comparisons with & without skip pointers, \n along with sorting by tf-idf scores.\"\"\"\n raise NotImplementedError\n\n input_term_arr = [] # Tokenized query. 
To be implemented.\n\n for term in input_term_arr:\n postings, skip_postings = None, None\n\n \"\"\" Implement logic to populate initialize the above variables.\n The below code formats your result to the required format.\n To be implemented.\"\"\"\n\n output_dict['postingsList'][term] = postings\n output_dict['postingsListSkip'][term] = skip_postings\n\n and_op_no_skip, and_op_skip, and_op_no_skip_sorted, and_op_skip_sorted = None, None, None, None\n and_comparisons_no_skip, and_comparisons_skip, \\\n and_comparisons_no_skip_sorted, and_comparisons_skip_sorted = None, None, None, None\n \"\"\" Implement logic to populate initialize the above variables.\n The below code formats your result to the required format.\n To be implemented.\"\"\"\n and_op_no_score_no_skip, and_results_cnt_no_skip = self._output_formatter(and_op_no_skip)\n and_op_no_score_skip, and_results_cnt_skip = self._output_formatter(and_op_skip)\n and_op_no_score_no_skip_sorted, and_results_cnt_no_skip_sorted = self._output_formatter(and_op_no_skip_sorted)\n and_op_no_score_skip_sorted, and_results_cnt_skip_sorted = self._output_formatter(and_op_skip_sorted)\n\n output_dict['daatAnd'][query.strip()] = {}\n output_dict['daatAnd'][query.strip()]['results'] = and_op_no_score_no_skip\n output_dict['daatAnd'][query.strip()]['num_docs'] = and_results_cnt_no_skip\n output_dict['daatAnd'][query.strip()]['num_comparisons'] = and_comparisons_no_skip\n\n output_dict['daatAndSkip'][query.strip()] = {}\n output_dict['daatAndSkip'][query.strip()]['results'] = and_op_no_score_skip\n output_dict['daatAndSkip'][query.strip()]['num_docs'] = and_results_cnt_skip\n output_dict['daatAndSkip'][query.strip()]['num_comparisons'] = and_comparisons_skip\n\n output_dict['daatAndTfIdf'][query.strip()] = {}\n output_dict['daatAndTfIdf'][query.strip()]['results'] = and_op_no_score_no_skip_sorted\n output_dict['daatAndTfIdf'][query.strip()]['num_docs'] = and_results_cnt_no_skip_sorted\n output_dict['daatAndTfIdf'][query.strip()]['num_comparisons'] = and_comparisons_no_skip_sorted\n\n output_dict['daatAndSkipTfIdf'][query.strip()] = {}\n output_dict['daatAndSkipTfIdf'][query.strip()]['results'] = and_op_no_score_skip_sorted\n output_dict['daatAndSkipTfIdf'][query.strip()]['num_docs'] = and_results_cnt_skip_sorted\n output_dict['daatAndSkipTfIdf'][query.strip()]['num_comparisons'] = and_comparisons_skip_sorted\n\n return output_dict", "def compute_statistics(self):", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n 
\"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def perform_query(tweets_dict, index, tf, idf, rt, likes, score, get_input=True, query=None):\n print(\"Insert your query:\\n\")\n if get_input:\n query = input()\n ranked_docs = search(query, index, idf, tf, rt, likes, score) \n return query, ranked_docs", "def load_all_tweets(self, count):\n\n for influencer in tqdm(self.influencers.allInfluencers, desc='Gathering Tweets'):\n self.get_tweets(influencer, count)", "def getuserstatistics(self):\n userstatistics = []\n userstatistics.append({'text': _('Suggestions Accepted'), 'count': self.suggester.filter(state='accepted').count()})\n userstatistics.append({'text': _('Suggestions Pending'), 'count': self.suggester.filter(state='pending').count()})\n userstatistics.append({'text': _('Suggestions Reviewed'), 'count': self.reviewer.count()})\n userstatistics.append({'text': _('Submissions Made'), 'count': self.submission_set.count()})\n return userstatistics", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get 
mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def stats():\n conn = sqlite3.connect('live/twitter_scraper.db')\n c = conn.cursor()\n\n c.execute('SELECT COUNT() FROM info')\n num_photos = c.fetchone()[0]\n\n c.execute('SELECT COUNT() FROM tweet_text')\n num_tweets = c.fetchone()[0]\n\n mtime = datetime.utcfromtimestamp(os.path.getmtime('live/phash_index.ann'))\n now = datetime.utcnow()\n time_diff = secs_to_str((now - mtime).seconds)\n\n conn.close()\n return num_photos, num_tweets, time_diff", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200" ]
[ "0.69248664", "0.620815", "0.59848654", "0.59815437", "0.5954515", "0.5952684", "0.5883914", "0.5861081", "0.57721597", "0.57669294", "0.5752889", "0.5752543", "0.564709", "0.564132", "0.5640209", "0.5572535", "0.55670476", "0.55010945", "0.5492158", "0.5483679", "0.54743016", "0.5474085", "0.54696286", "0.54607", "0.5455997", "0.5449125", "0.5445563", "0.5443805", "0.5441125", "0.54349405" ]
0.7054746
0
Aggregates sentiment types for a given tweet collection.
def aggregate_sentiment(tweets):

    positive = 0
    negative = 0
    neutral = 0

    for tweet in tweets:
        if tweet.sentiment_type == "positive":
            positive += 1
        elif tweet.sentiment_type == "negative":
            negative += 1
        else:
            neutral += 1

    result = [["Positive", positive], ["Neutral", neutral], ["Negative", negative]]
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect(self, collect_types: List[str]) -> None:\n valid_types = [x for x in collect_types if x in self._valid_types.keys()]\n for ctype in valid_types:\n self._collect_tweets(ctype)", "def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment", "def process_tweets(collection):\n\n\twith open('positive-tweets.txt') as p:\n\t\tprint \"{0}: Inserting positive tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in p.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 1})\n\tp.close()\n\n\twith open('negative-tweets.txt') as n:\n\t\tprint \"{0}: Inserting negative tweets into mongo...\".format(datetime.now())\n\t\tfor tweet in n.readlines():\n\t\t\tcollection.insert({'tweet': tweet, 'sentiment': 0})\n\tn.close()", "def process_batch_tweets(tweets, sentiment):\n from collections import Counter\n scoreboard = Counter()\n region_count = Counter()\n \n for tweet in tweets:\n score, region = process_one_tweet(tweet, sentiment)\n if region != \"None\":\n scoreboard[region] += score\n region_count[region] += 1\n return [scoreboard, region_count]", "def tweet_sentiment_analysis(self, tweet):\n analysis = TextBlob(self.clean_tweet(tweet))\n\n if analysis.sentiment.polarity > 0:\n return ['Positive', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n elif analysis.sentiment.polarity == 0:\n return ['Neutral', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n else:\n return ['Negative', analysis.sentiment.polarity, analysis.sentiment.subjectivity]", "def get_query_statistics(tweets, sentiment_aggregate_list):\r\n\r\n total = len(tweets)\r\n positive_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[0][1]/total*100))))\r\n neutral_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[1][1]/total*100))))\r\n negative_percentage = float(\"{0:.2f}\".format((float(sentiment_aggregate_list[2][1]/total*100))))\r\n\r\n result = {\"%Positive\": positive_percentage, \"%Neutral\": neutral_percentage, \"%Negative\": negative_percentage, \"Total\": total}\r\n return result", "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very 
positive, -1 is very negative\n mean_sentiment = 0.0\n\n for tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def feat_eng(self, tweets):\n self.tweets['emojis'] = get_emojis(self.tweets['text']) # get emojis as text\n self.tweets['polarity'] = self.tweets['text'].map(\n lambda x: TextBlob(x).sentiment.polarity)\n self.tweets['word_count'] = self.tweets['text'].map(lambda x: len(str(x).split()))", "def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment", "def analyse(self, tweet):\n\n if (type(tweet) == dict):\n text = self.clean_tweet(self.to_text(tweet))\n else:\n text = self.clean_tweet(tweet)\n\n analysis = TextBlob(text)\n polarity = analysis.polarity\n subjectivity = analysis.subjectivity\n\n res = []\n\n # if polarity > 0.3:\n # res.append(\"positive\")\n # elif polarity < -0.3:\n # res.append(\"negative\")\n # else:\n # res.append(\"neutral\")\n #\n # if subjectivity > 0.6:\n # res.append(\"subject\")\n # elif subjectivity < 0.3:\n # res.append(\"objective\")\n # else:\n # res.append(\"neutral\")\n\n res.append(polarity)\n res.append(subjectivity)\n\n return res", "def analyse_tweet(self, tweet):\r\n sentiment = 0\r\n subjects = []\r\n\r\n is_comparison = False # sentiment will be the LHS of the comparison\r\n seen_not = False\r\n for word in myparser.parse(tweet,self.company_names,True):\r\n if word == \"not\" or word == \"don't\":\r\n seen_not = True\r\n elif word in self.positive_words:\r\n sentiment = sentiment + 1\r\n elif word in self.negative_words:\r\n sentiment = sentiment - 1\r\n if word in self.company_names:\r\n subjects += [word]\r\n for (p, c) in self.product_names:\r\n if word == p:\r\n subjects += [c]\r\n for (c,s) in self.comparisons:\r\n if word == c:\r\n sentiment = s\r\n is_comparison = True\r\n if seen_not:\r\n sentiment = -sentiment\r\n\r\n #print((tweet, subjects, sentiment, is_comparison))\r\n\r\n if is_comparison:\r\n subjects += [None, None]\r\n return[(subjects[0], sentiment), (subjects[1], -sentiment)]\r\n else:\r\n return [(sub, sentiment) for sub in subjects]", "def get_mentions(self, column, list_of_types, total=False, average=False):\n for mbti_type in list_of_types:\n self.df[mbti_type + '_mentions'] = [sum([x.casefold().count(mbti_type.casefold()) for x in post]) for post in self.df[column]]\n if total == True:\n mention_cols = [col for col in self.df.columns if 'mentions' in col]\n self.df['total_mentions'] = self.df.filter(mention_cols).sum(axis=1)\n if average == True:\n self.df['avg_mentions_per_post'] = self.df['total_mentions'] / self.df['count_posts']", "def generateSentimentAnalysis(self, fs_db, cleaned_submissions, cleaned_tweets):\n all_posts = []\n\n for p in range(len(cleaned_submissions)):\n print('reddit', self.clean(cleaned_submissions[p][3]))\n all_posts.append(self.clean(cleaned_submissions[p][3]))\n\n for t in range(len(cleaned_tweets)):\n print('twitter', self.clean(cleaned_tweets[t][2]))\n 
all_posts.append(self.clean(cleaned_tweets[t][2]))\n \n if len(all_posts) == 0:\n raise Exception(\"No crawled data\")\n\n count = 0\n\n for c in all_posts:\n blob = TextBlob(c)\n\n polarity = blob.sentiment.polarity\n subjectivity = blob.sentiment.subjectivity\n\n doc_ref = fs_db.collection(u'sentimentAnalysis').document('first')\n if (polarity != 0 and subjectivity != 0):\n count += 1\n doc_ref.set({str(count): {'post': c, 'polarity': polarity, 'subjectivity':subjectivity}}, merge=True)\n\n with open('wc.txt', 'w') as output:\n for data in all_posts:\n output.write('%s\\n' % data)", "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile", "def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)", "def update_entities_stats(self, tweet):\n\n if not tweet.has_key('text'):\n return\n\n entities = self.get_entities(tweet['text'])\n for ent in entities:\n if entities[ent]:\n e_list = entities[ent]\n for k in e_list:\n v = None\n if k.has_key('url'):\n v = k['url']\n # FIXME Further normalize text?\n if k.has_key('text'):\n v = k['text'].lower()\n if v:\n tweet_stats = self.stats\n if not tweet_stats.has_key(ent):\n tweet_stats[ent] = {}\n if not tweet_stats[ent].has_key(v):\n tweet_stats[ent][v] = 1\n else:\n tweet_stats[ent][v] += 1", "def classify(tweets, positives, negatives):\n sentiment_list = makelist(tweets, positives, negatives)\n n_positives = 0\n n_negatives = 0\n n_neutral = 0\n\n # Counts the amount of times each number is in sentiment_list\n for i in sentiment_list:\n if i == 1:\n n_positives += 1\n elif i == -1:\n n_negatives += 1\n else:\n n_neutral += 1\n\n print(\"Trump's tweets classified:\")\n print(\" positive: {}\".format(n_positives))\n print(\" negative: {}\".format(n_negatives))\n print(\" neutral : {}\".format(n_neutral))", "def classify(self, tweets):\n classified = []\n for t in tweets:\n #use the SVM to predict the polarity\n t.polarity = self.m_learner.predict_from_tweet(t)\n #append the tweet to the list\n classified.append(t)\n\n return classified", "def get_tweet_sentiment(self, tweet):\n\n analyzer = SentimentIntensityAnalyzer()\n vs = analyzer.polarity_scores(tweet)\n # set sentiment\n if vs['compound'] >= 0.05:\n return 'positive'\n elif -0.5 < vs['compound'] < 0.05:\n return 'neutral'\n else:\n return 'negative'", "def type_count():\n types = []\n for typ in Statistics.all_type():\n types.append({'label': typ.lower(), 'y': Statistics.type_count(typ)})\n 
fix_types = []\n for i in sorted(types, key=lambda k: k['y']):\n if i['y'] != 0:\n fix_types.append(i)\n return jsonify(result=fix_types)", "def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n start_time = time.time()\n\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n print(time.time() - start_time)\n return sent_dict_list", "def analyze(self, tweet):\n \n # keeping track of the score\n score = 0\n \n # filtering though tweets exstracting the useful words\n # preserve_case = false maks them lowercase\n tokenizer = nltk.tokenize.TweetTokenizer(preserve_case = False)\n tokens = tokenizer.tokenize(tweet)\n \n # checking word for word the intension and keeping score\n for word in tokens:\n if word in self.dic:\n if self.dic[word] == 1:\n score += 1\n else:\n score -= 1\n# score += self.dic[word]\n return score", "def get_query_sentiment_avg(tweets):\r\n\r\n total = 0\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def set_analyzed_tweets(self, tweets):\n slim_tweets = [SlimTweet(tweet) for tweet in tweets]\n self.analyzed_tweets = sort_tweets(slim_tweets)", "def get_sentiment(self, sentances):\n sentiment_total = 0\n # Add each sentances combined sentiment to a total tally\n for sentance in sentances:\n sentiment = 
self.sentiment_analyzer.polarity_scores(sentance)\n sentiment_total += sentiment['compound']\n return sentiment_total / len(sentances)", "def process_type(self):\n return 'sentiment Analysys'", "def perform_google_sentiment_lexicon_lookup(tweets):\n \n lex = Lexicon(GoogleTranslater(), SentiWordNetLexicon())\n print \"Getting sentiment values\"\n tweet_sentiments = []\n for t in tweets:\n tweet_sentiments.append(lex.translate_sentence_and_get_lexicon_sentiment(t.text))\n \n print tweet_sentiments\n reduced_tweet_sentiments = []\n for sentiments in tweet_sentiments:\n polar_sum = sum([s[0] for s in sentiments])\n negative_sum = sum([s[1] for s in sentiments])\n objective_sum = sum([s[2] for s in sentiments])\n reduced_tweet_sentiments.append((polar_sum, negative_sum, objective_sum))\n print reduced_tweet_sentiments\n return reduced_tweet_sentiments", "def sentiment(self) -> Dict[str, float]:", "def get_tweet_sentiment(self, tweet):\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'", "def update_word_stats(self, tweet):\n\n if not self.text:\n return\n\n words = self.text.split()\n\n # process single words\n for word in words:\n self.update_stats('words', word)\n\n # process 2 word lists\n pairs = self.get_phrase_list(words, 2)\n if pairs is not None:\n for word_pair in pairs:\n self.update_stats('word_pairs', self.get_index_from_list(word_pair))\n\n # process 3 word lists\n triples = self.get_phrase_list(words, 3)\n if triples is not None:\n for word_triple in triples:\n self.update_stats('word_triples', self.get_index_from_list(word_triple))" ]
[ "0.59929293", "0.59618926", "0.58570933", "0.5779308", "0.57485133", "0.573751", "0.56903654", "0.56657684", "0.55670786", "0.5521338", "0.54893357", "0.542505", "0.53890103", "0.53886825", "0.53880984", "0.533604", "0.52402407", "0.52377105", "0.5190812", "0.51520646", "0.5147069", "0.51282305", "0.51268566", "0.50917774", "0.50597405", "0.50491446", "0.5028703", "0.5025474", "0.5020084", "0.5015096" ]
0.72262305
0
Gets the predominant sentiment type from a list of sentiments. (Eg [[positive, 3],[neutral, 10],[negative,15]])
def predominant_sentiment(sentiment_aggregate_list): positive = int(sentiment_aggregate_list[0][1]) neutral = int(sentiment_aggregate_list[1][1]) negative = int(sentiment_aggregate_list[2][1]) if positive > neutral and positive > negative: return "positive" elif neutral > positive and neutral > negative: return "neutral" elif negative > positive and negative > neutral: return "negative" else: return "mixed"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify_sentiment(sent_index):\n\n\tif sent_index < -0.5:\n\t\treturn 'negative'\n\tif sent_index <= 0.5 and sent_index >= -0.5:\n\t\treturn 'neutral'\n\tif sent_index >= 0.5:\n\t\treturn 'positive'", "def classify(tweets, positives, negatives):\n sentiment_list = makelist(tweets, positives, negatives)\n n_positives = 0\n n_negatives = 0\n n_neutral = 0\n\n # Counts the amount of times each number is in sentiment_list\n for i in sentiment_list:\n if i == 1:\n n_positives += 1\n elif i == -1:\n n_negatives += 1\n else:\n n_neutral += 1\n\n print(\"Trump's tweets classified:\")\n print(\" positive: {}\".format(n_positives))\n print(\" negative: {}\".format(n_negatives))\n print(\" neutral : {}\".format(n_neutral))", "def predict_tweet(tweet):\n text = TextBlob(tweet)\n \n if text.sentiment.polarity > 0:\n response = 'positive'\n elif text.sentiment.polarity == 0:\n response = 'neutral'\n else:\n response = 'negative'\n return response, text.sentiment.polarity, text.sentiment.subjectivity", "def get_sentiment(string_list):\n sentiment = []\n subjectivity = []\n \n for text in string_list:\n blob = textblob.TextBlob(text)\n sentiment.append(blob.sentiment.polarity)\n subjectivity.append(blob.sentiment.subjectivity)\n \n return sentiment, subjectivity", "def get_tweet_sentiment(self, tweet):\n\n analyzer = SentimentIntensityAnalyzer()\n vs = analyzer.polarity_scores(tweet)\n # set sentiment\n if vs['compound'] >= 0.05:\n return 'positive'\n elif -0.5 < vs['compound'] < 0.05:\n return 'neutral'\n else:\n return 'negative'", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments = map(lambda word: afinn.get(word, 0), words)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. 
Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n \n else:\n sentiment = 0\n return sentiment", "def __value_of(sentiment):\n if sentiment == 'positive': return 1\n if sentiment == 'negative': return -1\n return 0", "def predict(self, tweets):\n if isinstance(tweets, (list, tuple)):\n output = []\n for tweet in tweets:\n output.append(self.discriminant(tweet.words))\n return array(output).squeeze()\n elif isinstance(tweets, PrepTweet):\n return self.discriminant(tweets.words)", "def get_average_sentiment(self, list_sentiments):\n average_polarity = 0\n for sentiment in list_sentiments: \n polarity = sentiment[1]\n average_polarity += polarity \n average_polarity /= len(list_sentiments)\n return average_polarity", "def get_sentiment(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\treturn blob.sentiment[0]", "def map_sentiment(cls, sentiment):\n if sentiment > 0.60:\n return (\"pos\")\n elif sentiment < 0.40:\n return (\"neg\")\n else:\n return (\"neutral\")", "def get_sentiment(desc):\n # create TextBlob object of passed tweet text\n analysis = TextBlob(desc)\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'", "def get_sentiment_of_emotions(emotion):\n POS = ['joy', 'trust', 'anticipation', 'surprise']\n NEG = ['sad', 'fear', 'disgust', 'anger', 'hopelessness', 'loneliness', 'distress']\n\n if emotion in POS:\n return 'POS'\n elif emotion in NEG:\n return 'NEG'\n else:\n return None", "def classify(tweets,table,positives,negatives,p_tweets,n_tweets):\n\n\n st = LancasterStemmer()\n\n n_words = len(table)\n in_table = 0\n not_in_table = 0\n\n\n y_pred = np.zeros(len(tweets)).astype('int32')\n\n for i in range(len(tweets)):\n likelihood_pos = 0\n likelihood_neg = 0\n \n # MAP negatives and positives\n for word in tweets[i].split():\n word = st.stem(word.decode('utf-8'))\n if word in table:\n in_table += 1\n likelihood_pos += m.log((table[word][0]+1)/float(positives + 1*n_words))\n likelihood_neg += m.log((table[word][1]+1)/float(negatives + 1*n_words))\n \n else:\n not_in_table += 1\n likelihood_pos += m.log(1/float(positives + 1*n_words))\n likelihood_neg += m.log(1/float(negatives + 1*n_words))\n\n likelihood_pos += m.log(p_tweets/float(p_tweets + n_tweets))\n likelihood_neg += m.log(n_tweets/float(p_tweets + n_tweets))\n\n\n\n # Classify as positive or negative\n if likelihood_neg < likelihood_pos: \n y_pred[i] = 1\n\n prediction = np.bincount(y_pred)\n\n print \"Known words: %d\" % in_table\n print \"Unknown words %d\\n\" % not_in_table\n\n positive_ratio = prediction[1]/float(prediction[1] + prediction[0])\n\n group = \"Positive\" if positive_ratio > 0.5 else \"Negative\" \n\n\n return positive_ratio,group", "def sentiment_score(text, loaded_model = loaded_model, vectorizer = tokenizer):\n # tweet_tf_idf = vect_char.transform(text)\n tweet_token = tokenizer.texts_to_sequences(text)\n tweet_token = pad_sequences(tweet_token, maxlen = 40)\n sentiment = loaded_model.predict_proba(tweet_token)\n neg_prob = sentiment[0][0]\n pos_prob = sentiment[0][1]\n return neg_prob, pos_prob", "def aggregate_sentiment(tweets):\r\n\r\n positive = 0\r\n negative = 0\r\n neutral = 0\r\n\r\n for tweet in tweets:\r\n if tweet.sentiment_type == \"positive\":\r\n positive += 1\r\n elif tweet.sentiment_type == \"negative\":\r\n negative += 1\r\n else:\r\n neutral += 1\r\n\r\n result = [[\"Positive\", positive], [\"Neutral\", neutral], 
[\"Negative\", negative]]\r\n return result", "def tweet_sentiment_analysis(self, tweet):\n analysis = TextBlob(self.clean_tweet(tweet))\n\n if analysis.sentiment.polarity > 0:\n return ['Positive', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n elif analysis.sentiment.polarity == 0:\n return ['Neutral', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n else:\n return ['Negative', analysis.sentiment.polarity, analysis.sentiment.subjectivity]", "def pred_sentiment(self):\n return self._pred_sentiment", "def get_sentiment(self, sentances):\n sentiment_total = 0\n # Add each sentances combined sentiment to a total tally\n for sentance in sentances:\n sentiment = self.sentiment_analyzer.polarity_scores(sentance)\n sentiment_total += sentiment['compound']\n return sentiment_total / len(sentances)", "def classify(self, tweets):\n classified = []\n for t in tweets:\n #use the SVM to predict the polarity\n t.polarity = self.m_learner.predict_from_tweet(t)\n #append the tweet to the list\n classified.append(t)\n\n return classified", "def get_polarity(text):\n blob = TextBlob(text)\n return blob.sentiment[0]", "def analyze_sentiment(blob):\n intensity = list(blob.sentiment)[0]\n if intensity > 0:\n sentiment = 'pos'\n elif intensity < 0:\n sentiment = 'neg'\n else:\n sentiment = 'neu'\n\n return sentiment", "def predict_sentiment(tweet_vectorizer, my_model, tweet):\n \n test_tweet_vectors = vectorize_tweets(tweet_vectorizer, [tweet]) ##first vectorize your new tweet\n test_tweet_sentiments = my_model.predict(test_tweet_vectors) ##use your machine learning model to predict the sentiment\n for i in test_tweet_sentiments: \n if i == 0:\n print('Negative')\n elif i == 4:\n print('Positive')", "def sentiment(sentences: List[str]) -> List[List[float]]:\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n\n nltk.download('vader_lexicon')\n darth = SentimentIntensityAnalyzer()\n collector = []\n for sentence in sentences:\n ss = darth.polarity_scores(sentence)\n temp = []\n for k in ss.values():\n temp.append(k)\n collector.append(temp)\n return collector", "def get_subjectivity(text):\n blob = TextBlob(text)\n return blob.sentiment[1]", "def test(self, tweets, without_neutral=True):\n correct = 0\n total = 0\n for tweet in tweets:\n assert tweet.polarity is not None\n if tweet.is_neutral() and without_neutral:\n continue\n\n if tweet.polarity == self.predict_sentiment_enum(tweet, without_neutral):\n correct += 1\n\n total += 1\n\n print(\"correct = \", correct, \"total = \", total)\n return correct / total", "def get_tweet_sentiment(self, tweet):\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'", "def sentiment(self) -> Dict[str, float]:", "def sentiment(sense, out_scores, out_labels, model, max_decimals=6, lexicon=None):\n\n if not lexicon:\n lexicon = util.PickledLexicon(model)\n # Otherwise use pre-loaded lexicon (from catapult)\n\n sense = util.read_annotation(sense)\n result_scores = {}\n result_labels = {}\n\n for token in sense:\n # Get set of senses for each token and sort them according to their probabilities\n token_senses = [tuple(s.rsplit(util.SCORESEP, 1)) if util.SCORESEP in s else (s, -1.0)\n for s in 
sense[token].split(util.DELIM) if s]\n token_senses.sort(key=lambda x: x[1], reverse=True)\n\n # Lookup the sentiment score for the most probable sense and assign a sentiment label\n if token_senses:\n best_sense = token_senses[0][0]\n score = lexicon.lookup(best_sense, None)\n else:\n score = None\n\n if score:\n result_scores[token] = score\n result_labels[token] = SENTIMENT_LABLES.get(int(score))\n else:\n result_scores[token] = None\n result_labels[token] = None\n\n util.write_annotation(out_scores, result_scores)\n util.write_annotation(out_labels, result_labels)", "def process_type(self):\n return 'sentiment Analysys'" ]
[ "0.6732237", "0.63710135", "0.6355821", "0.6258131", "0.61799365", "0.611913", "0.6108495", "0.60978174", "0.6085284", "0.5958469", "0.59254676", "0.5863128", "0.5860238", "0.5843611", "0.5822473", "0.58167547", "0.5785102", "0.5766148", "0.5755716", "0.5731452", "0.5707599", "0.5675733", "0.56754375", "0.5674143", "0.5673824", "0.56718034", "0.56645185", "0.56590575", "0.55932194", "0.55843586" ]
0.7360832
0
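A minimal usage sketch for the predominant_sentiment function in the record above (not part of the dataset). The sample aggregate lists are invented to match the [[label, count], ...] shape described in the query, and the printed results follow from the function body as written.

# Hypothetical input in the [[label, count], ...] shape from the query above.
counts = [["positive", 3], ["neutral", 10], ["negative", 15]]
print(predominant_sentiment(counts))   # prints "negative"

# A tie between the two most frequent labels falls through to the "mixed" branch.
tied = [["positive", 5], ["neutral", 5], ["negative", 2]]
print(predominant_sentiment(tied))     # prints "mixed"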
Gets sentiment statistics for average sentiment for a given keyword (and location, if specified) over the past 10 days.
def get_sentiment_overtime(keyword, location=None): # Get date 10 days ago ten_days_ago = datetime.now() - timedelta(days=10) # Get raw PyMongo collection collection = Tweet._get_collection() if location: match = { "$match": { "keyword_search_term": keyword, "location_address": location, "tweet_time": {"$gt": ten_days_ago} } } else: match = { "$match": { "keyword_search_term": keyword, "tweet_time": {"$gt": ten_days_ago} } } project = { "$project": { "sentiment_score": 1, "day": { "$substr": ["$tweet_time", 0, 10] } } } group = { "$group": { "_id": "$day", "average": { "$avg": "$sentiment_score" } } } limit = {"$limit": 10} # Perform aggregate query result = collection.aggregate([match, project, group, limit]) # Add query results to list l = [] for i in result['result']: average = "{0:.2f}".format(i['average']) t = [i['_id'], average] l.append(t) return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_historical_sentiment_avg(search_term, location=None):\r\n\r\n total = 0\r\n\r\n if location:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location))\r\n count = len(tweets)\r\n else:\r\n tweets = Tweet.objects(Q(keyword_search_term=search_term))\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def get_historical_sentiment(search_term, location=None):\r\n\r\n if location:\r\n positive = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type=\"positive\")))\r\n negative = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type=\"negative\")))\r\n neutral = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location) & Q(sentiment_type=\"neutral\")))\r\n else:\r\n positive = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(sentiment_type=\"positive\")))\r\n negative = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(sentiment_type=\"negative\")))\r\n neutral = len(Tweet.objects(Q(keyword_search_term=search_term) & Q(sentiment_type=\"neutral\")))\r\n\r\n result = [[\"Positive\", positive], [\"Neutral\", neutral], [\"Negative\", negative]]\r\n return result", "def get_sentiment_trends(order):\r\n\r\n # Get date seven days ago\r\n seven_days_ago = datetime.now() - timedelta(days=7)\r\n\r\n # Get raw PyMongo collection\r\n collection = Tweet._get_collection()\r\n\r\n # Perform aggregate query\r\n result = collection.aggregate([\r\n {\r\n \"$match\":\r\n {\r\n \"tweet_time\": {\"$gt\": seven_days_ago}\r\n }\r\n },\r\n {\r\n \"$group\":\r\n {\r\n \"_id\": \"$keyword_search_term\",\r\n \"average\":\r\n {\r\n \"$avg\": \"$sentiment_score\"\r\n }\r\n }\r\n },\r\n {\r\n \"$sort\":\r\n {\r\n \"average\": order\r\n }\r\n },\r\n {\r\n \"$limit\": 10\r\n }\r\n ])\r\n\r\n return result", "def getSentiment(tweets, location):\n sentiment = [0, 0, 0]\n for tweet in tweets:\n analyser(tweets[tweet], sentiment,location)\n return sentiment", "def analyze_trending_keyword(keyword=\"pokemon\", count=100, keep_all=False, debug=False):\n print('analyzing keyword: {}'.format(keyword))\n tweets = get_search_tweets(query=keyword, count=count, debug=debug)\n\n return process_tweets(tweets, keep_all=keep_all, debug=debug)", "def get_query_sentiment_avg(tweets):\r\n\r\n total = 0\r\n count = len(tweets)\r\n\r\n for tweet in tweets:\r\n total += tweet.sentiment_score\r\n\r\n # Calculate average\r\n avg = total / count\r\n avg = float(\"{0:.2f}\".format((float(avg))))\r\n\r\n return avg", "def search_insights(keyword=None, **kwargs):\n \n instance = Ceic._get_instance()\n\n if keyword is not None and keyword.strip() != \"\":\n kwargs[\"keyword\"] = keyword\n\n search_insights_method = instance._insights_facade.search_insights\n result = instance._make_request(search_insights_method, **kwargs)\n\n return result", "def get_google_trends_data(keyword, from_date, to_date):\r\n \r\n from_year, from_month = datetime.date.fromisoformat(from_date).year, datetime.date.fromisoformat(from_date).month\r\n to_year, to_month = datetime.date.fromisoformat(to_date).year, datetime.date.fromisoformat(to_date).month\r\n\r\n data = dailydata.get_daily_data(keyword, from_year, from_month, to_year, to_month)\r\n \r\n return data[keyword]", "def searchByKeyword(self, keyword, until=\"\", since=\"\", 
count=None, result_type=\"recent\"):\n if count is None:\n tweets = tweepy.Cursor(self.api.search, q=keyword, until=until, since=since, result_type=result_type,\n full_text=True, tweet_mode=\"extended\", lang=\"en\").items()\n else:\n tweets = tweepy.Cursor(self.api.search, q=keyword, until=until, since=since, result_type=result_type,\n full_text=True, tweet_mode=\"extended\", lang=\"en\").items(count)\n\n for status in tweets:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': keyword,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"full_text\"],\n 'hashtags': status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n }\n if hasattr(status, \"retweeted_status\"):\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"full_text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n self.tweets.append(status_refined)\n return self.tweets", "def searchByKeywordPro(self, query, since=\"\", until=\"\", maxResults=None):\n\n tweetsList = []\n if(not maxResults):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n while(next_token):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n else:\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n while(next_token and maxResults > 0):\n tweetList, next_token = self.api.search_30_day(\n environment_name=\"developer\", query=query, toDate=until, fromDate=since, next=next_token)\n tweetsList.append(tweetList)\n maxResults -= len(tweetList)\n for status in tweetsList:\n createdDate = parser.parse(str(status._json[\"created_at\"]).strip())\n createdDate = createdDate.replace(\n tzinfo=pytz.utc) - createdDate.utcoffset()\n status_refined = {\n 'keyword': query,\n '_id': status._json[\"id\"],\n 'created_at': createdDate,\n 'tweetText': status._json[\"text\"],\n 'hashtags': status._json[\"entities\"][\"hashtags\"],\n 'userLoc': status._json[\"user\"][\"location\"],\n 'tweetGeo': status._json[\"geo\"],\n 'tweetCoordinates': status._json[\"coordinates\"],\n 'tweetPlace': status._json[\"place\"],\n 'retweet': {},\n 'quote': {},\n }\n if hasattr(status, \"quoted_status\"):\n if \"extended_tweet\" in status._json[\"quoted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"quoted_status\"][\"text\"]\n 
status_refined['quote'] = {\n 'original_retweet_id': status._json[\"quoted_status\"][\"id\"],\n 'origUserLoc': status._json[\"quoted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"quoted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"quoted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"quoted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"quoted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"quote_count\"],\n }\n elif hasattr(status, \"retweeted_status\"):\n print(status._json[\"retweeted_status\"])\n if \"extended_tweet\" in status._json[\"retweeted_status\"].keys():\n print(\"Taking the expanded tweet\")\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n else:\n status_refined['tweetText'] = status._json[\"retweeted_status\"][\"text\"]\n status_refined['retweet'] = {\n 'original_retweet_id': status._json[\"retweeted_status\"][\"id\"],\n 'origUserLoc': status._json[\"retweeted_status\"][\"user\"][\"location\"],\n 'origTweetLoc': status._json[\"retweeted_status\"][\"geo\"],\n 'origTweetPlace': status._json[\"retweeted_status\"][\"place\"],\n 'origTweetCoord': status._json[\"retweeted_status\"][\"coordinates\"],\n 'origHashtags': status._json[\"retweeted_status\"][\"entities\"][\"hashtags\"],\n 'retweet_count': status._json[\"retweet_count\"],\n }\n elif hasattr(status, \"extended_tweet\"):\n if \"extended_tweet\" in status._json.keys():\n status_refined['tweetText'] = status._json[\"extended_tweet\"][\"full_text\"]\n self.tweets.append(status_refined)\n return self.tweets", "def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process sentiment for article. 
Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment", "def tags_stats():\n # Update total search count of tag\n yesterday = timezone.now() - timezone.timedelta(days=1)\n yesterdays_tag_stats = DailyStats.objects.filter(date=yesterday)\n for daily_stat in yesterdays_tag_stats:\n tag_stat, created = TagStats.objects.get_or_create(tag=daily_stat.tag)\n tag_stat.total_search_count += daily_stat.count\n tag_stat.save()\n\n # Reset last week's search count to 0 before adding this week's results\n # As last week's tag might not have been searched this week.\n TagStats.objects.all().update(last_week_search_count=0)\n\n # Calculate search count in last week for tags\n last_week_date = timezone.now() - timezone.timedelta(days=7)\n last_week_tag_stats = DailyStats.objects.order_by('tag').filter(date__gt=last_week_date). \\\n values('tag').annotate(weekely_count=Sum('count'))\n for tag in last_week_tag_stats:\n tag_stat, created = TagStats.objects.get_or_create(tag_id=tag.get('tag', ''))\n tag_stat.last_week_search_count = tag.get('weekely_count', '')\n tag_stat.save()", "def geo_data_analysis(search_term):\n map_pol = dict()\n\n #A list of tweet texts from each region\n NE_text = geo_collect_tweets(search_term,42.781158,-71.398729,'250mi')\n S_text = geo_collect_tweets(search_term,33.000000,-84.000000,'500mi')\n MW_text = geo_collect_tweets(search_term,40.000000,-100.000000,'1000mi')\n W_text = geo_collect_tweets(search_term,35.000000,-120.000000,'250mi')\n \n #A list of sentiment values for the tweets from each region \n NE_sentiment_values = sentiment(NE_text)\n S_sentiment_values = sentiment(S_text)\n MW_sentiment_values = sentiment(MW_text)\n W_sentiment_values = sentiment(W_text)\n\n #find the average sentiment value for each region\n NE_avg = sum(NE_sentiment_values)/len(NE_sentiment_values)\n S_avg = sum(S_sentiment_values)/len(S_sentiment_values)\n MW_avg = sum(MW_sentiment_values)/len(MW_sentiment_values)\n W_avg = sum(W_sentiment_values)/len(W_sentiment_values)\n\n return [W_avg,S_avg,NE_avg,MW_avg]", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def get_average_mood(mood_data, past_days=None):\n mood_sum = 0\n total_days = 0\n if past_days is None:\n past_days = (datetime.now() - datetime(1970, 1, 1)).days\n start_date = datetime.now() - timedelta(days=past_days-1)\n for date, mood in mood_data[:-past_days:-1]:\n if date > start_date:\n mood_sum += int(mood)\n total_days += 1\n return round(mood_sum/total_days, 2)", "def question_sentiment_analysis(self):\n sentiments = get_sentiments()\n student_data = self.responses\n question_text = 'In one word'\n\n # Set up data for calculations\n num_scores = 0\n sentiment_sum = 0\n score_list = 
list()\n\n for response in student_data:\n\n if question_text in response.question.text:\n words = response.response.lower().split()\n\n # Find the sentiment score for each word, and add it to our data\n for word in words:\n # Ignore the word if it's not in the sentiment dictionary\n if word in sentiments:\n sentiment_sum += sentiments[word]\n num_scores += 1\n score_list.append(sentiments[word])\n\n average = sentiment_sum / num_scores\n standard_dev = statistics.stdev(score_list)\n\n return average, standard_dev", "def get_twitter_data(keyword, from_date, to_date):\r\n # Creating list to append tweet data to\r\n counts_list = []\r\n dates_list = []\r\n \r\n days = pd.date_range(start = from_date, end = to_date)\r\n \r\n for i in range(len(days)-1):\r\n \r\n # Using TwitterSearchScraper to count daily tweets\r\n daily_count = 0\r\n for item in sntwitter.TwitterSearchScraper(keyword + ' since:' + str(days[i].date()) + ' until:' + str(days[i+1].date())).get_items():\r\n daily_count = daily_count + 1\r\n \r\n print(\"Day\", str(days[i].date()), \"had:\", daily_count, \". Going to next day...\")\r\n \r\n dates_list.append(days[i].date())\r\n counts_list.append(daily_count)\r\n \r\n return pd.DataFrame({'date': dates_list, 'tweets': counts_list})", "def get_sentiment():\n # USER REQUEST PARAMETERS\n hashtag = request.args.get('hashtag', '')\n if hashtag == \"\":\n return \"Please specify a non null hashtag\"\n nb_days = request.args.get('nb_days', 7,type=int)\n nb_days = int(min(max(nb_days, 1), 7))\n nb_tweets = max(request.args.get('nb_tweets', nb_days * 10), nb_days,type=int)\n get_topic_words = bool(int(request.args.get('get_topic_words',\"1\")))\n n_topics = request.args.get('n_topics', 1,type=int)\n n_words_per_topic = request.args.get('n_words_per_topic', 10,type=int)\n lda_passes = request.args.get('lda_passes', 4,type=int)\n return_tweets = bool(int(request.args.get('return_tweets', \"0\")))\n language = request.args.get('language', \"en\")\n\n # TWITTER REQUEST PARAMETERS\n days_offsets = range(-nb_days + 1, 1)\n query_key_value = \" -is:retweet -is:quote lang:\" + language\n tweet_fields = \"created_at,public_metrics,author_id\"\n max_nb_tweets_per_day = nb_tweets // len(days_offsets)\n query_string = \"#\" + hashtag.strip() + query_key_value\n\n # COMPUTE RESULTS\n tweets = get_tweets(query_string, days_offsets, tweet_fields,\n max_nb_tweets_per_day, nb_tweets, search_tweets_args)\n sentiments_df, cleaned_tweets_texts, filtered_tweets_df = compute_sentiment(\n tweets, model, tokenizer)\n\n if get_topic_words:\n top_topics = get_topics_from_tweets(NLTK_DATA_PATH, cleaned_tweets_texts, n_topics=n_topics,\n n_words_per_topic=n_words_per_topic, n_passes=lda_passes,\n force_download=False)\n\n if return_tweets:\n sentiments_tweets_df = pd.concat(\n (sentiments_df, filtered_tweets_df.reset_index(drop=True)), axis=1)\n\n results = {\"sentiments_json\": sentiments_tweets_df.to_json()}\n else:\n results = {\"sentiments_json\": sentiments_df.to_json()}\n\n if get_topic_words:\n results[\"top_topics_json\"] = top_topics.to_json()\n\n return json.dumps(results)", "def getSentiment(s):\n headers = {\"Ocp-Apim-Subscription-Key\" : \"4c28d3a67a12442cad6666a3200c49f5\",\n \"Content-Type\" : \"application/json\", \"Accept\" : \"application/json\"}\n url = \"https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment\"\n json = {\"documents\": [{\"language\": \"en\", \"id\" : \"1\"}]}\n json['documents'][0]['text'] = s\n sentiment = r.post(url, headers = headers, json = json)\n 
sentiment = j.loads(sentiment.text)\n return sentiment['documents'][0]['score']", "def get_overall_sentiment(text):\n return alchemy_language.sentiment(text=text)", "def test_get_average_of_sentiment_scores():\n\n dict_of_avg_scores = get_average_of_sentiment_scores(\n 'politics_30_months_comments_cleaned_standardized_vader_flair.csv')\n print('average sentiment scores all comments')\n for key, value in dict_of_avg_scores.items():\n print(key, value)\n print()", "def sentiment_plot(self, top_words=25):\n if top_words > 25:\n warnings.warn('Including more than 25 words on the X-axis will cause words to be excluded from the axis')\n\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n if len(daily_comments) == 0:\n warnings.warn('No comments found for this day, trying future dates until comments are found')\n\n while len(daily_comments) == 0:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n if 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n warnings.warn('No negative or positive sentiments found on this day, trying future dates until positive or negative comments are found')\n\n while 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release']. 
\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n res_positive = daily_comments[(daily_comments['sentiment']=='pos')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_neutral = daily_comments[(daily_comments['sentiment']=='neu')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_negative = daily_comments[daily_comments['sentiment']=='neg']['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n\n fig = make_subplots(rows=3, cols=1,\n y_title='Count',\n subplot_titles=('Positive', 'Neutral', 'Negative'))\n trace = fig.add_trace(px.bar(x=list(res_positive.keys())[:top_words], y=list(res_positive.values())[:top_words]).data[0],\n row=1, col=1)\n fig.append_trace(px.bar(x=list(res_neutral.keys())[:top_words], y=list(res_neutral.values())[:top_words]).data[0],\n row=2, col=1)\n fig.append_trace(px.bar(x=list(res_negative.keys())[:top_words], y=list(res_negative.values())[:top_words]).data[0],\n row=3, col=1)\n\n left = np.where(self.day_window[0] < 0, 'Before', 'After')\n right = np.where(self.day_window[1] < 0, 'Before', 'After')\n fig.update_layout(\n title='Top {} Words at {} Days {} Release to {} Days {} Release'.format(top_words,\n self.day_window[0], left,\n self.day_window[1], right)\n )\n fig.show()", "def count_tweets(search_term, location=None):\r\n\r\n if location:\r\n return len(Tweet.objects(Q(keyword_search_term=search_term) & Q(location_address=location)))\r\n else:\r\n return len(Tweet.objects(keyword_search_term=search_term))", "def get_keywords_and_impressions(client, customer_id, page_size):\n ga_service = client.get_service('GoogleAdsService', version='v2')\n results = []\n\n query = ('SELECT ad_group_criterion.keyword.text, '\n 'metrics.impressions, metrics.clicks, metrics.cost_micros '\n 'FROM keyword_view WHERE segments.date DURING LAST_7_DAYS '\n 'AND ad_group.status = \\'ENABLED\\' '\n 'AND ad_group_criterion.status IN (\\'ENABLED\\', \\'PAUSED\\') '\n 'ORDER BY metrics.impressions DESC '\n 'LIMIT 100')\n response = ga_service.search(customer_id, query, page_size=page_size)\n try:\n for row in response:\n criterion = row.ad_group_criterion\n metrics = row.metrics\n results+= [criterion.keyword.text.value,\n metrics.impressions.value,\n metrics.cost_micros.value],\n return [[results],['Keyword', 'Impressions', 'Cost_Micros']]\n except GoogleAdsException as ex:\n print('Request with ID {} failed with status {} and includes the '\n 'following errors:'.format(ex.request_id, ex.error.code().name))\n return None", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" 
+ str(e))", "def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results", "def sentiment(self) -> Dict[str, float]:", "def display_sentiment(ticker: str, n_tweets: int, n_days_past: int, export: str = \"\"):\n # Date format string required by twitter\n dtformat = \"%Y-%m-%dT%H:%M:%SZ\"\n\n # Algorithm to extract\n dt_recent = datetime.now() - timedelta(seconds=20)\n dt_old = dt_recent - timedelta(days=n_days_past)\n print(\n f\"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n df_tweets = pd.DataFrame(\n columns=[\n \"created_at\",\n \"text\",\n \"sentiment\",\n \"positive\",\n \"negative\",\n \"neutral\",\n ]\n )\n while True:\n # Iterate until we haven't passed the old number of days\n if dt_recent < dt_old:\n break\n # Update past datetime\n dt_past = dt_recent - timedelta(minutes=60)\n\n temp = twitter_model.load_analyze_tweets(\n ticker,\n n_tweets,\n start_time=dt_past.strftime(dtformat),\n end_time=dt_recent.strftime(dtformat),\n )\n\n if temp.empty:\n return\n\n df_tweets = pd.concat([df_tweets, temp])\n\n if dt_past.day < dt_recent.day:\n print(\n f\"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n # Update recent datetime\n dt_recent = dt_past\n\n # Sort tweets per date\n df_tweets.sort_index(ascending=False, inplace=True)\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n df_tweets[\"prob_sen\"] = 1\n\n # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)\n df_tweets.reset_index(inplace=True)\n df_tweets[\"Month\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(\n lambda x: x.month\n )\n df_tweets[\"Day\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(lambda x: x.day)\n df_tweets[\"date\"] = pd.to_datetime(df_tweets[\"created_at\"])\n df_tweets = df_tweets.sort_values(by=\"date\")\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI)\n ax[0].plot(\n pd.to_datetime(df_tweets[\"created_at\"]),\n df_tweets[\"cumulative_compound\"].values,\n lw=3,\n c=\"cyan\",\n )\n ax[0].set_ylabel(\"Cumulative VADER Sentiment\")\n xlocations = []\n xlabels = []\n for _, day_df in df_tweets.groupby(by=\"Day\"):\n day_df[\"time\"] = pd.to_datetime(day_df[\"created_at\"])\n day_df = day_df.sort_values(by=\"time\")\n ax[0].plot(day_df[\"time\"], day_df[\"sentiment\"].cumsum(), c=\"tab:blue\")\n xlocations.append(day_df.time.values[0])\n xlabels.append(day_df[\"time\"].apply(lambda x: x.strftime(\"%m-%d\")).values[0])\n\n ax[1].bar(df_tweets[\"date\"], df_tweets[\"positive\"], color=\"green\", width=0.02)\n ax[1].bar(df_tweets[\"date\"], -1 * df_tweets[\"negative\"], color=\"red\", width=0.02)\n ax[0].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[0].minorticks_on()\n ax[0].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[0].set_xticks(xlocations)\n ax[0].set_xticklabels(xlabels)\n\n ax[1].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[1].minorticks_on()\n ax[1].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[1].set_ylabel(\"VADER Polarity Scores\")\n ax[1].set_xticks(xlocations)\n ax[1].set_xticklabels(xlabels)\n plt.suptitle(\n f\"Twitter's {ticker} total compound sentiment over time is {np.sum(df_tweets['sentiment'])}\"\n )\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n print(\"\")\n 
export_data(\n export, os.path.dirname(os.path.abspath(__file__)), \"sentiment\", df_tweets\n )", "def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment" ]
[ "0.7494988", "0.6593404", "0.64326733", "0.62750417", "0.5912352", "0.54747236", "0.5449991", "0.5329276", "0.5303148", "0.52833545", "0.52704436", "0.50593525", "0.5013312", "0.49506775", "0.49489254", "0.4912962", "0.4879828", "0.48639044", "0.4842615", "0.48359329", "0.48138368", "0.48039252", "0.48030668", "0.48023894", "0.4801353", "0.47888154", "0.47622916", "0.4751501", "0.4739105", "0.4703925" ]
0.8139415
0
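A minimal usage sketch for the get_sentiment_overtime function in the record above (not part of the dataset). It assumes the surrounding project's Tweet MongoEngine model is importable and that matching tweets exist in MongoDB; the keyword and location values are invented, and the output shape follows from the aggregation pipeline in the record (one [day, formatted average] pair per day, where the day is the first 10 characters of tweet_time).

# Hypothetical call; "bitcoin" and "London, UK" are placeholder values.
daily_averages = get_sentiment_overtime("bitcoin", location="London, UK")

# Each entry pairs a day string with the average sentiment formatted to 2 decimals,
# e.g. ['2016-03-01', '0.12'].
for day, avg in daily_averages:
    print(day, avg)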
Gets the top 10 most positive / negative sentiment triggers from the past 7 days.
def get_sentiment_trends(order): # Get date seven days ago seven_days_ago = datetime.now() - timedelta(days=7) # Get raw PyMongo collection collection = Tweet._get_collection() # Perform aggregate query result = collection.aggregate([ { "$match": { "tweet_time": {"$gt": seven_days_ago} } }, { "$group": { "_id": "$keyword_search_term", "average": { "$avg": "$sentiment_score" } } }, { "$sort": { "average": order } }, { "$limit": 10 } ]) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_extreme_tweets(self, sentiment, count=1, num_score=False):\n def return_polarity(tweet):\n return tweet['polarity']\n\n print(\"The top {} most {} tweets:\".format(count, sentiment))\n\n if sentiment == 'positive':\n sorted_tweet = sorted(self.positive_tweets, key=return_polarity, reverse=True)\n elif sentiment == 'negative':\n sorted_tweet = sorted(self.negative_tweets, key=return_polarity)\n else:\n raise ValueError(\"Sentiment must be a string that is either 'positive' or 'negative'.\")\n \n for tweet in sorted_tweet[:count]:\n print(tweet['text'])\n if num_score:\n print(\"Polarity: {} | Subjectivity: {}\".format(tweet['polarity'], tweet['subjectivity']), \"\\n\")", "def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n top_10_tweets['top_tweets'] = []\n for tweet in Tweet.top_tweets[:10]:\n top_10_tweets['top_tweets'].append({'hashtag': \"#\"+tweet[0], 'count': tweet[1]})\n return top_10_tweets", "def print_recent_tweets(self, sentiment, count=5):\n print(\"\\nMost recent {} tweets:\".format(sentiment))\n\n def print_tweet(tweets, count):\n for tweet in tweets[:count]:\n print(tweet['text'], \"\\n\")\n\n if sentiment == 'positive':\n print_tweet(self.positive_tweets, count)\n elif sentiment == 'negative':\n print_tweet(self.negative_tweets, count)\n elif sentiment == 'neutral':\n print_tweet(self.neutral_tweets, count)\n else:\n raise ValueError(\"Sentiment must be a string that is 'positive', 'negative', or 'neutral'.\")", "def high_pol_tweets(self):\n positive = sdf.loc[sdf.polarity == sdf.polarity.max(), ['text']].sample(5).values\n [print(text[0], '\\n') for text in positive];", "def sentiment_plot(self, top_words=25):\n if top_words > 25:\n warnings.warn('Including more than 25 words on the X-axis will cause words to be excluded from the axis')\n\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n if len(daily_comments) == 0:\n warnings.warn('No comments found for this day, trying future dates until comments are found')\n\n while len(daily_comments) == 0:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release'].\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n if 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n warnings.warn('No negative or positive sentiments found on this day, trying future dates until positive or negative comments are found')\n\n while 'pos' not in daily_comments['sentiment'].values or 'neu' not in daily_comments['sentiment'].values or \\\n 'neg' not in daily_comments['sentiment'].values:\n if self.day_window[1] > self.comments['days_after_release'].max():\n raise KeyError('Reached bounds of comment dates available. Make sure all comments are present')\n self.day_window[1] += 1\n daily_comments = self.comments[(self.comments['days_after_release']. 
\\\n isin(list(range(self.day_window[0], self.day_window[1] + 1))))]\n\n print('Now looking at {} to {} days after release'.format(self.day_window[0], self.day_window[1]))\n\n res_positive = daily_comments[(daily_comments['sentiment']=='pos')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_neutral = daily_comments[(daily_comments['sentiment']=='neu')]['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n res_negative = daily_comments[daily_comments['sentiment']=='neg']['comment_message'].str.split(expand=True)\\\n .stack().value_counts().to_dict()\n\n fig = make_subplots(rows=3, cols=1,\n y_title='Count',\n subplot_titles=('Positive', 'Neutral', 'Negative'))\n trace = fig.add_trace(px.bar(x=list(res_positive.keys())[:top_words], y=list(res_positive.values())[:top_words]).data[0],\n row=1, col=1)\n fig.append_trace(px.bar(x=list(res_neutral.keys())[:top_words], y=list(res_neutral.values())[:top_words]).data[0],\n row=2, col=1)\n fig.append_trace(px.bar(x=list(res_negative.keys())[:top_words], y=list(res_negative.values())[:top_words]).data[0],\n row=3, col=1)\n\n left = np.where(self.day_window[0] < 0, 'Before', 'After')\n right = np.where(self.day_window[1] < 0, 'Before', 'After')\n fig.update_layout(\n title='Top {} Words at {} Days {} Release to {} Days {} Release'.format(top_words,\n self.day_window[0], left,\n self.day_window[1], right)\n )\n fig.show()", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def top10(self) -> List[Word]:\n return self._top10", "def _compute_trigger_scores(self, trigger_embeddings, cls_projected, trigger_mask):\n cls_repeat = cls_projected.unsqueeze(dim=1).repeat(1, trigger_embeddings.size(1), 1)\n trigger_embeddings = torch.cat([trigger_embeddings, cls_repeat], dim=-1)\n if self._trigger_attention_context:\n context = self._trigger_attention(trigger_embeddings, trigger_mask)\n trigger_embeddings = torch.cat([trigger_embeddings, context], dim=2)\n trigger_scores = self._trigger_scorer(trigger_embeddings)\n # Give large negative scores to masked-out elements.\n mask = trigger_mask.unsqueeze(-1)\n trigger_scores = util.replace_masked_values(trigger_scores, mask, -1e20)\n dummy_dims = [trigger_scores.size(0), trigger_scores.size(1), 1]\n dummy_scores = trigger_scores.new_zeros(*dummy_dims)\n trigger_scores = torch.cat((dummy_scores, trigger_scores), -1)\n # Give large negative scores to the masked-out values.\n return trigger_scores", "def stockSentiment(stockName, numTweets=100):\n\n listOfTweets = user.search(stockName, count=numTweets)\n threshold = posSentTweet = negSentTweet = 0\n\n for tweet in listOfTweets:\n analysis = TextBlob(tweet.text)\n if analysis.sentiment.polarity >= threshold:\n posSentTweet = posSentTweet + 1\n else:\n negSentTweet = negSentTweet + 1\n\n if posSentTweet > negSentTweet:\n print(\"Overall Positive\")\n return True\n else:\n print(\"Overall Negative\")\n return False", "def display_sentiment(ticker: str, n_tweets: int, n_days_past: int, export: str = \"\"):\n # Date format string required by twitter\n dtformat = \"%Y-%m-%dT%H:%M:%SZ\"\n\n # Algorithm to extract\n dt_recent = datetime.now() - timedelta(seconds=20)\n dt_old = dt_recent - timedelta(days=n_days_past)\n print(\n f\"From {dt_recent.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n df_tweets = pd.DataFrame(\n columns=[\n \"created_at\",\n \"text\",\n \"sentiment\",\n \"positive\",\n \"negative\",\n 
\"neutral\",\n ]\n )\n while True:\n # Iterate until we haven't passed the old number of days\n if dt_recent < dt_old:\n break\n # Update past datetime\n dt_past = dt_recent - timedelta(minutes=60)\n\n temp = twitter_model.load_analyze_tweets(\n ticker,\n n_tweets,\n start_time=dt_past.strftime(dtformat),\n end_time=dt_recent.strftime(dtformat),\n )\n\n if temp.empty:\n return\n\n df_tweets = pd.concat([df_tweets, temp])\n\n if dt_past.day < dt_recent.day:\n print(\n f\"From {dt_past.date()} retrieving {n_tweets*24} tweets ({n_tweets} tweets/hour)\"\n )\n\n # Update recent datetime\n dt_recent = dt_past\n\n # Sort tweets per date\n df_tweets.sort_index(ascending=False, inplace=True)\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n df_tweets[\"prob_sen\"] = 1\n\n # df_tweets.to_csv(r'notebooks/tweets.csv', index=False)\n df_tweets.reset_index(inplace=True)\n df_tweets[\"Month\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(\n lambda x: x.month\n )\n df_tweets[\"Day\"] = pd.to_datetime(df_tweets[\"created_at\"]).apply(lambda x: x.day)\n df_tweets[\"date\"] = pd.to_datetime(df_tweets[\"created_at\"])\n df_tweets = df_tweets.sort_values(by=\"date\")\n df_tweets[\"cumulative_compound\"] = df_tweets[\"sentiment\"].cumsum()\n _, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=cfg_plot.PLOT_DPI)\n ax[0].plot(\n pd.to_datetime(df_tweets[\"created_at\"]),\n df_tweets[\"cumulative_compound\"].values,\n lw=3,\n c=\"cyan\",\n )\n ax[0].set_ylabel(\"Cumulative VADER Sentiment\")\n xlocations = []\n xlabels = []\n for _, day_df in df_tweets.groupby(by=\"Day\"):\n day_df[\"time\"] = pd.to_datetime(day_df[\"created_at\"])\n day_df = day_df.sort_values(by=\"time\")\n ax[0].plot(day_df[\"time\"], day_df[\"sentiment\"].cumsum(), c=\"tab:blue\")\n xlocations.append(day_df.time.values[0])\n xlabels.append(day_df[\"time\"].apply(lambda x: x.strftime(\"%m-%d\")).values[0])\n\n ax[1].bar(df_tweets[\"date\"], df_tweets[\"positive\"], color=\"green\", width=0.02)\n ax[1].bar(df_tweets[\"date\"], -1 * df_tweets[\"negative\"], color=\"red\", width=0.02)\n ax[0].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[0].minorticks_on()\n ax[0].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[0].set_xticks(xlocations)\n ax[0].set_xticklabels(xlabels)\n\n ax[1].grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\", lw=1.5, alpha=0.5)\n ax[1].minorticks_on()\n ax[1].grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n ax[1].set_ylabel(\"VADER Polarity Scores\")\n ax[1].set_xticks(xlocations)\n ax[1].set_xticklabels(xlabels)\n plt.suptitle(\n f\"Twitter's {ticker} total compound sentiment over time is {np.sum(df_tweets['sentiment'])}\"\n )\n if gtff.USE_ION:\n plt.ion()\n plt.show()\n print(\"\")\n export_data(\n export, os.path.dirname(os.path.abspath(__file__)), \"sentiment\", df_tweets\n )", "def get_five_latest(self):\r\n selection = []\r\n sorted(self.tweets, key=lambda tweet: tweet.date, reverse=True)\r\n amount = 5\r\n if self.get_length() < 5:\r\n amount = self.get_length()\r\n for i in range(amount):\r\n selection.append(self.tweets[i])\r\n return selection", "def __r7(soup):\n news = []\n ps = soup.find_all('p', class_='trends-thermometer-description')\n i = 0\n\n for p in ps:\n if i == 6: # Six trending topics\n break\n i += 1\n a = p.parent.parent.parent.a\n news.append(dict(title=a['title'], link=a['href']))\n return news", "def forecast_weekly():\n forecast = 
get_forecast()\n daily = forecast.daily()\n return daily.summary", "def most_viewed(self, days=None):\n # Set amount of days for top stories\n days_options = [1, 7, 30]\n if days is None:\n days = 1\n\n # Raise an Exception if number of days is invalid\n if days not in days_options:\n raise ValueError(\"You can only select 1, 7 or 30 days\")\n\n # Load the data\n url = BASE_MOST_POPULAR + \"viewed/\" + str(days) + \".json\"\n result = self._load_data(url)\n\n parsed_date_result = self._parse_dates(result, \"date-only\", [\"published_date\"])\n parsed_result = self._parse_dates(parsed_date_result, \"date-time\", [\"updated\"])\n\n return parsed_result", "def least_popular_influencers(self, influencerTopSim, count):\n infPopularity = {influencer: 0 for influencer in influencerTopSim}\n for influencer in influencerTopSim:\n infTweetPop = self.userTweetsStat[influencer]\n avgPop = []\n for tweet in influencerTopSim[influencer]:\n infTweet = infTweetPop[len(infTweetPop)-1]\n avgPop.append(self.assign_popularity_to_tweet(infTweet,tweet))\n infPopularity[influencer] = np.mean(avgPop)\n \n tmp = {key: rank for rank, key in enumerate(sorted(set(infPopularity.values()), reverse=True), 1)}\n rankInfluencer = {k: tmp[v] for k,v in infPopularity.items()}\n leastPopInfluencer = [a for a in dict(sorted(rankInfluencer.items(), key=operator.itemgetter(1), reverse=True)[:count]).keys()]\n \n return leastPopInfluencer", "def most_popular(self, n):\n return popular_tags", "def get_recently_articles(cls, num):\n return cls.objects.values('title', 'view_times', 'update_time', 'author')\\\n .filter(status=0).order_by('-update_time')[:num]", "def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: tup[1]))[:self._top_values]", "def get_todays_posts():\n \n return sorted(requests.get(TODAY_URL).json()['hunts'], \n key=lambda post: post['rank'])", "def get_top_spammers(self, n):\n sql_command = \"SELECT * FROM points ORDER BY amount DESC;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [])\n all = cursor.fetchall()\n\n return all[:n]", "def positive_word(tweets, positives):\n\n wordcount = {}\n\n positive_words = set(positives)\n\n # Makes a dictionary of all positive words to be able to store the appearances\n for i in positives:\n wordcount[i] = 0\n\n for tweet in tweets:\n for word in tweet:\n if word in positive_words:\n wordcount[word] += 1\n\n # Sorts the dictionary so the first 5 words are the top used words\n items = wordcount.items()\n sorted_dic = sorted(items, key=lambda wordcount: wordcount[1], reverse=True)\n print(\"\\nTrump's top 5 most used positive words:\")\n for i in range(5):\n print(\" \" + sorted_dic[i][0] + \" \" + str(sorted_dic[i][1]))", "def tweet_df(n):\n # Retrieve the tweet contents\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = 
sentiment_analyzer_scores(fifth_tweet)\n sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Compute the compound score for obtaining a sentiment class\n compound_score_first_tweet = sentiment_logic((list(sa_first_tweet.values())[list(sa_first_tweet.keys()).index('compound')] ))\n compound_score_second_tweet = sentiment_logic((list(sa_second_tweet.values())[list(sa_second_tweet.keys()).index('compound')] )) \n compound_score_third_tweet = sentiment_logic((list(sa_third_tweet.values())[list(sa_third_tweet.keys()).index('compound')] ))\n compound_score_fourth_tweet = sentiment_logic((list(sa_fourth_tweet.values())[list(sa_fourth_tweet.keys()).index('compound')] ))\n compound_score_fifth_tweet = sentiment_logic((list(sa_fifth_tweet.values())[list(sa_fifth_tweet.keys()).index('compound')] ))\n compound_score_sixth_tweet = sentiment_logic((list(sa_sixth_tweet.values())[list(sa_sixth_tweet.keys()).index('compound')] ))\n compound_score_seventh_tweet = sentiment_logic((list(sa_seventh_tweet.values())[list(sa_seventh_tweet.keys()).index('compound')] ))\n compound_score_eighth_tweet = sentiment_logic((list(sa_eighth_tweet.values())[list(sa_eighth_tweet.keys()).index('compound')] ))\n compound_score_nineth_tweet = sentiment_logic((list(sa_nineth_tweet.values())[list(sa_nineth_tweet.keys()).index('compound')] ))\n compound_score_tenth_tweet = sentiment_logic((list(sa_tenth_tweet.values())[list(sa_tenth_tweet.keys()).index('compound')] ))\n \n # Create a new temporary dataframe for the tweet contents and sentiment\n compound_score_list = [compound_score_first_tweet, compound_score_second_tweet,\n compound_score_third_tweet, compound_score_fourth_tweet,\n compound_score_fifth_tweet, compound_score_sixth_tweet, \n compound_score_seventh_tweet, compound_score_eighth_tweet,\n compound_score_nineth_tweet, compound_score_tenth_tweet]\n \n \n first_col = [first_tweet, second_tweet,\n third_tweet, fourth_tweet,\n fifth_tweet, sixth_tweet,\n seventh_tweet, eighth_tweet,\n nineth_tweet, tenth_tweet]\n \n second_col = compound_score_list\n \n tmp_df = pd.DataFrame(data = {'Tweets' : first_col, \n 'Sentiment' : second_col})\n \n \n return tmp_df.to_json(date_format = 'iso', orient = 'split')", "def get_top_sents(sent_scores, top=None):\n sorted_sents = sorted(sent_scores, key=lambda k: k['sent_score'], reverse=True)\n\n if top:\n return sorted_sents[:top]\n else:\n return sorted_sents", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. 
Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def get_seven_days_stat(cls):\n return cls.get_specified_days_stat(7)", "def getTopNTweets(retrievedTweets, numberOfTweets):\n if sortBy=='newest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'], reverse=True)\n elif sortBy=='oldest':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['id'],reverse=False)\n elif sortBy=='favorite_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['favorite_count'],reverse=True)\n elif sortBy=='retweet_count':\n retrievedTweets = sorted(retrievedTweets, key=lambda k: k['retweet_count'],reverse=True)\n else:\n retrievedTweets = random.sample(retrievedTweets, numberOfTweets)\n return retrievedTweets[:numberOfTweets]", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def top10(self, top10: List[Word]):\n\n self._top10 = top10" ]
[ "0.5406669", "0.54055756", "0.5391852", "0.536155", "0.5360676", "0.5214797", "0.5178173", "0.51120335", "0.50702655", "0.5060932", "0.5052575", "0.501701", "0.5007129", "0.50032103", "0.49881318", "0.49561754", "0.49547327", "0.49525204", "0.4947948", "0.49175078", "0.4904402", "0.48932648", "0.48711705", "0.48575914", "0.48542547", "0.48525265", "0.485228", "0.48437354", "0.4839257", "0.4793245" ]
0.54947144
0
Load the feed url into self.entries using the feedparser module.
def __init__(self, url=URL): self.entries = feedparser.parse(url).entries
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed", "def feed(self):\n feed_dict = feedparser.parse(self.URL)\n return [self.entry_dict(entry) for entry in feed_dict['entries']]", "def fetch_url_feed(self, url, **args):\n return self.fetch(\"/url\", url=url, **args)", "def _retrieveFeed(self):\n url = self.url\n if url!='':\n self._last_update_time_in_minutes = time.time()/60\n self._last_update_time = DateTime()\n d = feedparser.parse(url)\n if getattr(d, 'bozo', 0) == 1 and not isinstance(d.get('bozo_exception'),\n ACCEPTED_FEEDPARSER_EXCEPTIONS):\n self._loaded = True # we tried at least but have a failed load\n self._failed = True\n return False\n self._title = d.feed.title\n self._siteurl = d.feed.link\n self._items = []\n for item in d['items']:\n try:\n link = item.links[0]['href']\n itemdict = {\n 'title': item.title,\n 'url': link,\n 'summary': item.get('description', ''),\n }\n if hasattr(item, \"updated\"):\n try:\n itemdict['updated'] = DateTime(item.updated)\n except DateTimeError:\n # It's okay to drop it because in the\n # template, this is checked with\n # ``exists:``\n pass\n except AttributeError:\n continue\n self._items.append(itemdict)\n self._loaded = True\n self._failed = False\n return True\n self._loaded = True\n self._failed = True # no url set means failed\n return False # no url set, although that actually should not really happen", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def call_feed(url: str) -> dict:\n\n if not url:\n return {}\n feed = feedparser.parse(url)\n return feed", "def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check URL(is RSS?) 
and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items", "def fetch(feed):\n # Fetch the feed data.\n data = feedparser.parse(feed.ext_url)\n new_articles = []\n\n # If the `bozo` value is anything\n # but 0, there was an error parsing (or connecting) to the feed.\n if data.bozo:\n # Some errors are ok.\n if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n raise data.bozo_exception\n\n for entry in data.entries:\n\n # URL for this entry.\n url = entry['links'][0]['href']\n\n # Check for an existing Article.\n # If one exists, skip.\n if Article.objects(ext_url=url).first():\n continue\n\n data = extractor.extract(url, existing_data=entry)\n\n if data is None:\n continue\n\n # Secondary check for an existing Article,\n # by checking the title and source.\n existing = Article.objects(title=data['title']).first()\n if existing and existing.feed.source == feed.source:\n continue\n\n data['feed'] = feed\n\n article = Article(**data)\n article.save()\n new_articles.append(article)\n\n return new_articles", "def request_rss(self, url):\n return feedparser.parse(url)", "def process(url):\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n print entry\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def _populate_posts(self, channel, url):\n import feedparser\n\n Post = get_model('articles', 'Post')\n Image = get_model('images', 'Image')\n\n parser = feedparser.parse(url)\n\n for entry in parser['entries']:\n # Some entries are incomplete and have only the title, need to\n # ignore these entries.\n if not entry.get('summary'):\n continue\n\n # The title may have only 140 characters\n title = self._truncate_string(entry['title'], 140)\n slug = slugify(title)\n headline = entry['summary']\n\n # Some entries do not have the 'content' field, in this case we\n # get the 'summary' field instead.\n if entry.get('content'):\n content = entry['content'][0]['value']\n else:\n content = entry['summary']\n\n # When we find a entry that already is registered we don't need\n # continue because the following registries already be registered.\n exists = Post.objects.filter(slug=slug).count()\n if exists:\n break\n\n # Check if has some image in the post content.\n # NOTE: For the best user experience we use only the posts that\n # have images.\n image_url = self._get_image_url_in_content(content)\n if image_url:\n main_image = Image.objects.create(\n title=title,\n slug=slug,\n archive_link=image_url,\n published=True,\n user=self._user\n )\n # Generate the 'short_title' based on 'content'\n short_title = re.sub('<[^<]+?>', '', content).encode('utf-8')\n short_title = self._truncate_string(short_title.strip(), 140)\n\n post 
= Post.objects.create(\n title=title,\n short_title=short_title,\n slug=slug,\n headline=headline,\n content=content,\n channel=channel,\n main_image=main_image,\n show_on_root_channel=True,\n published=True,\n hat='',\n user=self._user\n )", "def __init__(self, URL):\n\n # add to topics list to retreive different topics from CBC RSS feed\n base = \"/cmlink/rss-\"\n topics = [\"politics\", \"technology\", \"sports\"]\n article_id = 1\n self.articles = []\n\n for topic in topics:\n\n # build our url string to make it dynamic\n full_url = URL + base + topic\n # gives us all article urls\n urls = getArticleURLs(full_url)\n\n for url in urls:\n new_article = Article(url, topic, article_id)\n\n # is it a valid article url?\n if new_article.article_id != -1:\n article_id += 1\n self.articles.append(new_article)\n \n # break # remove this to get all articles", "def add_by_url(self, feed_url, name=None):\n feed_data = {\"url\": feed_url}\n if name:\n feed_data['name'] = name\n else:\n f = feedparser.parse(feed_url)\n feed_data['name'] = f.feed.title\n feed = Feed(feed_data, self)\n feed._save()\n self.feeds.append(feed)", "def retrieveFeed(self, rss_url):\n url = 'http://{}'.format(rss_url)\n result = feedparser.parse(url)\n if result.status != 200:\n sys.stdout.write('request failed for retrieve this RSS ({})\\n'.format(url))\n else:\n self.storeFeeds(url, result['items'])", "def feed_loader(self, feed_name):\n if feed_name in self.feeds:\n return self.feeds[feed_name]\n for filename in os.listdir(self.FEED_DIR):\n name, ext = os.path.splitext(filename)\n if ext == \".py\" and name == feed_name:\n # remember, this can fail if script overlays existing module\n try:\n mod = __import__(feed_name)\n if hasattr(mod, \"feed\"):\n spec = inspect.getargspec(mod.feed)\n if len(spec[0]) != 3:\n continue\n self.feeds[feed_name] = mod.feed\n return mod.feed\n except:\n pass\n return None", "def for_url(self, url):\n if url is None or url == '':\n raise BadURLException('Did you forget to provide a feed URL?')\n def txn():\n feed = RegisteredFeed.get_by_key_name(url)\n if feed is None:\n u = urlparse( url )\n q = parse_qs( u.query )\n if u.scheme != 'http' or u.netloc != 'rss.netflix.com' or 'id' not in q:\n raise BadURLException('Invalid Netflix feed URL was provided')\n feed = RegisteredFeed(\n key_name = url,\n id = q['id'][0],\n feed_type = u.path,\n rand = random.random()\n )\n feed.put()\n return feed\n feed = db.run_in_transaction(txn)\n if feed.slug is None:\n feed.slug = get_slug()\n feed.put()\n return feed", "def fetch_parsed_feed(feed_url):\n feed = feedparser.parse(feed_url)\n parse_error = hasattr(feed, 'bozo_exception') and (\n isinstance(feed.bozo_exception, SAXException))\n if not feed.bozo or not parse_error:\n return feed", "def add_rss(url):", "def __init__(self, data, feed_repo):\n super(Feed, self).__init__()\n self.url = data['url']\n self.name = data.get('name')\n read_stamp = data.get(\"last_read\")\n if read_stamp:\n self.last_read = datetime.datetime.fromtimestamp(read_stamp, tz=datetime.timezone.utc)\n else:\n self.last_read = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)\n self._repo = feed_repo", "def pull_feed(feed_url):\n app.logger.debug('Parsing content from %s.', feed_url)\n feed = feedparser.parse(feed_url)\n\n # Process html to remove unwanted mark-up and fix links\n post = ''\n if feed['entries']:\n soup = BeautifulSoup(feed['entries'][0].summary, 'html.parser')\n\n # Remove edited by paragraph\n soup.p.extract()\n\n # Remove final div in the feed\n feed_div 
= soup.find('div', class_='feed')\n children_divs = feed_div.findAll('div')\n children_divs[len(children_divs) - 1].extract()\n\n # Translate any in page links to use relative URL\n base = feed['entries'][0].summary_detail.base\n links = feed_div.select('a[href^=\"' + base + '\"]')\n for link in links:\n link['href'] = link['href'].replace(base, '')\n post = str(soup)\n\n elif feed.get('bozo_exception'):\n app.logger.error('Error retrieving feed for % with error %'.format(feed_url,\n str(feed.get('bozo_exception'))))\n return post", "def __add_entries(entries, feed):\n\n for entry in entries:\n try:\n # If there is entry with such title in this feed\n Entry.objects.get(title=entry.title, feed=feed)\n continue\n except Entry.DoesNotExist:\n pass\n\n # Try to find another entries with such title\n e = Entry.objects.filter(title=entry.title)\n # If found\n if len(e) != 0:\n e = e[0]\n # Copy all containing\n entry_obj = Entry(title=e.title,\n description=e.description,\n entry=e.entry, feed=feed)\n entry_obj.save()\n # Or create new Entry from scratch\n else:\n entry_name = entry.title + '.html'\n # If bad link or entry name\n try:\n urlretrieve(entry.link, entry_name)\n\n entry_file = open(entry_name)\n entry_file = File(entry_file)\n\n entry_obj = Entry(title=entry.title,\n description=entry.description,\n entry=entry_file, feed=feed)\n entry_obj.save()\n\n os.remove(entry_name)\n except:\n # Go to next entry\n continue", "def parse(self, url):\n pass", "def get_content(self, data):\n self.name = name = data['feed'].get('title')\n for feed in data['entries']:\n title = feed.get('title', 'Absence of title')\n link = feed.get('link', 'Absence of link')\n date = feed.get('published_parsed', 'Absence of date')\n img = get_img_container(link)\n summary_list = []\n links = []\n if feed.get('summary'):\n summary_list = [feed.get('summary')]\n if feed.get('links'):\n uncleaned_links = feed.get('links')\n links = string_handlers.get_links(uncleaned_links)\n img.extend(if_link_is_image(uncleaned_links))\n fields = 'name, title, link, date, img, content, links'\n item = namedtuple('item', fields)._make((name, title, link, date, img, summary_list, links))\n save_feed_into_cache(item)\n self.items.append(item)", "def load(self, url):\n pass", "def load(self, url):\n pass", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. 
\n updates[str(item.link)] = a_time \n return (total, updates)", "def __update_feed(feed_obj):\n\n url = feed_obj.url\n feed = feedparser.parse(url)\n\n try:\n feed.feed.title\n except AttributeError:\n return\n\n # List of new entries in downloaded XML\n new_entries = feed.entries\n new_entries_titles = [entry.title for entry in new_entries]\n\n # List of current entries in database\n old_entries = Entry.objects.filter(feed=feed_obj)\n old_entries_titles = [entry.title for entry in old_entries]\n\n # Check what old entries arn't in new entries\n # They will be deleted\n for entry_title in old_entries_titles:\n if entry_title not in new_entries_titles:\n Entry.objects.get(title=entry_title, feed=feed_obj).delete()\n\n # Add all new entries\n __add_entries(new_entries, feed_obj)\n\n # Update time and save\n feed_obj.time = datetime.now()\n feed_obj.save()", "def __init__(self, main_feed):\n self.main_feed = main_feed" ]
[ "0.7183806", "0.68700135", "0.66769147", "0.6660742", "0.6519246", "0.6506825", "0.6443622", "0.6431346", "0.64295334", "0.6419183", "0.6348568", "0.6227926", "0.61853313", "0.6180963", "0.610789", "0.6107114", "0.607002", "0.6055648", "0.60153824", "0.5997344", "0.5982748", "0.59666306", "0.5947592", "0.59391445", "0.5932164", "0.5932164", "0.5900087", "0.58668697", "0.5855802", "0.5853992" ]
0.8170361
0
Return a list of episode IDs (itunes_episode attribute) of the episodes the pass in domain was mentioned in.
def get_episode_numbers_for_mentioned_domain(self, domain: str) -> list: return [ep.itunes_episode for ep in self.entries if domain.lower() in ep.summary.lower()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def episodes(self):\n episodes = []\n for series in self.series:\n episodes.extend(series.episodes)\n return episodes", "def episodes(self):\n episodes = []\n for season in self.seasons:\n episodes.extend(season.episodes)\n return episodes", "def _get_episodes(self):\n return [series_episode for series_episode in SeriesEpisode.objects.filter(series=self)]", "def get_episode_list(self):\n if self.episodes is not None:\n return self.episodes\n\n self.episodes = []\n\n # Now before we return pre-emptively fetch the episode list.\n # XXX We _could_ put this in a 'get_episode_list' method that does the\n # fetching then.. but for now we are just going to have it done\n # here.\n #\n if len(self.episode_guide_urls) == 0:\n return self.episodes\n\n for url in self.episode_guide_urls:\n url_data = url.get()\n\n # Now we run the GetEpisodeList rules on this data that\n # we just retrieved.\n #\n self.scraper.parser.set_buffer(1, url_data)\n self.scraper.parser.set_buffer(2, url.url)\n\n # This gets us a XML string with the list of episodes in it.\n # parse this in to a dom and then go through each <episode>\n # element creating an Episode object to append to our episode\n # list\n ep_list_result = self.scraper.parser.parse(FN_GET_EPISODE_LIST,\n self.scraper.settings)\n dom = parseString(ep_list_result)\n eps = dom.firstChild\n ep = first_child(eps, \"episode\")\n while ep:\n self.episodes.append(Episode(ep, self, self.scraper))\n ep = next_sibling(ep, \"episode\")\n dom.unlink()\n dom = None\n \n return self.episodes", "def get_episode_ids(self, show_id, season):\n # get the page of the season of the show\n logger.info('Getting the page of show id %d, season %d', show_id, season)\n r = self.session.get(self.server_url + 'tvshow-%d-%d.html' % (show_id, season), timeout=10)\n soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])\n\n # loop over episode rows\n episode_ids = {}\n for row in soup.select('table#table5 tr'):\n # skip rows that do not have a link to the episode page\n if not row('a', href=episode_id_re):\n continue\n\n # extract data from the cells\n cells = row('td')\n episode = int(cells[0].text.split('x')[1])\n episode_id = int(cells[1].a['href'][8:-5])\n episode_ids[episode] = episode_id\n\n if episode_ids:\n logger.debug('Found episode ids %r', episode_ids)\n else:\n logger.warning('No episode ids found')\n\n soup.decompose()\n soup = None\n\n return episode_ids", "def episodes(self):\n if self._episode_cache_ver == self._db._version:\n return self._episode_cache\n\n # Get only rows from the preferred provider (provider id is not null) except\n # for season 0 (specials), where we take all episodes.\n if self.number > 0:\n idmap = {self.series.provider.IDATTR: db.QExpr('>', u'')}\n else:\n idmap = {}\n dbrows = self._db.query(type='episode', parent=self.series._dbrow, season=self.number, **idmap)\n self._episode_cache = [Episode(self._db, self.series, self, dbrow) for dbrow in dbrows]\n self._episode_cache.sort(key=lambda e: e.number)\n self._episode_cache_ver = self._db._version\n return self._episode_cache", "def episodes(self):\n for episode in self._root.iter('Episode'):\n entry = {}\n entry['season'] = int(episode.find('SeasonNumber').text)\n entry['episode'] = int(episode.find('EpisodeNumber').text)\n entry['title'] = unicode(episode.find('EpisodeName').text)\n if entry['title'] == '':\n continue\n entry['description'] = unicode(episode.find('Overview').text)\n entry['firstAired'] = episode.find('FirstAired').text\n yield entry", "def get_eids(self):\n return 
[d['eid'] for d in self._json]", "def get_episode_list(self, show):\n\n # If the show has no episode guide url's, then there is nothing\n # we can fetch..\n #\n if len(show.episode_guide_urls) == 0:\n return []\n\n episode_list = []\n\n for url in show.episode_guide_urls:\n self.logger.debug(\"get_episode_list, data from: %s\" % url.url)\n url_data = url.get()\n\n # Now we run the GetEpisodeList rules on this data that\n # we just retrieved.\n #\n self.parser.set_buffer(1, url_data)\n self.parser.set_buffer(2, url.url)\n\n # This gets us a XML string with the list of episodes in it.\n # parse this in to a dom and then go through each <episode>\n # element creating an Episode object to append to our episode\n # list\n ep_list_result = self.parser.parse(FN_GET_EPISODE_LIST,\n self.settings)\n dom =parseString(ep_list_result)\n eps = dom.firstChild\n ep = first_child(eps, \"episode\")\n while ep:\n episode_list.append(Episode(ep, show, self))\n ep = next_sibling(ep, \"episode\")\n dom.unlink()\n return episode_list", "def filter_aired_episodes(self, episodes):\n #Set now one day in the past or check download\n now = datetime.datetime.now() - datetime.timedelta(days=1)\n aired_episodes = [episode for episode in episodes if\n episode.get_first_aired() and\n datetime.datetime.strptime(episode.get_first_aired(),\n \"%Y-%m-%d\")\n <= now]\n return aired_episodes", "def create_episodes_from_feed(self, entries):\n guids = self.podcast.episode_set.values_list(\"guid\", flat=True)\n entries = [entry for entry in entries if entry[\"id\"] not in guids]\n\n episodes = [\n episode\n for episode in [self.create_episode_from_feed(entry) for entry in entries]\n if episode\n ]\n return Episode.objects.bulk_create(episodes, ignore_conflicts=True)", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]", "def scraping_episodes(self, serie_data, episodes_list):\n episodes_data = []\n for episode in episodes_list:\n # Se arma este dict para localizar los campos\n # en el json y que sea mas facil procesarlos mas adelante\n epi_details = episode[0]['body']['details']\n epi_dict = {\n 'ParentId': serie_data.id,\n 'ParentTitle': serie_data.clean_title,\n 'Id': episode[0]['id'],\n 'Title': epi_details['title'],\n 'Type': 'episode',\n 'JSON': {\n 'Synopsis': epi_details['description'],\n 'Metadata': epi_details['metadata'].replace('\\xa0', ''),\n 'Rating': epi_details['localizedRating']['value'],\n 'Image': epi_details,\n 'Groups': episode[1]['body']['groups'],\n 'SeasonAndNumber': episode[2]['body']['metadata'],\n 'isFree': episode[0]['body']['isFree']\n }\n }\n payload_epi = self.build_payload(epi_dict)\n # Si la serie es original sus episodios también\n payload_epi.is_original = serie_data.is_original\n episodes_data.append(payload_epi)\n payload_epi = payload_epi.payload_episode()\n Datamanager._checkDBandAppend(\n self, payload_epi, self.scraped_epi, self.payloads_epi,\n isEpi=True\n )\n return episodes_data", "def get_podcast_episodes(url):\n\n def parse_pubdate(date_string):\n \"\"\"\n Change pubdate string to datetime object. 
Tries a bunch of\n possible formats, but if none of them is a match, it will\n return a epoch = 0 datetime object\n\n :param date_string: A string representing a date\n :return: datetime object\n \"\"\"\n date_formats = (\n '%a, %d %b %Y %H:%M:%S +0000',\n '%a, %d %b %Y',\n '%a, %d %b %Y%H:%M:%S +0000',\n '%a, %d %b %Y %H:%M',\n '%a, %d %b %Y %H.%M'\n )\n df_generator = (format for format in date_formats)\n\n date = None\n while date is None:\n try:\n date = datetime.strptime(date_string, next(df_generator))\n except ValueError:\n pass\n except StopIteration:\n date = datetime.fromtimestamp(0)\n\n return date\n\n doc = get_document(url)\n\n return (\n {\n 'url': item.select('guid')[0].text,\n 'Premiered': parse_pubdate(\n item.select('pubdate')[0].text\n ).strftime(\"%d.%m.%Y\"),\n # 'Duration': duration_to_seconds(item.find('itunes:duration').text),\n 'title': item.title.text,\n 'Plot': item.description.text\n }\n for item in doc.find_all(\"item\")\n )", "def get_tv_episodes(self) -> int:\n return len(glob.glob(os.path.join(\n os.path.dirname(self.file),\n f\"*{os.path.splitext(self.file)[-1]}\"\n )))", "def parse_episode(filename):\n print_info('Extracting episode from {0}'.format(filename))\n for regex in EPISODE_NUM_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_ep = m.group('Episode').lower()\n print_info('Extracted episode: {0}'.format(extracted_ep))\n\n if '-' in extracted_ep:\n print_info('Multiple Episodes found')\n tokens = extracted_ep.split('-e')\n first_token = tokens[0]\n last_token = tokens[len(tokens)-1]\n return parse_episode(first_token) + '-' + parse_episode(last_token)\n else:\n ep_num = int(extracted_ep)\n if ep_num is not None and ep_num > 0:\n print_info('Episode might be: {0}'.format(ep_num))\n return 'E' + format_num(ep_num)\n\n return None", "def _get_feed_episodes(self, show_key, **kwargs):\n\t\tinfo(\"Getting episodes for Nyaa/{}\".format(show_key))\n\t\tif \"domain\" not in self.config or not self.config[\"domain\"]:\n\t\t\terror(\" Domain not specified in config\")\n\t\t\treturn list()\n\t\t\n\t\t# Send request\n\t\tquery = re.sub(\"[`~!@#$%^&*()+=:;,.<>?/|\\\"]+\", \" \", show_key)\n\t\tquery = re.sub(\"season\", \" \", query, flags=re.I)\n\t\tquery = re.sub(\" +\", \" \", query)\n\t\tquery = re.sub(\"(?:[^ ])-\", \" \", query) # do not ignore the NOT operator\n\t\tdebug(\" query={}\".format(query))\n\t\tquery = url_quote(query, safe=\"\", errors=\"ignore\")\n\t\t\n\t\tdomain = self.config.get(\"domain\", \"nyaa.si\")\n\t\tfilter_ = self.config.get(\"filter\", \"2\")\n\t\texcludes = self.config.get(\"excluded_users\", \"\").replace(\" \", \"\")\n\t\turl = self._search_base.format(domain=domain, filter=filter_, excludes=excludes, q=query)\n\t\tresponse = self.request(url, rss=True, **kwargs)\n\t\tif response is None:\n\t\t\terror(\"Cannot get latest show for Nyaa/{}\".format(show_key))\n\t\t\treturn list()\n\t\t\n\t\t# Parse RSS feed\n\t\tif not _verify_feed(response):\n\t\t\twarning(\"Parsed feed could not be verified, may have unexpected results\")\n\t\treturn response.get(\"entries\", list())", "def get_existing_artists(self):\n\t\tartists = self.db.artists.find()\n\t\tids = []\n\t\tfor artist in artists:\n\t\t\tids.append(artist['id'])\n\t\treturn ids", "def GetEpisodes(self, start=0, end=0, sortmethod='episode', sortorder='ascending', tvshowid=None, hidewatched=False, filter=''):\n self.logger.debug(\"Loading information for TVID\" + str(tvshowid))\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': 
sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties = ['episode', 'season', 'thumbnail', 'plot', 'file', 'playcount']\n limits = {'start': int(start), 'end': int(end)}\n filter = {'field': 'title', 'operator': 'contains', 'value': filter}\n if hidewatched == \"1\":\n filter = {\"and\": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}\n episodes = xbmc.VideoLibrary.GetEpisodes(sort=sort, tvshowid=int(tvshowid), properties=properties, limits=limits, filter=filter)\n return episodes", "def get_es_ids(self):\n search = self.search.source(['uri']).sort(['uri'])\n es_ids = [item.meta.id for item in search.scan()]\n return es_ids", "def external_ids(self, **kwargs):\n path = self._get_movie_id_path('external_ids')\n resp = self._get_method(path, kwargs)\n return resp", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def get_episodes_for_sub(self, sub_id):\n html = self._get_html_for_subject_eps(sub_id)\n return BangumiEpisode.eps_from_html(html)", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_hardware(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]", "def get_data_source_identifiers(self):\n _identifiers = []\n for lang, feed in config.RSS_NEWS_FEEDS.items():\n logger.debug(u\"consultando press release (lang: %s), feed: %s\" % (lang, feed))\n feed_url_by_lang = feed['url'].format(lang) # ex: http://blog.scielo.org/en/feed/\n feed_entries_list = self.get_feed_entries(feed_url_by_lang)\n for raw_feed_entry in feed_entries_list:\n _identifiers.append(raw_feed_entry)\n return _identifiers", "def episodeNumber(self):\n return self.index", "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def get_all_answers(self, player: Player, episode: Episode, max_episode: int) -> List[Answer]:\n all_answers = []\n for qid, answer in self.answers.items():\n if isinstance(answer, DelayedAnswer):\n if answer.known_from > max_episode:\n continue\n answer = answer.answer\n\n question = episode.questions[qid]\n answer = set(question.answers[answer])\n all_answers.append(Answer(player, episode, question, answer))\n\n return all_answers", "def list_episodes(title, uri):\r\n\r\n # Set plugin category. It is displayed in some skins as the name\r\n # of the current section.\r\n xbmcplugin.setPluginCategory(_handle, title)\r\n\r\n # Get the list of videos in the category.\r\n result = _get_data(uri)\r\n # Iterate through videos.\r\n #logger.info(\"######: {}, log: {}########\".format('rk1', result['items']))\r\n for video in result['items']:\r\n # {\r\n # \"title\": \"Sakthi returns to India\",\r\n # \"contentId\": 1000036012,\r\n # \"uri\": \"https://api.hotstar.com/o/v1/episode/detail?id=80096&contentId=\r\n # 1000036012&offset=0&size=20&tao=0&tas=5\",\r\n # \"description\": \"Saravanana and Meenakshi's oldest son, Sakthi, returns to\r\n # India 25 years after his parents had left it. 
He wants to search for a bride,\",\r\n # \"duration\": 1332,\r\n # \"contentType\": \"EPISODE\",\r\n # \"contentProvider\": \"Global Villagers\",\r\n # \"cpDisplayName\": \"Global Villagers\",\r\n # \"assetType\": \"EPISODE\",\r\n # \"genre\": [\r\n # \"Family\"\r\n # ],\r\n # \"lang\": [\r\n # \"Tamil\"\r\n # ],\r\n # \"channelName\": \"Star Vijay\",\r\n # \"seasonNo\": 1,\r\n # \"episodeNo\": 520,\r\n # \"premium\": false,\r\n # \"live\": false,\r\n # \"hboContent\": false,\r\n # \"encrypted\": false,\r\n # \"startDate\": 1416649260,\r\n # \"endDate\": 4127812200,\r\n # \"broadCastDate\": 1382367600,\r\n # \"showName\": \"Saravanan Meenatchi\",\r\n # \"showId\": 99,\r\n # \"showShortTitle\": \"Saravanan Meenatchi\",\r\n # \"seasonName\": \"Chapter 1\",\r\n # \"playbackUri\": \"https://api.hotstar.com/h/v1/play?contentId=1000036012\",\r\n # \"contentDownloadable\": false\r\n # },\r\n _add_video_item(video)\r\n #logger.info(\"######: {}, log: {}########\".format('rk2', video))\r\n\r\n _add_next_page_and_search_item(result['nextPage'], 'episodes', title)\r\n\r\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\r\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_NONE)\r\n\r\n # Finish creating a virtual folder.\r\n xbmcplugin.endOfDirectory(_handle)", "def n_episodes(self):\n raise NotImplementedError" ]
[ "0.68623555", "0.6790614", "0.64584225", "0.62703633", "0.6171073", "0.616064", "0.5982251", "0.58765996", "0.5858648", "0.5848835", "0.57939684", "0.5776999", "0.57678777", "0.57606727", "0.57389754", "0.56692666", "0.5666441", "0.56479317", "0.5601704", "0.55696493", "0.5528999", "0.5490239", "0.54353815", "0.54064524", "0.5405624", "0.54002", "0.5390178", "0.5389565", "0.5388684", "0.53677523" ]
0.8150568
0
Return the number of episodes that had one of more special guests featured (use SPECIAL_GUEST).
def number_episodes_with_special_guest(self) -> int: return len([ep for ep in self.entries if SPECIAL_GUEST in ep.summary])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def n_episodes(self):\n raise NotImplementedError", "def get_tv_episodes(self) -> int:\n return len(glob.glob(os.path.join(\n os.path.dirname(self.file),\n f\"*{os.path.splitext(self.file)[-1]}\"\n )))", "def n_featured():\r\n sql = text('''select count(*) from featured;''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count", "def get_nopes_count():\n return Flag.objects.filter(flag=Flag.NOPE_FLAG).count()", "def __len__(self) -> int:\n return len(self.episodes)", "def episodes_done_inc(self):\n with _MonitorEnv._lock:\n self._episodes_done += 1\n return self._episodes_done", "def count_sheeps(sheep):\n return sheep.count(True)", "def count_explorations():\n return exp_models.ExplorationModel.get_exploration_count()", "def getTrainEpisodes(self):\n print(\"Do you want to train the IA?\")\n while True:\n num_episodes = raw_input(\"Type number iterations to train, [0] to not train: \")\n try:\n if int(num_episodes) >= 0:\n return int(num_episodes)\n print(\"Invalid input, try again\")\n except:\n print(\"Invalid input, try again\")\n return", "def max_guests(appetite: list[int], cake: list[int]) -> int:\n guest_count = 0\n\n appetite_index = len(appetite) - 1\n cake_index = len(cake) - 1\n\n while appetite_index >= 0 and cake_index >= 0:\n appetite_size = appetite[appetite_index]\n cake_size = cake[cake_index]\n\n if cake_size >= appetite_size:\n # cake is fed\n cake_index -= 1\n guest_count += 1\n\n # else, the person is skipped\n appetite_index -= 1\n\n return guest_count", "def fruit_nb(x):\r\n return len([y for y in metamer(x) if Feature(y, 'fruit')])", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def episodeNumber(self):\n return self.index", "def house_nays(self):\n return self.house_votes[1]", "def get_guests(self, grab, json_info):\n guests = json_info.get('guests')\n if guests is not None:\n return int(guests)\n guests = self.find_boatview__stats(grab, \"Max. 
guests\")\n if guests is not None:\n return int(guests)\n else:\n logging.debug(\"Guests not found in: %s\" % grab.doc.url)\n return None", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched", "def get_number_un_watched(self):\n movies_un_watched = 0\n for movie in self.movies:\n if not movie.is_watched:\n movies_un_watched += 1\n return movies_un_watched", "def num_animals(self):\n return self._num_herbs + self._num_carns", "def nb_elephants(self):\n return self.__nb_elephants", "def test_selecting_only_audio_episodes(\n only_audio_episodes: List[LepEpisode],\n) -> None:\n assert len(only_audio_episodes) == 14 # Without duplicates", "def compute_way(episode):\n episode_classes, _ = tf.unique(episode.train_labels)\n way = tf.size(episode_classes)\n return way", "def get_episode_numbers_for_mentioned_domain(self, domain: str) -> list:\n return [ep.itunes_episode\n for ep in self.entries\n if domain.lower() in ep.summary.lower()]", "def count_evens(list):\n pass", "def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward", "def get_item_guests(self, soup: BeautifulSoup) -> None:\n try:\n guests = (\n soup.find(\"div\", class_=\"_kqh46o\")\n .find_all(\"span\", class_=\"_3hmsj\")[0]\n .get_text()\n )\n guests = re.findall(\"[0-9]+\", guests)[0]\n except (AttributeError, IndexError):\n guests = None\n self.__collected_dic[\"guests\"].append(guests)", "def per_cell_animal_count(self):\n print self.island.individuals()", "def countGuesses(hidden):\r\n guess = random.choice(range(0, 100)) # 0 to 99, inclusive\r\n numguesses = 1 # we just made one guess, above\r\n while guess != hidden:\r\n guess = random.choice(range(0, 100)) # guess again!\r\n numguesses += 1 # add one to our number of guesses\r\n return numguesses", "def episodeNumber(self):\r\n if self.ignore_radio.isChecked():\r\n return tv_types.UNRESOLVED_KEY\r\n else:\r\n return self.episode_combo_box.itemData(self.episode_combo_box.currentIndex()).toInt()[0]", "def sixes(dice):\n return sum([x for x in dice if x == 6])" ]
[ "0.649917", "0.62428266", "0.5887413", "0.5634373", "0.552352", "0.54988056", "0.54770404", "0.53177136", "0.5314232", "0.52770793", "0.52533907", "0.5198246", "0.5186868", "0.5182951", "0.51779616", "0.5158745", "0.5125548", "0.51043326", "0.5093988", "0.50591505", "0.50513995", "0.50337696", "0.50266016", "0.5019169", "0.49873656", "0.49862736", "0.4969825", "0.49668127", "0.49610776", "0.49581206" ]
0.8896974
0
Return the average duration in seconds of a Python Bytes episode, as
def get_average_duration_episode_in_seconds(self) -> NamedTuple: times = [ep.itunes_duration for ep in self.entries] format_times = [] for time in times: if not time.startswith('00'): time = '0' + time format_times.append(time) dts = [datetime.strptime(x, '%H:%M:%S') for x in format_times] secs = [timedelta( hours=x.hour, minutes=x.minute, seconds=x.second ).seconds for x in dts] return Duration( floor(mean(secs)), max(format_times), min(format_times) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def find_average_duration(video: dict):\n global num_videos\n global total_duration\n\n if duration := video.get('duration'):\n with data_lock:\n num_videos += 1\n total_duration += (duration/1000)\n show_progress()", "def bus_interruptions_avg_duration(self) -> float:\n return self.dss_obj.BUSF(8, 0)", "def to_length_secs(self):\n return (self.bpm / 60.0) / self.period", "def duration(self):\n return self.sound.nframes", "def duration(self):\n with audioread.audio_open(self.path) as f:\n return f.duration", "def duration(self) -> float:\n return float(len(self.__samples))/float(self.__rate)", "def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.", "def avg_record_time(self):\n summed_time = 0\n for time_rec in self:\n try:\n summed_time += float(sanitize(time_rec))\n except Exception as err:\n return err\n return summed_time / len(self)", "def get_total_seconds(td):\n return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6", "def duration(self):\n return self._end - self._begin", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def decode(self, data):\r\n return Duration.from_sec(float(data))", "def averageTime(self):\n \n pass", "def get_duration(file):\n cmd = 'ffprobe -i \"{}\" -show_entries format=duration -v quiet -of csv=\"p=0\"'.format(file)\n try:\n output = subprocess.check_output(\n cmd,\n shell=True, # Let this run in the shell\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print(e.output)\n output = 0\n # return round(float(output)) # ugly, but rounds your seconds up or down\n return float(output)", "def duration(self):\n duration = 0\n for wf in self._waveforms:\n duration += wf.duration\n return duration", "def avgtime(self):\n return (self._total_time['value'] / 1000) / self._total_time['count'] if self._total_time['count'] else 0", "def get_duration(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=duration -of default=nokey=1:noprint_wrappers=1 ' +\n filename).split()\n pid = subprocess.run(cmd, universal_newlines=True,\n stdout=subprocess.PIPE)\n if pid.returncode != 0:\n return None\n\n duration_exp = pid.stdout.rstrip()\n try:\n duration = float(duration_exp)\n except:\n duration = 0.\n return duration", "def average_seconds(data, board):\n return str(\n timedelta(seconds=mean(data[board]))\n ).split(\".\")[0]", "def get_duration(self):\n duration_ns = self.stream.InitialTimeToWaitGet()\n duration_ns += self.stream.NumberOfFramesGet() * self.stream.InterFrameGapGet()\n return datetime.timedelta(seconds=duration_ns / 1e9)", "def avg_inference_time(self):\n return self._avg_inference_time", "def duration(self) -> float:\n return self.delta_t * len(self)", "def duration(self) -> float:\n return self.delta_t * len(self)", "def duration_seconds(self):\n duration = 0.0\n if self.is_video() or self.is_audio():\n if self.__dict__['duration']:\n try:\n duration = float(self.__dict__['duration'])\n except ValueError:\n raise FFProbeError('None numeric duration')\n return duration", "def get_duration(self):\n 
duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration", "def _duration(self):\n if getattr(self, '_duration_cache', None):\n return self._duration_cache\n duration = extractMetadata(guessParser(\\\n InputIOStream(self))).get('duration')\n if not duration:\n raise Exception(u'Not an audio file')\n else:\n duration = duration.seconds\n self._duration_cache = duration\n return duration", "def total_seconds(td):\n return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6", "def get_duration(self):\n frame_dur = self.get_frame_duration()\n num_frames = self.get_num_frames()\n motion_dur = frame_dur * (num_frames - 1)\n return motion_dur", "def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration" ]
[ "0.67595243", "0.65047795", "0.64271915", "0.64189667", "0.6367844", "0.6341444", "0.6284898", "0.6283205", "0.6277745", "0.62206906", "0.62178415", "0.6192925", "0.6191467", "0.6185916", "0.61730164", "0.6169899", "0.61567783", "0.6137445", "0.6130938", "0.6128692", "0.61263585", "0.6100325", "0.6096427", "0.6096427", "0.6083685", "0.6074", "0.6071608", "0.60646313", "0.60160536", "0.60068566" ]
0.71605974
0
Build an index from word to set of document indexes This does the exact same thing as create_index() except that it uses your htable. As a number of htable buckets, use 4011. Returns a listofbuckets hashtable representation.
def myhtable_create_index(files): res_buckets = htable(4011) for id, file in enumerate(files): if file[-4:] == '.txt': word_list = words(get_text(file)) for word in word_list: value = htable_get(res_buckets, word) if value == None: htable_put(res_buckets, word, {id}) else: value.add(id) htable_put(res_buckets, word, value) return res_buckets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createIndex(pages): \n index = defaultdict(list)\n for url, content, links in pages:\n counts = getNumberTerms(content)\n for term, count in counts.items():\n index[term].append((url, count))\n return index", "def perform_indexing(self, words_list):\n\n indexer_table = {}\n\n for word in words_list:\n hash_value = self.calculate_weighted_hash(word)\n freq_table = calculate_frequency_table(word)\n\n if hash_value not in indexer_table:\n indexer_table[hash_value] = {}\n indexer_table[hash_value][as_set(freq_table)] = [word]\n else:\n if as_set(freq_table) not in indexer_table[hash_value]:\n indexer_table[hash_value][as_set(freq_table)] = [word]\n else:\n indexer_table[hash_value][as_set(freq_table)].append(word)\n\n return indexer_table", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def create_index(files):\n\n wordlist = [words(get_text(files[i])) for i in range(len(files))]\n\n combinelist = defaultdict(set)\n\n for i in range(len(files)):\n d = dict.fromkeys(wordlist[i], i)\n for key, value in d.items():\n combinelist[key].add(value)\n\n return combinelist", "def create_index_tables(self):\n # List of urls that have been indexed\n self.con.execute('create table urllist(url)')\n # List of words\n self.con.execute('create table wordlist(word)')\n # What doc the word is and where it is in the doc\n self.con.execute('create table wordlocation(urlid, wordid, location)')\n # Indicates a link from one url to another\n self.con.execute('create table link(fromid integer, toid integer)')\n # which words are actually in a link\n self.con.execute('create table linkwords(wordid, linkid)')\n self.con.execute('create index wordidx on wordlist(word)')\n self.con.execute('create index urlidx on urllist(url)')\n self.con.execute('create index wordurlidx on wordlocation(wordid)')\n self.con.execute('create index urltoidx on link(toid)')\n self.con.execute('create index urlfromidx on link(fromid)')\n self.dbcommit()", "def htable(nbuckets):", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "def makewordindex(wordset):\n indexmap = {}\n sortwords = sorted(list(wordset))\n for i in range(len(sortwords)):\n word = sortwords[i]\n indexmap[word] = i\n return indexmap", "def create_index():", "def new(num_buckets=256):\n aMap=[]", "def build_index():\n pass", "def weigthIndex(index, nPages): \n weighted_index = defaultdict(list)\n for term, docs in index.items():\n df = len(docs)\n for url, count in docs:\n weight = tf_idf(count, nPages, df)\n weighted_index[term].append((url, weight))\n return weighted_index", "def create_index(self, vocabulary=[]) -> dict:\n try:\n out = {}\n for word in vocabulary:\n if word in out:\n out[word] += 1\n else: \n out[word] = 1\n return(out)\n except Exception as error:\n print(f\"Error: self.create_index([...]) -> {error}\")", "def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def buildindex4(invertedindex, index):\n 
sortedbycount = sorted(invertedindex.items(), key=lambda x: x[1][1])\n startindex = math.floor((2*len(sortedbycount))/100) + 1\n for word, count in sortedbycount[startindex:]:\n index[word] = count\n return", "def index_schema_builder(table):\n conn = table.parent.parent.connection\n\n idx = OrderedDict()\n indexes = conn.execute(\"SHOW INDEXES FROM `%s`.`%s`\" % (table.parent.name, table.name))\n\n if not indexes:\n return idx\n\n for index in indexes:\n n = index['Key_name']\n if n not in idx:\n indexitem = IndexSchema(name=n, parent=table)\n indexitem.non_unique = (bool(index['Non_unique'])) # == not unique\n indexitem.table_name = index['Table']\n\n key_type = index['Index_type'].upper()\n\n if index['Key_name'].upper() == \"PRIMARY\":\n indexitem.kind = \"PRIMARY\"\n elif not indexitem.non_unique:\n indexitem.kind = \"UNIQUE\"\n elif key_type in ('FULLTEXT', 'SPATIAL'):\n indexitem.kind = key_type\n else:\n indexitem.kind = \"INDEX\"\n\n if key_type in ('BTREE', 'HASH', 'RTREE'):\n indexitem.type = key_type\n\n indexitem.collation = index['Collation']\n indexitem.comment = index['Comment']\n\n idx[n] = indexitem\n\n if index['Column_name'] not in idx[n].fields:\n idx[n].fields.insert(index['Seq_in_index'], (index['Column_name'], index['Sub_part'] or 0))\n\n return idx", "def _init_word_to_index(self, words: Set[str]) -> OrderedDict:\n word_to_index = OrderedDict()\n for word in words:\n if len(word_to_index) >= self.max_cache_size:\n break\n word_to_index[word] = len(word_to_index)\n\n return word_to_index", "def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n\n maxInt = sys.maxsize\n\n while True:\n # decrease the maxInt value by factor 10 \n # as long as the OverflowError occurs.\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt/10)\n\n #Dicitionary for saving the normalized weights for document vector\n lengths = dict()\n\n #Number of docs read from csv\n total_docs = 1\n max_docs = 1000\n\n #Data stored in csv read file line by line and save columns data\n with open(os.path.join(in_dir), 'r', encoding=\"utf8\") as data_csv:\n reader = csv.DictReader(data_csv)\n #each line corresponds to a document\n for doc in reader:\n\n #if(total_docs > max_docs):\n # break\n\n #If line is blank, just skip\n if doc is None:\n continue\n \n #save the different columns of the doc\n doc_id = int(doc[\"document_id\"])\n #Remove punctuation in title and content\n doc_title = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"title\"])\n doc_content = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"content\"])\n doc_date = doc[\"date_posted\"]\n doc_year = doc_date[0:4]\n doc_court = doc[\"court\"]\n\n #The dictionaryies are updated, postings lists are updated or new terms added\n update_terms_zones_dictionary(doc_id, doc_title, \".title\")\n update_terms_zones_dictionary(doc_id, doc_content, \".content\")\n update_date_field_dictionary(doc_id, doc_year)\n update_court_field_dictionary(doc_id, doc_court)\n\n total_docs += 1\n\n data_csv.close()\n\n #This section stores the Log TF using the word counts in the postings in the dictionary\n #It saves the Log TF in an auxiliary dictionary named lengths\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n\n for docID_termF in postings_list:\n #Get the vector for the doc, where the docId is docID_termF[0]\n #If there is no vector for this doc, then create a new dict\n #I am using dictionaries as the vector for 
the word only for the calculations\n doc_vector = lengths.get(docID_termF[0], dict())\n #I add the logarithmic term frequency to that document vector\n doc_vector[word] = 1 + math.log(docID_termF[1], 10)\n #Save that to its corresponding doc\n lengths[docID_termF[0]] = doc_vector\n\n #This section normalizes the Log TFs \n for doc_vector in lengths.values():\n #We store each of the values in a list and then use:\n #np.linalg.norm to do the normalization = sqrt(sum(values^2))\n weights = doc_vector.values()\n #We get the vectors magnitude\n magnitude = np.linalg.norm(np.array(list(weights)))\n for word in doc_vector.keys():\n #For every word entry in the vector \n #normalize by dividing the weight by the magnitude\n doc_vector[word] = doc_vector[word] / magnitude\n\n #This section replaces the word count in the tuple of the dictionary with the Normalized Log TF\n #It also sorts the postings list by doc ID\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n new_postings_list = list()\n for docID_termF in postings_list:\n docID_termF = ( docID_termF[0], lengths[docID_termF[0]][word] )\n new_postings_list.append(docID_termF)\n new_postings_list.sort()\n dictionary[word] = new_postings_list\n\n ''' \n with open('ugly_dictionary.txt', 'w') as fp:\n json.dump(dictionary, fp)\n '''\n #Determine the relevance of each doc by the court that it has in its court field\n #Save the relevant docs and their relevance\n relevant_courts_dict = { \"SG Court of Appeal\":2, \"SG Privy Council\":2, \"UK House of Lords\":2, \"UK Supreme Court\":2,\n \"High Court of Australia\":2, \"CA Supreme Court\":2, \"SG High Court\":1.5, \"Singapore International Commercial Court\":1.5,\n \"HK High Court\": 1.5, \"HK Court of First Instance\": 1.5, \"UK Crown Court\": 1.5, \"UK Court of Appeal\": 1.5, \"UK High Court\": 1.5, \n \"Federal Court of Australia\": 1.5, \"NSW Court of Appeal\": 1.5, \"NSW Court of Criminal Appeal\": 1.5, \"NSW Supreme Court\": 1.5}\n\n relevant_docs = dict()\n \n for court_name in relevant_courts_dict:\n court_postings_list = court_dictionary.get(court_name, -1)\n if(court_postings_list != -1):\n for docid in court_postings_list:\n #save a dictionary of docID and its relevance (2 or 1.5) according to its court\n relevant_docs[docid] = relevant_courts_dict[court_name]\n\n #This section traverse each word (key) in the dictionary, get its postings list and save it in a different file \n postings_list_file = open(out_postings, \"wb\") \n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n dictionary[word] = (document_frequency, postings_list_position)\n for date in date_dictionary:\n #Get postings list for the date\n postings_list = date_dictionary[date]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n date_dictionary[date] = (document_frequency, postings_list_position)\n for court in court_dictionary:\n #Get postings list for the date\n postings_list = court_dictionary[court]\n 
#Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n court_dictionary[court] = (document_frequency, postings_list_position)\n #Close the postings lists file\n postings_list_file.close() \n\n #Now open the dictionary file and save the three dictionaries\n with open(out_dict, 'wb') as dictionary_file:\n pickle.dump(total_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(date_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(court_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(relevant_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n \n '''\n The structure we have is:\n\n dictionary.txt: Has three dictionaries\n {word.zone : [doc_freq, pointer], word.zone: [doc_freq, pointer], ...}\n {date : [doc_freq, pointer], date: [doc_freq, pointer], ...}\n {court : [doc_freq, pointer], court: [doc_freq, pointer], ...}\n\n postings.txt: Has the postings for the three dictionaries\n For the dictionary postings:\n [[docID,termFrequency],[docID,termFrequency]]\n [[docID,termFrequency]] ...\n For the date_dictionary postings:\n [docId, docId, docId, docId]\n For the court_dictionary postings:\n [docId, docId, docId, docId]\n ...\n\n Both documents together would be:\n { word.zone: [doc_freq, [[docID,termFrequency], ... ]], \n word.zone: [doc_freq, [[docID,termFrequency].}, ...]] }\n { date: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n { court: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... 
]] }\n\n lengths.txt\n [document: [word: weight, word: weight, ...], document: [word: weight, word: weight, ...]]\n Decided to make it like this to keep control of which weights correspond to which words\n Although for a document I will traverse all the weights to get the score\n If the word is not in the document vector [which in my case is a dictionary], then its weight is 0\n This way I am no using a sparse matrix\n\n '''", "def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes", "def build_index(self):\n self.rebuild_index()", "def build_index(text: Iterable) -> Dict[str, List[Tuple[int, int]]]:\n index = defaultdict(list)\n for line_no, line in enumerate(text, 1):\n for match in WORD_RE.finditer(line):\n word = match.group()\n column_no = match.start() + 1\n location = (line_no, column_no)\n index[word].append(location)\n return index", "def build_idx(vocab):\n word2index = {}\n index2word = {}\n\n word2index['PAD'] = 0\n index2word[0] = 'PAD'\n\n word2index['UNK'] = 1\n index2word[1] = 'UNK'\n\n for i,word in enumerate(vocab):\n word2index[word.lower()] = i+2\n index2word[i+2] = word.lower()\n\n return word2index, index2word", "def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs", "def create_indices():\n destroy_indices()\n\n ActionDocument._index.create(ignore=[400, 404])\n ClassificationDocument._index.create(ignore=[400, 404])\n FunctionDocument._index.create(ignore=[400, 404])\n PhaseDocument._index.create(ignore=[400, 404])\n RecordDocument._index.create(ignore=[400, 404])\n\n yield\n\n destroy_indices()", "def build_inverted_index():\r\n # vacabulary list (with out common_words)\r\n file_read = read_file()\r\n vacabulary_list = []\r\n common_words = read_common_words()\r\n for key in file_read:\r\n for element in file_read[key]:\r\n if (element not in vacabulary_list) & (element not in common_words):\r\n vacabulary_list.append(element)\r\n\r\n # word list of each file\r\n content = remove_common_words(file_read, common_words) # content = stopping()\r\n\r\n # generate direction to save result\r\n inverted_index = {}\r\n for item in vacabulary_list:\r\n inverted_index[item] = {}\r\n\r\n for file_id in content.keys():\r\n frequency = Counter(\r\n content[file_id]) # the frequency of words in a file : {'slipstream': 5, 'lift': 4, 'wing': 3}\r\n for word in 
frequency.keys():\r\n inverted_index[word][file_id] = frequency[word]\r\n\r\n inverted_index = sorted(inverted_index.items(), key=lambda d: d[0], reverse=False)\r\n inverted_index = dict(inverted_index)\r\n return inverted_index", "def get_indexes_for_word (self,word):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,word,)\r\n db_cursor.execute(\"SELECT note_index\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=? and word=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n\r\n return self.word_dict[word]", "def build_index(self):\n # Init the HNSWLIB index\n self.create_index()\n logger.info(f\"Building HNSWLIB index, max_elements: {len(self.corpus)}\")\n logger.debug(f\"Parameters Required: M: {self.M}\")\n logger.debug(f\"Parameters Required: ef_construction: {self.ef_construction}\")\n logger.debug(f\"Parameters Required: ef(>topn): {self.ef}\")\n\n # Then we train the index to find a suitable clustering\n self.index.add_items(self.corpus_embeddings, list(range(len(self.corpus_embeddings))))", "def index(self):\n print(\"Indexing...\")\n # ------------------------------------------------------------------\n # TODO: Create an inverted, positional index.\n # Granted this may not be a linked list as in a proper\n # implementation.\n # This index should allow easy access to both \n # 1) the documents in which a particular word is contained, and \n # 2) for every document, the positions of that word in the document \n # Some helpful instance variables:\n # * self.docs = List of documents\n # * self.titles = List of titles\n inv_index = defaultdict(set)\n self.tf = defaultdict(Counter)\n \n for word in self.vocab:\n inv_index[word] = {} # create dictionary with words in V\n\n # Generate inverted index here\n for doc in range(len(self.docs)):\n for word in self.docs[doc]:\n self.tf[doc][word] += 1 # represents how many times word 'word' is mentioned in document 'i'\n \n for doc, title in zip(self.docs, self.titles):\n for word in self.vocab:\n inv_index[word][title] = [] # list for each word in vocabulary for all titles\n for pos, word in enumerate(doc):\n inv_index[word][title].append(pos)\n\n self.inv_index = inv_index\n # ------------------------------------------------------------------\n\n # turn self.docs into a map from ID to bag of words\n id_to_bag_of_words = {}\n for d, doc in enumerate(self.docs):\n bag_of_words = set(doc)\n id_to_bag_of_words[d] = bag_of_words\n self.docs = id_to_bag_of_words", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fields:\r\n create_key_index(column.db_field_name)", "def build_index(self):\r\n date_time('Building indexes in citations table')\r\n self.cursor.execute('DROP INDEX IF EXISTS IDX_citations ;')\r\n self.cursor.execute('CREATE INDEX IDX_citations ON citations (citation);')\r\n self.conn.commit()\r\n gc.collect()" ]
[ "0.66589546", "0.6544771", "0.64670044", "0.64168614", "0.63033056", "0.6180472", "0.6178157", "0.6154601", "0.61489826", "0.6104722", "0.60848147", "0.6009543", "0.60010827", "0.5981455", "0.5976573", "0.5911548", "0.59066755", "0.590064", "0.5855223", "0.58235234", "0.58033395", "0.58029234", "0.57984054", "0.5796318", "0.57803154", "0.57530683", "0.5698898", "0.56863755", "0.568296", "0.5682313" ]
0.7427538
0
This does the exact same thing as index_search() except that it uses your htable. I.e., use htable_get(index, w) not index[w].
def myhtable_index_search(files, index, terms):
    res_file = []
    count = 0
    if len(terms) == 0:
        print('empty terms')
        return
    for term in terms:
        term = term.lower()
        count += 1
        if count == 1:
            s = htable_get(index, term)
            if s == None:
                s = {-1}
        else:
            s = s.intersection(htable_get(index, term))
    for id in s:
        if id != -1:
            res_file.append(files[id])
    return res_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lookup(index,keyword):\n\tif keyword in index:\n\t\treturn index[keyword]\n\treturn None", "def __getitem__(self,idx):\n try:\n return self._cache[idx]\n except:\n pass\n\n try:\n # return full data entry as list\n out = self._data[idx]\n self._cache[idx] = out\n return out\n except:\n try:\n # return data entry with specified key word\n out = self._data[idx[0]][self._header[self._alias[idx[1]]]]\n self._cache[idx] = out\n return out\n except:\n pass", "def lookup(index, keyword):\n for item in index:\n if item[0] == keyword:\n return item[1]\n return []", "def get_index(self, *args, **dargs):\n pass", "def htable_get(table, key):", "def __index__(self, ???):", "def dummy_search(query):\n ii = InvertedIndex()\n return ii.lookup_query(query)", "def build_index():\n pass", "def get_by_index(self, index):\n # makes it easier for callers to just pass in a header value\n index = int(index) if index else 0\n return self.by_index.get(index)", "def search_engine_index(request):\n return {'NO_INDEX': settings.NO_INDEX}", "def _get_doc(results, index):\n return results[index]", "def bucket_indexof(table, key):", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def __index__(self, *args, **kwargs): # real signature unknown\n pass", "def search(self, key, headers=Headers()):", "def index(self):\n if hasattr(self, '_m_index'):\n return self._m_index if hasattr(self, '_m_index') else None\n\n self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag)\n return self._m_index if hasattr(self, '_m_index') else None" ]
[ "0.6155713", "0.59609514", "0.59491926", "0.59410375", "0.5900303", "0.5845122", "0.5789598", "0.577333", "0.57275766", "0.5688099", "0.5681756", "0.56468856", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.5621905", "0.56170857", "0.56094986" ]
0.6331249
0
Tests if building a Dirichlet ensemble runs without problems
def test_dirichletensemble():
    np.random.seed(seed=2)
    X, y = make_blobs(n_samples=200, centers=2, n_features=2, cluster_std=4, random_state=2)
    n_train = 100
    trainX, testX = X[:n_train, :], X[n_train:, :]
    trainy, testy = y[:n_train], y[n_train:]
    n_members = 5
    stack = DirichletEnsemble(N=5000)
    for i in range(n_members):
        model = _get_fitted_random_model(trainX, trainy)
        train_batches = CustomIterator(trainX, trainy, 32)
        val_batches = CustomIterator(testX, testy, 32)
        m = KerasMember(keras_model=model, name="Model " + str(i),
                        train_batches=train_batches, val_batches=val_batches)
        stack.add_member(m)
    stack.fit()
    stack.describe()
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_machine_learning():", "def test_valid_ensemble(ensemble: bool) -> None:\n mapie = MapieRegressor(ensemble=ensemble)\n mapie.fit(X_toy, y_toy)", "def test_training(self):\n\t\tpass", "def run_experiment() -> List[bool]:\n return [random.random() < 0.5 for _ in range(1000)]", "def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"md\",\n type=int,\n help=\"maximum depth\")\n parser.add_argument(\"mls\",\n type=int,\n help=\"minimum leaf samples\")\n parser.add_argument(\"--xTrain\",\n default=\"q4xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q4yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q4xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q4yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n # create an instance of the decision tree using gini\n start = time.time()\n dt1 = DecisionTree('gini', args.md, args.mls)\n trainAcc1, testAcc1 = dt_train_test(dt1, xTrain, yTrain, xTest, yTest)\n print(\"GINI Criterion ---------------\")\n print(\"Training Acc:\", trainAcc1)\n print(\"Test Acc:\", testAcc1)\n dt = DecisionTree('entropy', args.md, args.mls)\n trainAcc, testAcc 
= dt_train_test(dt, xTrain, yTrain, xTest, yTest)\n print(\"Entropy Criterion ---------------\")\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n end = time.time()\n print(\"Time taken: \", end-start)", "def test_perfectModelEnsemble_init(PM_ds_initialized_1d):\n pm = PerfectModelEnsemble(PM_ds_initialized_1d)\n print(PerfectModelEnsemble)\n assert pm", "def test_complete_experiment():\n try:\n # init logging\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n # configure experiment\n run_experiment.load_gin_configs(PARAMS, [])\n # create the agent and run experiment\n runner = checkpoint_runner.create_runner(BASE_DIR)\n runner.run_experiment()\n except Exception:\n pytest.fail(\n 'Running experiments in Dopamine failed!')", "def run():\n # get arguments\n args = parse_args()\n assert args.batch_size % args.gpu_num == 0\n assert args.gru_hidden_size % 2 == 0\n\n # create a logger\n logger = logging.getLogger(\"GACM\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n check_path(args.save_dir)\n check_path(args.load_dir)\n check_path(args.result_dir)\n check_path(args.summary_dir)\n if args.log_dir:\n check_path(args.log_dir)\n file_handler = logging.FileHandler(args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time())) + '.txt')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n logger.info('Running with args : {}'.format(args))\n\n logger.info('Checking the directories...')\n for dir_path in [args.save_dir, args.result_dir, args.summary_dir]:\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n global Dataset\n global Agent\n logger.info('Agent version: {}.0'.format(args.agent_version))\n logger.info('Dataset version: {}.0'.format(args.dataset_version))\n logger.info('Checking the directories...')\n Dataset = importlib.import_module('dataset{}'.format(args.dataset_version)).Dataset\n Agent = importlib.import_module('Agent{}'.format(args.agent_version)).Agent\n \n if args.pretrain:\n pretrain(args)\n if args.train:\n train(args)\n if args.test:\n test(args)\n if args.rank:\n rank(args)\n if args.generate_synthetic_dataset:\n generate_synthetic_dataset(args)\n logger.info('run done.')", "def test_predictor():", "def check_distillation_agent(config: str, run_file: str):\n cmd = (\n f\"python {run_file} --cfg-path {config} --integration-test \"\n + f\"--episode-num 1 --interim-test 1 --off-render\"\n )\n\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n shell=True,\n )\n output, _ = p.communicate()\n print(str(output))\n assert p.returncode == 0\n\n # Find saved checkpoint path and data path.\n pattern = r\"./checkpoint/.+/\"\n data_pattern = r\"data/.+/\"\n checkpoint_path = re.findall(pattern, str(output))[0]\n full_data_path, n_frame_from_last_path = re.findall(data_pattern, str(output))\n\n try:\n num_episode_step = re.findall(r\"episode step: \\d+\", str(output))[0]\n num_episode_step = int(re.findall(r\"\\d+\", num_episode_step)[0])\n\n # Check if the number of data is same with iterated episode step.\n saved_data_list = os.listdir(full_data_path)\n assert (\n len(saved_data_list) == num_episode_step\n ), \"The number of data does not match the number of iterated episode steps.\"\n\n # Check 
if n_frame_from_last works well.\n n_frame_from_last_data_list = os.listdir(n_frame_from_last_path)\n assert 3 == len(\n n_frame_from_last_data_list\n ), f\"n_frame_from_last doesn't work properly(expected num of data: 3, num of data: {len(n_frame_from_last_data_list)}).\"\n\n # Check if train-phase data only contaions state, not state & q value.\n with open(full_data_path + saved_data_list[0], \"rb\") as f:\n datum = pickle.load(f)\n assert (\n len(datum) == 1\n ), \"The length of the data is not appropriate(length must be 1, state only).\"\n\n except Exception as e:\n raise e\n\n finally:\n \"\"\"Delete generated directories.\"\"\"\n delete_path(checkpoint_path)\n delete_path(full_data_path)\n delete_path(n_frame_from_last_path)", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_step(self):\n # allow to escape testing by setting runtest to False\n if not self.cfg['runtest'] and not isinstance(self.cfg['runtest'], bool):\n\n # make very sure OMP_NUM_THREADS is set to 1, to avoid hanging GROMACS regression test\n env.setvar('OMP_NUM_THREADS', '1')\n\n self.cfg['runtest'] = 'check'\n if self.cfg['parallel']:\n # run 'make check' in parallel since it involves more compilation\n self.cfg.update('runtest', \"-j %s\" % self.cfg['parallel'])\n super(EB_GROMACS, self).test_step()", "def test_check_estimator(estimator):\n check_estimator(estimator)", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def test_per_dqn(self):\n model = PERDQNLightning(self.hparams)\n result = self.trainer.fit(model)\n\n self.assertEqual(result, 1)", "def _TestEnsemble(self, config):\n # Note that the initialization of the lattice must be the same across the\n # units dimension (otherwise the loss will be different).\n # We fix the random seed to make sure we get similar initialization.\n if self.disable_ensembles:\n return\n config = dict(config)\n config[\"num_training_epoch\"] = 3\n config[\"kernel_initializer\"] = \"constant\"\n losses = []\n for units, lattice_index in [(1, 0), (3, 0), (3, 2)]:\n config[\"units\"] = units\n config[\"lattice_index\"] = lattice_index\n tf.keras.utils.set_random_seed(42)\n losses.append(self._TrainModel(config))\n self.assertAlmostEqual(min(losses), max(losses), delta=self.loss_eps)", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n 
example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def main() -> bool:\n global logger\n logger = setup_logger(\"nitpycker\")\n plugin_manager = Manager()\n plugin_manager.load_plugins()\n args = parse_args(plugin_manager)\n if plugin_manager.enable_plugins(args.plugins, args):\n exit(2)\n\n plugin_manager.pre_test_discovery()\n tests = unittest.defaultTestLoader.discover(args.start_directory, pattern=args.pattern)\n plugin_manager.post_test_discovery()\n tests = plugin_manager.filter_tests(tests)\n report = ParallelRunner(plugin_manager, process_number=args.process_number, verbosity=args.verbosity).run(tests)\n return not report.wasSuccessful()", "def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # 
\t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);", "def test_no_errors(self):\n raised = False\n try:\n _ = RandomForest(n_estimators=1, max_depth=1, criterion=\"entropy\")\n except Exception as error:\n print(error)\n raised = True\n self.assertFalse(raised)", "def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. 
Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score", "def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True", "def test(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Test with config:\")\n print(cfg)\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n # Load a checkpoint to test if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if cfg.TEST.CHECKPOINT_FILE_PATH != \"\":\n checkpoint_path = cfg.TEST.CHECKPOINT_FILE_PATH\n if osp.exists(checkpoint_path):\n if distributed.is_master_proc():\n print('Loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n elif cfg.TRAIN.CHECKPOINT_FILE_PATH != \"\":\n # If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current\n # checkpoint folder, try to load checkpoint from\n # TRAIN.CHECKPOINT_FILE_PATH and test it.\n checkpoint_path = cfg.TRAIN.CHECKPOINT_FILE_PATH\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n else:\n if distributed.is_master_proc():\n print(\"Testing with random initialization. 
Only for debugging.\")\n\n # Create testing loaders.\n test_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n test_loader = DataLoader(\n test_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=False,\n sampler=(DistributedSampler(test_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n \n if distributed.is_master_proc():\n print(\"Testing model for {} iterations\".format(len(test_loader)))\n\n # Perform test on the entire dataset.\n perform_test(test_loader, model, cfg)", "def run_experiment():\n pass", "def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )", "def test_generate_nb_testing(self):\n pass" ]
[ "0.65376455", "0.65050536", "0.6211623", "0.6093889", "0.60780334", "0.6073346", "0.6068026", "0.60495067", "0.60197634", "0.60121745", "0.59868246", "0.5984857", "0.5951661", "0.594414", "0.5943446", "0.5942952", "0.5937696", "0.59257823", "0.5918587", "0.59163463", "0.5901887", "0.5899242", "0.58881587", "0.58784586", "0.5875516", "0.5863793", "0.5853014", "0.5849564", "0.5845748", "0.58449924" ]
0.670472
0
Map s_new to t_new based on the known mapping of s (source) to t (target), where s holds the original/intrinsic coordinates and t the corresponding intrinsic/original coordinates
def mapping(s, t, s_new, k, c):
    n, s_dim = s.shape
    t_dim = t.shape[1]
    n_new = s_new.shape[0]
    # 1. determine nearest neighbors
    dist = np.sum((s[np.newaxis] - s_new[:, np.newaxis])**2, -1)
    nn_ids = np.argsort(dist)[:, :k]  # change to [:,:k]
    nns = np.row_stack([s[nn_ids[:, ki]] for ki in range(k)])
    nns = nns.reshape((n_new, k, s_dim), order='F')
    # 2. determine Gram matrix
    dif = s_new[:, np.newaxis] - nns
    G = np.tensordot(dif, dif, axes=([2], [2]))
    G = G[np.arange(n_new), :, np.arange(n_new)]
    # 3. determine weights (not worth vectorizing this)
    weights = np.zeros((n_new, k))
    for i_n in range(n_new):
        weights[i_n] = np.linalg.inv(G[i_n] + c*np.eye(k)).dot(np.ones((k,)))
    weights /= np.sum(weights, -1, keepdims=True)
    # 4. compute coordinates
    t_nns = np.row_stack([t[nn_ids[:, ki]] for ki in range(k)])
    t_nns = t_nns.reshape((n_new, k, t_dim), order='F')
    t_new = np.dot(weights, t_nns)
    t_new = t_new[np.arange(n_new), np.arange(n_new)]
    return t_new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_model(self, original, t1, t2, resolution_scaling_factor=1):\n img = Image()\n img.time_stamp = t2\n\n if t1 == t2:\n img.initialize_with_image(original)\n return img\n\n calc_shift_fnc = self.calculate_shift\n orig_get_fnc = original.get\n interp_fnc = my_math.linear_interpolation\n\n def generate(y, x):\n \"\"\"Function describing the transformed image\"\"\"\n realy = y / resolution_scaling_factor\n realx = x / resolution_scaling_factor\n\n # move to time t2\n posy = y + calc_shift_fnc(realy, realx, t2, 0) - \\\n calc_shift_fnc(realy, realx, t1, 0)\n posx = x + calc_shift_fnc(realy, realx, t2, 1) - \\\n calc_shift_fnc(realy, realx, t1, 1)\n\n x_left = int(posx) # math.floor(pos[0])\n x_right = x_left + 1 # math.ceil(pos[0])\n y_down = int(posy) # math.floor(pos[1])\n y_up = y_down + 1 # math.ceil(pos[1])\n\n v11 = orig_get_fnc(y_down, x_left, resolution_scaling_factor)\n v12 = orig_get_fnc(y_down, x_right, resolution_scaling_factor)\n v21 = orig_get_fnc(y_up, x_left, resolution_scaling_factor)\n v22 = orig_get_fnc(y_up, x_right, resolution_scaling_factor)\n\n return interp_fnc(y_down, x_left, y_up, x_right, v11, v12, v21, v22,\n posy, posx)\n\n img.image_data = np.fromfunction(np.vectorize(generate),\n (original.shape()[0]*resolution_scaling_factor,\n original.shape()[1]*resolution_scaling_factor))\n\n if resolution_scaling_factor != 1:\n img.image_data = skimage.transform.resize(img.image_data,\n original.shape(),\n preserve_range=True)\n\n return img", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def align_images(src, target, scoords, tcoords, shape):\n\n # create black background\n bg = np.zeros(target.shape, dtype=np.float32) \n # extract ROIs using the regions' coordinates\n src_roi, sr_center = extract_roi(src , scoords)\n t_roi, tr_center = extract_roi(target, tcoords)\n\n # check to see if we need to resize source\n # such that source and target's areas of ROIs are \n # approximately the same size\n # if the area of target roi is either at least twice or\n # at most half the area of source roi, resize source\n sr_h, sr_w = src_roi.shape[:2]\n tr_h, tr_w = t_roi.shape[:2]\n sr_area = sr_h * sr_w; tr_area = tr_h * tr_w\n if 2 <= tr_area / sr_area or tr_area / sr_area <= 0.5:\n kernel = gaussian_kernel((9,9), 2)\n # 
keep track of the ratio\n ratio = [tr_h / sr_h, tr_w / sr_w]\n new_size = [int(src.shape[0] * ratio[0]), int(src.shape[1] * ratio[1])]\n smooth_src = conv.conv2(src, kernel, 'reflect')\n resized_src = resample(smooth_src, new_size)\n else:\n resized_src = src.copy()\n ratio = [1,1]\n new_sr_cntr = [int(sr_center[0] * ratio[0]), int(sr_center[1] * ratio[1])]\n\n # psate source image on black background at (0,0)\n # translate pasted image so that center of source ROI\n # overlaps center of target ROI\n # M is translation matrix [[1, 0, tx], [0, 1, ty]]\n tx = tr_center[1] - new_sr_cntr[1]\n ty = tr_center[0] - new_sr_cntr[0]\n M = np.array([[1, 0, tx], [0, 1, ty]], dtype=np.float32)\n bg[0:resized_src.shape[0], 0:resized_src.shape[1]] = resized_src\n shifted = cv2.warpAffine(bg, M, (bg.shape[1], bg.shape[0]))\n\n # recalculating coordinates of source roi\n # creating aligned mask\n start, end = scoords\n # because coords in scoords have form (x, y)\n # ratio follows (y, x)\n new_start = (start[0] * ratio[1], start[1] * ratio[0])\n new_end = (end[0] * ratio[1], end[1] * ratio[0])\n tmp = np.zeros(target.shape, dtype=np.float32)\n if shape == \"rectangle\":\n mask = cv2.rectangle(tmp, new_start, new_end, (1,1,1), -1)\n elif shape == \"ellipse\":\n startx, starty = new_start\n endx, endy = new_end\n centerx = int((startx + endx) // 2)\n centery = int((starty + endy) // 2)\n axlen = (int((endx - startx) // 2), int((endy - starty) // 2))\n mask = cv2.ellipse(tmp, (centerx, centery), axlen, 0, 0, 360, (1,1,1), -1)\n mask = cv2.warpAffine(mask, M, (mask.shape[1], mask.shape[0]))\n\n return shifted, mask", "def map_tensor(self, source: Self, tensor: np.ndarray,\n cube_scale: float = 20., distances: bool = False,\n max_distance: float = None) -> np.ndarray:\n # create a cube for extrapolation\n smin, smax = source.extents\n tmin, tmax = self.extents\n min = np.minimum(smin, tmin).flatten()\n max = np.maximum(smax, tmax).flatten()\n\n avg, cube = self.__cuboid(max, min, scale=cube_scale)\n\n # pair original coordinates to scalar values and add the cuboid\n sdata = [(nid, source[nid].coors, tensor[nid]) for nid in source.keys()]\n spoints = np.array([x[1] for x in sdata], dtype=float)\n svalues = np.array([x[2] for x in sdata], dtype=float)\n\n tshape = svalues.shape\n\n if len(svalues.shape) == 1:\n svalues = svalues.reshape(svalues.shape[0], 1, 1)\n value_type = \"scalar\"\n elif len(svalues.shape) == 2:\n svalues = svalues.reshape(svalues.shape[0], 1, svalues.shape[1])\n value_type = \"vector\"\n else:\n value_type = \"tensor\"\n\n mean = np.mean(svalues, axis=0)\n cube_values = np.array([mean] * cube.shape[0], dtype=float).reshape(-1, svalues.shape[1], svalues.shape[2])\n spoints = np.concatenate((spoints, cube), axis=0)\n svalues = np.concatenate((svalues, cube_values), axis=0)\n\n # prepare new nodes and their coordinates\n tdata = [(nid, self[nid].coors) for nid in self.keys()]\n tids = [x[0] for x in tdata]\n tpoints = np.array([x[1] for x in tdata], dtype=float)\n\n # map values to new nodes\n grid = np.empty((tpoints.shape[0], svalues.shape[1], svalues.shape[2]), dtype=float)\n for m in range(svalues.shape[1]):\n for n in range(svalues.shape[2]):\n grid[:,m,n] = scipy.interpolate.griddata(spoints, svalues[:,m,n], tpoints, method=\"linear\")\n\n if value_type == \"scalar\":\n grid = grid.reshape(grid.shape[0])\n elif value_type == \"vector\":\n grid = grid.reshape(grid.shape[0], -1)\n\n # reformat mapped values to a dict {nid, value}\n results = dict(list(zip(tids, grid)))\n\n # if closest 
distances are reuqested\n if distances:\n tree = scipy.spatial.cKDTree(spoints)\n xi = scipy.interpolate.interpnd._ndim_coords_from_arrays(tpoints,\n ndim=tpoints.shape[1])\n distances, indexes = tree.query(xi)\n\n # Copy original result but mask missing values with NaNs\n if max_distance:\n grid2 = grid[:]\n if len(grid.shape) > 1:\n grid2[distances > max_distance, :] = np.nan\n else:\n grid2[distances > max_distance] = np.nan\n grid = grid2\n distances = dict(list(zip(tids, distances)))\n\n else:\n distances = None\n\n if distances:\n return results, distances\n else:\n return results", "def apply_affine_transform_no_numpy(pdr_shots_dict, start_shot_id, end_shot_id, s, A, b, deviation, gps_shot_ids=[]):\n new_dict = {}\n\n start_index = _shot_id_to_int(start_shot_id)\n end_index = _shot_id_to_int(end_shot_id)\n\n # transform pdr shots\n for i in range(start_index, end_index + 1):\n shot_id = _int_to_shot_id(i)\n dop = get_dop(shot_id, deviation, gps_shot_ids)\n\n if shot_id in pdr_shots_dict:\n X = pdr_shots_dict[shot_id]\n A_dot_X = [A[0][0]*X[0] + A[0][1]*X[1] + A[0][2]*X[2],\n A[1][0]*X[0] + A[1][1]*X[1] + A[1][2]*X[2],\n A[2][0]*X[0] + A[2][1]*X[1] + A[2][2]*X[2]]\n Xp = [i*s + j for i, j in zip(A_dot_X, b)]\n new_dict[shot_id] = [Xp[0], Xp[1], Xp[2], dop]\n #logger.info(\"new_dict {} = {} {} {} {}\".format(shot_id, new_dict[shot_id][0], new_dict[shot_id][1], new_dict[shot_id][2], new_dict[shot_id][3]))\n\n return new_dict", "def update_transforms(self, old_transforms, new_transforms):\n updated_transforms = {}\n for new_key, new_value in new_transforms.items():\n #if not new_key in old_transforms.valus():\n # old_transforms[new_key] = new_key\n\n if new_value[1] == \"transitive\":\n try:\n #updated_transforms[{v: k for k, v in old_transforms.items()}[new_key]] = new_value[0]\n #updated_transforms[old_transforms[new_key]] = new_value[0]\n updated_transforms[new_key] = old_transforms[new_value[0]]\n except KeyError:\n updated_transforms[new_key] = new_value[0]\n\n elif new_value[1] == \"additive\":\n # Perhaps needs to be adjusted, made more sophisticated\n # so that a new character is introduced even if it wasn't in the current segment\n if new_value[0] not in old_transforms:\n updated_transforms[new_key] = new_value[0]\n else:\n updated_transforms[new_key] = add_character_symbol_suffix(new_value[0], auto=True)\n if self.storyline:\n self.storyline.add_character([updated_transforms[new_key]],[updated_transforms[new_key]])\n else:\n self.add_character([updated_transforms[new_key]],[updated_transforms[new_key]])\n else:\n raise ValueError(\"Must be additive or transitive transposition\")\n for old_key, old_value in old_transforms.items():\n\n if old_key not in updated_transforms:\n updated_transforms[old_key] = old_transforms[old_key]\n\n #updated_transforms = dict(old_transforms, **{key:old_transforms[new_transforms[key]] for key in new_transforms.keys()})\n return updated_transforms", "def map_to_coords(source_lats, source_lons, target_lats, target_lons):\n source_lats_with_border, source_lons_with_border = add_border(source_lats, 'extrapolate'), add_border(source_lons, 'extrapolate')\n \n if len(target_lats.shape) == 1:\n target_lats = np.tile(target_lats,len(target_lons)).reshape(len(target_lons),-1).T\n target_lons = np.tile(target_lons,len(target_lats)).reshape(len(target_lats),-1)\n \n resample_map_rows = np.zeros(target_lats.shape, dtype=int)\n resample_map_cols = np.zeros(target_lats.shape, dtype=int)\n for i in range(target_lats.shape[0]):\n for j in 
range(target_lats.shape[1]):\n target_lat, target_lon = target_lats[i,j], target_lons[i,j]\n closest_source_cell = np.argmin(np.square(source_lats_with_border - target_lat) + \n np.square(source_lons_with_border - target_lon))\n resample_map_rows[i,j] = closest_source_cell // source_lats_with_border.shape[1]\n resample_map_cols[i,j] = closest_source_cell % source_lats_with_border.shape[1]\n \n return resample_map_rows, resample_map_cols", "def testTinttsysMapNNSp(self):\n self._runTest('tinttsys', True, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)", "def testTinttsysMapNN(self):\n self._runTest('tinttsys', False, [1,3,5,7,15], 'nearest,nearest',self.spwmap)", "def reproject_coordinates(x_in, y_in, spatial_reference_source, spatial_reference_target=None): \n if spatial_reference_target is not None:\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326) \n pass\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, spatial_reference_target)\n \n x_new,y_new, z = pTransform.TransformPoint( x_in,y_in)\n \n return x_new,y_new", "def _get_transformed_points_tps(new_points, source_points, coefficients,\n num_points, batch_size):\n\n # Calculate the U function for the new point and each source point as in\n # ref [2]\n # The U function is simply U(r) = r^2 * log(r^2), where r^2 is the\n # squared distance\n\n # Calculate the squared dist between the new point and the source points\n to_transform = new_points.dimshuffle(0, 'x', 1, 2)\n stacked_transform = T.tile(to_transform, (1, num_points, 1, 1))\n r_2 = T.sum(((stacked_transform - source_points.dimshuffle(\n 'x', 1, 0, 'x')) ** 2), axis=2)\n\n # Take the product (r^2 * log(r^2)), being careful to avoid NaNs\n log_r_2 = T.log(r_2)\n distances = T.switch(T.isnan(log_r_2), r_2 * log_r_2, 0.)\n\n # Add in the coefficients for the affine translation (1, x, and y,\n # corresponding to a_1, a_x, and a_y)\n upper_array = T.concatenate([T.ones((batch_size, 1, new_points.shape[2]),\n dtype=theano.config.floatX),\n new_points], axis=1)\n right_mat = T.concatenate([upper_array, distances], axis=1)\n\n # Calculate the new value as the dot product\n new_value = T.batched_dot(coefficients, right_mat)\n return new_value", "def spoint2opointfunc(source_shape, destine_shape):\n x, y = getSOpointRelation(source_shape, destine_shape)\n op = np.array([x, y], dtype=np.float32)\n\n def scaled2original(p):\n #rx = sx*rW/sW\n #ry = sy*rH/sH\n return p * op\n return scaled2original", "def map_scalar(self, source: Self, scalar: dict,\n cube_scale: float = 20., distances: bool = False,\n max_distance: float = None) -> np.ndarray:\n # create a cube for extrapolation\n smin, smax = source.extents\n tmin, tmax = self.extents # target\n min = np.minimum(smin, tmin).flatten()\n max = np.maximum(smax, tmax).flatten()\n\n avg, cube = self.__cuboid(min, max, scale=cube_scale)\n\n # pair original coordinates to scalar values and add the cuboid\n sdata = [(nid, source[nid].coors, scalar[nid]) for nid in source.keys()]\n spoints = np.array([x[1] for x in sdata], dtype=float)\n svalues = np.array([x[2] for x in sdata], dtype=float)\n mean = np.mean(svalues, axis=0)\n cube_values = np.array([mean] * cube.shape[0], 
dtype=float)\n spoints = np.concatenate((spoints, cube), axis=0)\n svalues = np.concatenate((svalues, cube_values), axis=0)\n\n # prepare new nodes and their coordinates\n tdata = [(nid, self[nid].coors) for nid in self.keys()]\n tids = [x[0] for x in tdata]\n tpoints = np.array([x[1] for x in tdata], dtype=float)\n\n # map values to new nodes\n grid = scipy.interpolate.griddata(spoints, svalues, tpoints, method=\"linear\")\n\n\n # reformat mapped values to a dict {nid, value}\n results = dict(list(zip(tids, grid)))\n\n # if closest distances are reuqested\n if distances:\n tree = scipy.spatial.cKDTree(spoints)\n xi = scipy.interpolate.interpnd._ndim_coords_from_arrays(tpoints,\n ndim=tpoints.shape[1])\n distances, indexes = tree.query(xi)\n\n # Copy original result but mask missing values with NaNs\n if max_distance:\n grid2 = grid[:]\n grid2[distances > max_distance] = np.nan\n grid = grid2\n distances = dict(list(zip(tids, distances)))\n\n else:\n distances = None\n\n if distances:\n return results, distances\n else:\n return results", "def init_transformations(self):\n shifts = (self.img_shape[0] // 2, self.img_shape[1] // 2)\n unshifts = (-self.img_shape[0] // 2, -self.img_shape[1] // 2)\n self.preshift = skimage.transform.SimilarityTransform(\n translation = shifts)\n self.postshift = skimage.transform.SimilarityTransform(\n translation = unshifts)", "def geo_transform(self):\n pass", "def transform_map(fixed, mutable):\n \n # I have decided I want to write this function in np rather than TF.\n fixed = np.array(fixed)\n mutable = np.array(mutable)\n\n # check that the shape of each is exactly the same\n if fixed.shape != mutable.shape:\n raise ValueError(\"transform_map: both inputs must have exactly the same shape.\")\n \n # fixed and mutable should have shape (n, d). Generate an nxn matrix that holds the \n # distance between each point. distance[n, m] should be the distance between fixed[n] and\n # mutable [m]\n \n point_count = fixed.shape[0]\n index_grid = np.mgrid[0:point_count, 0:point_count]\n\n diff = fixed[index_grid[0]] - mutable[index_grid[1]]\n distance = np.linalg.norm(diff, axis=2)\n\n # use the distance matrix as a cost matrix and use the Hungarian method implemented by \n # scipy to solve the problem\n fixed_indices, mutable_indices = linear_sum_assignment(distance)\n \n # we want to rearrange mutable, but keep the order of fixed. So we want to re-order \n # mutable by the set of mutable_indices generated by the above function call, which \n # themselves are ordered by fixed_indices. During testing, I found that fixed_indices \n # maintained its original order, but I don't want to rely on that. 
Not sure if this is \n # correct...\n \n return mutable[mutable_indices[fixed_indices]]", "def change_coord_sys(self, new_coord_sys, sensors, adcp):\n\n # Remove any trailing spaces\n if isinstance(self.orig_coord_sys, str):\n o_coord_sys = self.orig_coord_sys.strip()\n else:\n o_coord_sys = self.orig_coord_sys.strip()\n\n # Initialize variables\n orig_sys = 0\n new_sys = 0\n temp_t = None\n\n if self.orig_coord_sys.strip() != new_coord_sys.strip():\n # Assign the transformation matrix and retrieve the sensor data\n t_matrix = copy.deepcopy(adcp.t_matrix.matrix)\n t_matrix_freq = copy.deepcopy(adcp.frequency_khz)\n p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data\n r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data\n h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data\n\n # Modify the transformation matrix and heading, pitch, and roll values base on\n # the original coordinate system so that only the needed values are used in\n # computing the new coordinate system\n if o_coord_sys == 'Beam':\n orig_sys = 1\n elif o_coord_sys == 'Inst':\n orig_sys = 2\n t_matrix[:] = np.eye(t_matrix.shape[0])\n elif o_coord_sys == 'Ship':\n orig_sys = 3\n p = np.zeros(h.shape)\n r = np.zeros(h.shape)\n t_matrix[:] = np.eye(t_matrix.shape[0])\n elif o_coord_sys == 'Earth':\n orig_sys = 4\n\n # Assign a value to the new coordinate system\n if new_coord_sys == 'Beam':\n new_sys = 1\n elif new_coord_sys == 'Inst':\n new_sys = 2\n elif new_coord_sys == 'Ship':\n new_sys = 3\n elif new_coord_sys == 'Earth':\n new_sys = 4\n\n # Check to ensure the new coordinate system is a higher order than the original system\n if new_sys - orig_sys > 0:\n\n # Compute trig function for heaing, pitch and roll\n ch = cosd(h)\n sh = sind(h)\n cp = cosd(p)\n sp = sind(p)\n cr = cosd(r)\n sr = sind(r)\n\n vel_changed = np.tile([np.nan], self.raw_vel_mps.shape)\n n_ens = self.raw_vel_mps.shape[1]\n\n for ii in range(n_ens):\n\n # Compute matrix for heading, pitch, and roll\n hpr_matrix = [[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii]*sr[ii])),\n (sh[ii] * cp[ii]),\n ((ch[ii] * sr[ii]) - sh[ii]*sp[ii]*cr[ii])],\n [(-1 * sh[ii] * cr[ii])+(ch[ii] * sp[ii] * sr[ii]),\n ch[ii] * cp[ii],\n (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])],\n [(-1.*cp[ii] * sr[ii]),\n sp[ii],\n cp[ii] * cr[ii]]]\n\n # Transform beam coordinates\n if o_coord_sys == 'Beam':\n\n # Determine frequency index for transformation matrix\n if len(t_matrix.shape) > 2:\n idx_freq = np.where(t_matrix_freq == self.frequency_khz[ii])\n t_mult = np.copy(t_matrix[idx_freq])\n else:\n t_mult = np.copy(t_matrix)\n\n # Get velocity data\n vel = np.copy(np.squeeze(self.raw_vel_mps[:, ii]))\n\n # Check for invalid beams\n idx_3_beam = np.where(np.isnan(vel))\n\n # 3-beam solution\n if len(idx_3_beam[0]) == 1:\n\n # Special processing for RiverRay\n if adcp.model == 'RiverRay':\n\n # Set beam pairing\n beam_pair_1a = 0\n beam_pair_1b = 1\n beam_pair_2a = 2\n beam_pair_2b = 3\n\n # Set speed of sound correction variables Note: Currently (2013-09-06)\n # WinRiver II does not use a variable correction and assumes the speed\n # of sound and the reference speed of sound are the same.\n # sos = sensors.speed_ofs_sound_mps.selected.data[ii]\n # sos_reference = 1536\n # sos_correction = np.sqrt(((2 * sos_reference) / sos) **2 -1)\n\n sos_correction = np.sqrt(3)\n\n # Reconfigure transformation matrix based on which beam is invalid\n\n # Beam 1 invalid\n if idx_3_beam[0][0] == beam_pair_1a:\n\n # Double valid beam in invalid pair\n t_mult[0:2, 
beam_pair_1b] *= 2\n\n # Eliminate invalid pair from vertical velocity computations\n t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction]\n\n # Reconstruct beam velocity matrix to use only valid beams\n t_mult = t_mult[0:3, [beam_pair_1b, beam_pair_2a, beam_pair_2b]]\n\n # Reconstruct beam velocity matrix to use only valid beams\n vel = vel[[beam_pair_1b, beam_pair_2a, beam_pair_2b]]\n\n # Apply transformation matrix\n temp_t = t_mult.dot(vel)\n\n # Correct horizontal velocity for invalid pair with the vertical velocity\n # and speed of sound correction\n temp_t[0] = temp_t[0] + temp_t[2] * sos_correction\n\n # Beam 2 invalid\n if idx_3_beam[0][0] == beam_pair_1b:\n\n # Double valid beam in invalid pair\n t_mult[0:2, beam_pair_1a] = t_mult[0:2, beam_pair_1a] * 2\n\n # Eliminate invalid pair from vertical velocity computations\n t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction]\n\n # Reconstruct transformation matrix as a 3x3 matrix\n t_mult = t_mult[0:3, [beam_pair_1a, beam_pair_2a, beam_pair_2b]]\n\n # Reconstruct beam velocity matrix to use only valid beams\n vel = vel[[beam_pair_1a, beam_pair_2a, beam_pair_2b]]\n\n # Apply transformation matrix\n temp_t = t_mult.dot(vel)\n\n # Correct horizontal velocity for invalid pair with the vertical\n # velocity and speed of sound correction\n temp_t[0] = temp_t[0] - temp_t[2] * sos_correction\n\n # Beam 3 invalid\n if idx_3_beam[0][0] == beam_pair_2a:\n\n # Double valid beam in invalid pair\n t_mult[0:2, beam_pair_2b] = t_mult[:2, beam_pair_2b] * 2\n\n # Eliminate invalid pair from vertical velocity computations\n t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]\n\n # Reconstruct transformation matrix as a 3x3 matrid\n t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2b]]\n\n # Reconstruct beam velocity matrix to use only valid beams\n vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2b]]\n\n # Apply transformation matrix\n temp_t = t_mult.dot(vel)\n\n # Correct horizontal velocity for invalid pair with the vertical\n # velocity and speed of sound correction\n temp_t[1] = temp_t[1] - temp_t[2] * sos_correction\n\n # Beam 4 invalid\n if idx_3_beam[0][0] == beam_pair_2b:\n\n # Double valid beam in invalid pair\n t_mult[:2, beam_pair_2a] *= 2\n\n # Eliminate invalid pair from vertical velocity computations\n t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]\n\n # Reconstruct transformations matrix as a 3x3 matrix\n t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2a]]\n\n # Reconstruct beam velocity matrix to use only valid beams\n vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2a]]\n\n # Apply transformation matrix\n temp_t = t_mult.dot(vel)\n\n # Correct horizontal velocity for invalid pair with the vertical\n # velocity and speed of sound correction\n temp_t[1] = temp_t[1] + temp_t[2] * sos_correction\n\n else:\n\n # 3 Beam solution for non-RiverRay\n vel_3_beam_zero = vel\n vel_3_beam_zero[np.isnan(vel)] = 0\n vel_error = np.matmul(t_mult[3, :], vel_3_beam_zero)\n vel[idx_3_beam] = -1 * vel_error / np.squeeze(t_mult[3, idx_3_beam])\n temp_t = t_mult.dot(vel)\n\n # Apply transformation matrix for 3 beam solutions\n temp_thpr = np.array(hpr_matrix).dot(temp_t[:3])\n temp_thpr = np.hstack([temp_thpr, np.nan])\n\n else:\n\n # Apply transformation matrix for 4 beam solutions\n temp_t = t_mult.dot(np.squeeze(self.raw_vel_mps[:, ii]))\n\n # Apply hpr_matrix\n temp_thpr = np.array(hpr_matrix).dot(temp_t[:3])\n temp_thpr = np.hstack([temp_thpr, temp_t[3]])\n\n else:\n\n # Get velocity data\n vel = 
np.copy(np.squeeze(self.raw_vel_mps[:, ii]))\n\n # Apply heading pitch roll for inst and ship coordinate data\n temp_thpr = np.array(hpr_matrix).dot(vel[:3])\n temp_thpr = np.hstack([temp_thpr, vel[3]])\n\n vel_changed[:, ii] = temp_thpr.T\n\n # Assign results to object\n self.u_mps = -1 * vel_changed[0, :]\n self.v_mps = -1 * vel_changed[1, :]\n self.w_mps = vel_changed[2, :]\n self.d_mps = vel_changed[3, :]\n self.coord_sys = new_coord_sys\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)", "def apply_affine_transform(pdr_shots_dict, start_shot_id, end_shot_id, s, A, b, deviation, gps_shot_ids=[]):\n new_dict = {}\n\n start_index = _shot_id_to_int(start_shot_id)\n end_index = _shot_id_to_int(end_shot_id)\n\n # transform pdr shots\n for i in range(start_index, end_index + 1):\n shot_id = _int_to_shot_id(i)\n dop = get_dop(shot_id, deviation, gps_shot_ids)\n\n if shot_id in pdr_shots_dict:\n Xp = s * A.dot(pdr_shots_dict[shot_id][0:3]) + b\n new_dict[shot_id] = [Xp[0], Xp[1], Xp[2], dop]\n #logger.info(\"new_dict {} = {} {} {} {}\".format(shot_id, new_dict[shot_id][0], new_dict[shot_id][1], new_dict[shot_id][2], new_dict[shot_id][3]))\n\n return new_dict", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def self_affine_equivalent_mappings(s):\n result = []\n for cstt_in in range(0, len(s)):\n for cstt_out in range(0, len(s)):\n mappings = linear_equivalence(\n s,\n [oplus(cstt_out, s[oplus(cstt_in, x)]) for x in range(0, len(s))],\n all_mappings=True\n )\n for AB in mappings:\n A = [oplus(apply_bin_mat(x, AB[0]), cstt_in) for x in range(0, len(s))]\n B = [apply_bin_mat(oplus(x, cstt_out), AB[1]) for x in range(0, len(s))]\n result.append([A, B])\n return result", "def map_addr_tree_2(s, d, tors1, tors2):\n s_d = crc8(0, s, 0x31)%2\n if s_d == 1:\n tors1, tors2 = tors2, tors1\n n1 = len(tors1)\n n2 = len(tors2)\n #s_out, d_out = crc8(0, s, 0x31)%n1 + 1, crc8(0, d, 0x1d)%n2 + 1\n s_out, d_out = random.randint(0, n1-1) + 1, random.randint(0, n2-1) + 1\n s_out, d_out = tors1[s_out-1], tors2[d_out-1]\n return s_out, d_out", "def map_vector(self, source: Self, vector: np.ndarray,\n cube_scale: float = 20., distances: bool = False,\n max_distance: float = None) -> np.ndarray:\n # create a cube for extrapolation\n smin, smax = source.extents\n tmin, tmax = self.extents\n min = np.minimum(smin, tmin).flatten()\n max = np.maximum(smax, tmax).flatten()\n\n avg, cube = self.__cuboid(min, max, scale=cube_scale)\n\n # pair original coordinates to scalar values and add the cuboid\n sdata = [(nid, source[nid].coors, vector[nid]) for nid in source.keys()]\n spoints = np.array([x[1] for x in sdata], dtype=float)\n svalues = np.array([x[2] for x in sdata], dtype=float)\n mean = np.mean(svalues, axis=0)\n cube_values = np.array([mean] * 8, dtype=float).reshape(-1, svalues.shape[1])\n spoints = np.concatenate((spoints, cube), axis=0)\n svalues = np.concatenate((svalues, cube_values), axis=0)\n\n # prepare new nodes and their coordinates\n tdata = [(nid, self[nid].coors) for nid in self.keys()]\n tids = [x[0] for x in tdata]\n tpoints = np.array([x[1] for x in tdata], dtype=float)\n\n # map values to new nodes\n grid = []\n for n in range(svalues.shape[1]):\n grid.append(scipy.interpolate.griddata(spoints, svalues[:,n], tpoints, method=\"linear\"))\n\n\n # reformat mapped values to a dict {nid, 
value}\n grid = np.array(list(zip(*grid)), dtype=float)\n results = dict(list(zip(tids, grid)))\n\n # if closest distances are reuqested\n if distances:\n tree = scipy.spatial.cKDTree(spoints)\n xi = scipy.interpolate.interpnd._ndim_coords_from_arrays(tpoints,\n ndim=tpoints.shape[1])\n distances, indexes = tree.query(xi)\n\n # Copy original result but mask missing values with NaNs\n if max_distance:\n grid2 = grid[:]\n if len(grid.shape) > 1:\n grid2[distances > max_distance, :] = np.nan\n else:\n grid2[distances > max_distance] = np.nan\n grid = grid2\n distances = dict(list(zip(tids, distances)))\n\n else:\n distances = None\n\n if distances:\n return results, distances\n else:\n return results", "def MapstoRef(f):\n #so given a mixture, find each of the n alpha maps\n n = f.n\n d = f.d\n \n A = np.zeros([n,d,d])\n L = np.zeros([n,d,d])\n b = np.zeros([n,d])\n \n for i in range(n):\n A[i,:,:] = fmp(f.cov[i,:,:],-1/2)\n L[i,:,:] = np.linalg.cholesky(A[i,:,:])\n b[i,:] = - A[i,:,:] @ f.m[i,:]\n \n ToRef = Affine_Maps(L,b)\n \n return ToRef", "def apply_spherical_map(img_src, mapping, output_res=(360, 640), f_fill=np.mean):\n (y_map, x_map), (fill_y_l, fill_x_l, nonzeros_l) = mapping\n\n transfo_img = np.zeros(output_res) + 128\n transfo_img[y_map, x_map] = img_src.flatten()\n\n for y, x, nonz in zip(fill_y_l, fill_x_l, nonzeros_l):\n transfo_img[y, x] = f_fill(transfo_img[nonz])\n return transfo_img", "def transform_map_old(fixed, mutable, origin=None, furthest_first=True):\n \n # I have decided I want to write this function in np rather than TF.\n fixed = np.array(fixed)\n mutable = np.array(mutable)\n \n if fixed.shape != mutable.shape:\n raise ValueError(\"transform_map: both inputs must have exactly the same shape.\")\n \n if origin is None:\n origin = np.zeros(fixed.shape[1])\n elif origin.shape[0] != fixed.shape[1]:\n raise ValueError(\"transform_map: origin must have the same dimension as fixed.\")\n\n # determine the order to iterate through points. 
Takes points in fixed in order of \n # decreasing distance from origin\n distance = np.linalg.norm(fixed - origin, axis=1)\n if furthest_first:\n fixed_indices = np.argsort(distance)[::-1]\n else:\n fixed_indices = np.argsort(distance)\n \n rearanged = np.zeros_like(mutable)\n used_mutable = np.zeros(mutable.shape[0], dtype=bool)\n \n for fixed_index in fixed_indices:\n # compute distance between the selected fixed and every mutable\n distance = np.linalg.norm(fixed[fixed_index] - mutable, axis=1)\n \n # filter out mutables that have already been used\n max_distance = 2*np.amax(distance)\n distance[used_mutable] = max_distance\n \n # find just the closest mutable\n closest_mutable = np.argmin(distance)\n \n # update things for the next iteration\n used_mutable[closest_mutable] = True\n rearanged[fixed_index] = mutable[closest_mutable]\n return rearanged", "def test_transform_update():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj = ncs.input(hierarchy=pdb_inp.construct_hierarchy())\n pdb_inp = iotbx.pdb.input(lines=pdb_answer_0,source_info=None)\n nrgl = ncs_obj.get_ncs_restraints_group_list()\n asu_site_cart = pdb_inp.atoms().extract_xyz()\n # reference matrices\n r1 = nrgl[0].copies[0].r\n t1 = nrgl[0].copies[0].t\n r2 = nrgl[0].copies[1].r\n t2 = nrgl[0].copies[1].t\n # modify matrices in the ncs group list\n nrgl[0].copies[0].r = r1 + r2\n nrgl[0].copies[0].t = t1 + t2\n nrgl[0].copies[1].r = r1 + r2\n nrgl[0].copies[1].t = t1 + t2\n nrgl.recalculate_ncs_transforms(asu_site_cart)\n # Get the updated values\n r1_n = nrgl[0].copies[0].r\n t1_n = nrgl[0].copies[0].t\n r2_n = nrgl[0].copies[1].r\n t2_n = nrgl[0].copies[1].t\n #\n assert approx_equal(r1, r1_n, eps=0.001)\n assert approx_equal(t1, t1_n, eps=0.1)\n assert approx_equal(r2, r2_n, eps=0.001)\n assert approx_equal(t2, t2_n, eps=0.1)", "def make_map(self, sampling=None, size=None):\n if sampling is None and self.sampling is None:\n self.sampling = self.minimum_sampling()\n elif sampling is not None:\n self.sampling = sampling\n \n if size is None and self.size is None:\n self.size = self.minimum_size()\n elif size is not None:\n self.size = size\n\n # Build the on-sky source distribution\n self.seeing.make_map(sampling=self.sampling, size=self.size)\n self.x = self.seeing.x\n self.X = self.seeing.X\n self.y = self.seeing.y\n self.Y = self.seeing.Y\n try:\n # Construct the intrinsic map of the source\n self.intrinsic.make_map(sampling=self.sampling, size=self.size)\n # Convolve with the seeing distribution, conserving the\n # integral of the intrinsic source\n self.data = signal.fftconvolve(self.intrinsic.data,\n self.seeing.data * numpy.square(self.sampling),\n mode='same')\n except AttributeError:\n # Renormalize the unity-integral seeing kernal for to\n # represent a point source\n self.data = self.intrinsic*self.seeing.data\n\n # Get the integral\n try:\n # After convolving with the seeing kernel, the total\n # integral should be the same, up to some tolerance\n self.integral = self.intrinsic.integral\n tolerance = 1e-3\n diff = numpy.absolute(self.integral - numpy.square(self.sampling)*numpy.sum(self.data))\n if diff > tolerance:\n warnings.warn('Map and analytic integrals are discrepant by {0} ({1} %)'.format(\n diff, 100*diff/self.integral))\n except AttributeError:\n self.integral = numpy.square(self.sampling) * numpy.sum(self.data)\n\n # Prep for interpolation\n self.interp = interpolate.interp2d(self.x, self.y, self.data, bounds_error=True)", "def get_model(self):\n S = [(x,y) for x in 
range(12) for y in range(9)] # States\n A = self.actions.copy() # Actions\n L = self.objects.copy() # Labeling function\n T = {} # Transitions (s,a) -> s' (they are deterministic)\n for s in S:\n x,y = s\n for a in A:\n T[(s,a)] = self._get_new_position(x,y,a)\n return S,A,L,T # SALT xD", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)", "def _compose_transforms(basis_transforms, source_basis, source_dag):\n example_gates = _get_example_gates(source_dag)\n mapped_instrs = {}\n\n for gate_name, gate_num_qubits in source_basis:\n # Need to grab a gate instance to find num_qubits and num_params.\n # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .\n example_gate = example_gates[gate_name, gate_num_qubits]\n num_params = len(example_gate.params)\n\n placeholder_params = ParameterVector(gate_name, num_params)\n placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))\n placeholder_gate.params = list(placeholder_params)\n\n dag = DAGCircuit()\n qr = QuantumRegister(gate_num_qubits)\n dag.add_qreg(qr)\n dag.apply_operation_back(placeholder_gate, qr[:], [])\n mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag\n\n for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:\n logger.debug(\n \"Composing transform step: %s/%s %s =>\\n%s\",\n gate_name,\n gate_num_qubits,\n equiv_params,\n equiv,\n )\n\n for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():\n doomed_nodes = [\n node\n for node in dag.op_nodes()\n if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)\n ]\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updating transform for mapped instr %s %s from \\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n for node in doomed_nodes:\n\n replacement = equiv.assign_parameters(\n dict(zip_longest(equiv_params, node.op.params))\n )\n\n replacement_dag = circuit_to_dag(replacement)\n\n dag.substitute_node_with_dag(node, replacement_dag)\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updated transform for mapped instr %s %s to\\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n return mapped_instrs" ]
[ "0.55668783", "0.5532111", "0.5460896", "0.5447538", "0.53842735", "0.5356351", "0.53368974", "0.53297365", "0.53066075", "0.5294714", "0.52837485", "0.5263581", "0.52585614", "0.52573544", "0.52403593", "0.5213048", "0.51814204", "0.5172513", "0.51586723", "0.51488996", "0.5139261", "0.5137218", "0.5124848", "0.5112447", "0.5109568", "0.5076573", "0.5064597", "0.50577414", "0.50509", "0.5050541" ]
0.66779846
0
Reads the source and creates a new brace token
def create_token(self): token = Token(PAREN.get(self.current_char), "brace") self.current_char = self.source.read(1) return token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_till_closing_brace(stream):\n rv = \"\"\n in_braces = 1\n while True:\n if EscapeCharToken.starts_here(stream, '{}'):\n rv += stream.next() + stream.next()\n else:\n c = stream.next()\n if c == '{': in_braces += 1\n elif c == '}': in_braces -= 1\n if in_braces == 0: break\n rv += c\n return rv", "def cs_brace(cs_parse:Parse,brace_parse:Parse) -> Parse:\n return cs_parse + c.brace(brace_parse).many()", "def brace(self, token: Token):\n if token.data == \"{\" and self.state in [\n InterfaceState.BODY,\n InterfaceState.INHERIT,\n ]:\n self.state = InterfaceState.DEFINITION\n elif token.data == \"}\" and self.state == InterfaceState.DEFINITION:\n self.state = InterfaceState.END\n else:\n self.invalid(token)", "def clean_open_close_brace(self):\n # Loop over all lines, check for braces and replace them with \\n{ and \\n}\n brack_num = False\n code_on = False\n\n for line_num, line in enumerate(self.file_ltxt[:-1]):\n self.line_num = line_num\n\n # First check if we are in an inline code section\n breaker = False\n for s_type in VALID_SCRIPT_TYPES:\n if re.findall(f\"^ *{s_type} *{{\", line) or (re.findall(f\"^ *{s_type}\", line) and re.findall(\"^ *{\", self.file_ltxt[line_num+1])):\n if code_on is not False:\n self.print_error(f\"Inline {s_type} code is not supported inside {code_on} code.\")\n\n code_on = s_type\n brack_num = 0\n\n if '{' in line:\n s = line.split(\"{\")\n line = s[0] + \"\\n{\\n\" + '{'.join(s[1:])\n brack_num = 1\n if '}' in line:\n s = line.split(\"}\")\n line = s[0] + \"\\n}\\n\" + '}'.join(s[1:])\n code_on = False\n brack_num = 0\n\n\n self.file_ltxt[line_num] = line\n breaker = True\n if breaker:\n continue\n\n # If we are in an inline code section don't edit it\n if code_on is not False:\n if '}' in line: brack_num -= 1\n if '{' in line: brack_num += 1\n\n if brack_num == 0:\n code_on = False\n\n # If not then we can edit the brace opening and closings\n else:\n str_part, non_str = gen_parse.get_str_between_delims(line)\n non_str = non_str.replace(\"{\", \"\\n{\\n\").replace(\"}\", \"\\n}\\n\")\n line = non_str.replace(r\"??!%s!?\", str_part)\n\n self.file_ltxt[line_num] = line\n # print(self.file_ltxt)\n # raise SystemExit(\"BREAK\")\n # Re-split by line-end and remove blank lines\n self.file_ltxt = [i for i in '\\n'.join(self.file_ltxt).split('\\n')\n if not i.isspace() and i]", "def nex_token(self):\n\n self.__list_lexema = []\n while True:\n\n self.column = self.column + 1\n\n # Read File\n try:\n self.c = self.__file.read(1)\n if not self.c:\n self.EOF = -1\n # return Token(Tag_type.EOF, \"EOF\", self.row, self.column)\n\n except IOError as e:\n raise e\n\n if self.__state == 1: # CASE 1\n\n if not self.c:\n return Token(Tag_type.EOF, \"EOF\", self.row, self.column)\n\n elif self.c == \"\\n\": # state 1\n self.row = self.row + 1 # increase the line\n self.column = 1 # returns the column count\n self.__state = 1\n pass\n\n elif self.c == '\\t': # state 1\n self.column = self.column + 3\n\n elif self.c == \" \":\t # state 1\n #self.column = self.column + 1\n pass\n elif self.c == \"<\":\n self.__state = 2 # state 2 return of the state 3 or 4\n\n elif self.c == \"=\":\n self.__state = 5 # state 5 return of the state 6 or 7\n\n elif self.c == \">\":\n self.__state = 8 # state 8 return of the state 9 or 10\n\n elif self.c == \"!\":\n self.__state = 11 # state 11 return of the 11\n\n elif self.c == \"-\":\n self.__state = 1 # state 13\n # return of the state 13\n return Token(Tag_type.OP_MIN, \"-\", self.row, self.column)\n\n elif self.c == \"+\":\n 
self.__state = 1 # state 14\n # return of the state 14\n return Token(Tag_type.OP_AD, \"+\", self.row, self.column)\n\n elif self.c == \"{\":\n self.__state = 1 # state 25\n # return of the state 25\n return Token(Tag_type.SMB_OBC, \"{\", self.row, self.column)\n\n elif self.c == \"}\":\n self.__state = 1 # state 26\n # return of the state 26\n return Token(Tag_type.SMB_CBC, \"}\", self.row, self.column)\n\n elif self.c == \"(\":\n self.__state = 1 # state 27\n # return of the state 27\n return Token(Tag_type.SMB_OPA, \"(\", self.row, self.column)\n\n elif self.c == \")\":\n self.__state = 1 # state 28\n # return of the state 28\n return Token(Tag_type.SMB_CPA, \")\", self.row, self.column)\n\n elif self.c == \",\":\n self.__state = 1 # state 29\n # return of the state 29\n return Token(Tag_type.SMB_COM, \",\", self.row, self.column)\n\n elif self.c == \";\":\n self.__state = 1 # state 30\n # return of the state 30\n return Token(Tag_type.SMB_SEM, \";\", self.row, self.column)\n\n elif self.c == \"/\":\n self.__state = 16\n\n elif self.c == \"*\":\n self.__state = 15\n\n # build num_const\n elif self.c.isdigit():\n self.__list_lexema.append(self.c) # return of the state 34\n self.__state = 31\n\n # build id\n elif self.c.isalpha():\n # return of the state 36 ( install ID)\n self.__list_lexema.append(self.c)\n self.__state = 35\n\n elif self.c == \"'\":\n # return of the state 42 ( return a char_const )\n self.__state = 40\n\n elif self.c == \"\\\"\":\n self.__state = 37\n\n else:\n\n self.panic_mode(self.row, self.column, \"Invalid Character\")\n\n elif self.__state == 2: # CASE 2\n if self.c == \"=\":\n self.__state = 1\n return Token(Tag_type.OP_LE, \"<=\", self.row, self.column)\n else:\n self.__state = 1\n self.file_pointer()\n #self.column = self.column - 1\n return Token(Tag_type.OP_LT, \"<\", self.row, self.column)\n\n elif self.__state == 15:\n if self.c == \"/\":\n return Token(Tag_type.OP_DIV, \"/\", self.row, self.column)\n else:\n self.file_pointer()\n self.__state = 1\n return Token(Tag_type.OP_MUL, \"*\", self.row, self.column)\n\n elif self.__state == 5: # CASE 5\n if self.c == \"=\": # state 6\n self.__state = 1\n return Token(Tag_type.OP_EQ, \"==\", self.row, self.column)\n else:\n self.__state = 1 # state 7\n self.file_pointer()\n self.column = self.column - 1\n return Token(Tag_type.OP_ASS, \"=\", self.row, self.column)\n\n elif self.__state == 8: # CASE 8\n if self.c == \"=\":\n self.__state = 1 # state 9\n return Token(Tag_type.OP_GE, \">=\", self.row, self.column)\n else:\n self.__state = 1 # state 10\n self.file_pointer()\n self.column = self.column - 1\n return Token(Tag_type.OP_GT, \">\", self.row, self.column)\n\n elif self.__state == 11: # CASE 11\n if self.c == \"=\":\n self.__state = 1 # state 12\n return Token(Tag_type.OP_NE, \"!=\", self.row, self.column)\n else:\n if self.EOF == -1:\n return None\n else:\n if self.c == \"\\n\":\n self.row = self.row + 1\n self.column = 1\n if self.c == \"\\t\":\n self.column = self.column + 3\n\n self.__state = 11\n self.panic_mode(self.row, self.column,\n \"Invalid Character, expected =\", \"=\", self.c)\n\n elif self.__state == 16:\n if self.c == \"*\":\n self.__state = 17\n\n elif self.c == \"/\":\n self.__state = 21\n\n else:\n self.__state = 1\n self.file_pointer()\n return Token(Tag_type.OP_DIV, \"/\", self.row, self.column)\n\n elif self.__state == 17:\n if self.c != \"*\":\n self.__state = 17 # state 18\n if self.c == \"\\n\":\n self.row = self.row + 1\n self.column = 1\n if self.c == \"\\t\":\n self.column = 
self.column + 3\n if self.EOF == -1:\n self.panic_mode(self.row, self.column,\n \"Invalid Character expected */\"\"\", \"*\", self.c)\n return None\n else:\n self.__state = 19\n\n elif self.__state == 19:\n if self.c == \"/\":\n self.__state = 1\n pass # ignore\n else:\n if self.EOF == -1:\n self.panic_mode(self.row, self.column,\n \"Invalid Character expected */\"\"\", \"/\", self.c)\n return None\n else:\n self.__state = 17\n\n elif self.__state == 21:\n if self.c == \" \" or self.c == \"\\t\" or self.c in string.letters or self.c.isdigit():\n self.__state = 21\n else:\n if self.c == \"\\n\":\n self.__state = 1\n self.file_pointer()\n pass\n else:\n self.__state = 21\n\n elif self.__state == 31: # CASE 31\n if self.c.isdigit():\n self.__list_lexema.append(self.c)\n self.__state = 31\n else:\n if self.c == \".\":\n self.__list_lexema.append(self.c)\n self.__state = 32 # state 32\n else:\n self.__state = 1\n self.file_pointer()\n self.column = self.column - 1\n return Token(Tag_type.CON_NUM, ''.join(map(str, self.__list_lexema)), self.row, self.column)\n\n elif self.__state == 32: # CASE 31\n if self.c.isdigit():\n self.__list_lexema.append(self.c)\n self.__state = 31\n else:\n if self.EOF == -1:\n self.panic_mode(self.row, self.column,\n \"Error encountered. Expected an integer \" ,\"1\", self.c) # tratar esse\n return None\n else:\n if self.c == \"\\n\":\n self.row = self.row + 1\n self.column = 1\n if self.c == \"\\t\":\n self.column = self.column + 3\n self.panic_mode(self.row, self.column,\n \"Error encountered. Expected an integer \", \"1\", self.c)\n self.__state = 32\n\n elif self.__state == 35: # Case 35\n if self.c.isalpha() or self.c.isdigit():\n self.__list_lexema.append(self.c)\n else:\n self.__state = 1 # state 36\n # Pesquisa na Tabela de Simbolo.\n token = self.__TS.get_token(\n ''.join(map(str, self.__list_lexema)))\n self.column = self.column - 1\n self.file_pointer()\n if token == None:\n # if not found in the symbol table, insert.\n token = Token(Tag_type.ID, ''.join(\n map(str, self.__list_lexema)), self.row, self.column)\n\n # insert in the symbol table.\n self.__TS.put_symbol_table(token)\n if self.EOF == -1:\n pass\n else:\n # self.file_pointer()\n pass\n # return token # returns a new token object.\n\n return token\n\n elif self.__state == 37:\n # print self.c, self.__state\n # time.sleep(1)\n\n if self.c == \" \" or self.c.isalpha() or self.c.isdigit():\n self.__list_lexema.append(self.c)\n self.__state = 37\n\n elif self.c == \"\\n\":\n self.row = self.row + 1\n self.column = 1\n self.__state = 37\n self.panic_mode(self.row, self.column,\n \"Can not have line break :\" \" \", self.c) #tratar esse\n\n elif self.c == \"\\\"\":\n if len(self.__list_lexema) < 1:\n self.__state = 1\n self.panic_mode(self.row, self.column,\n \"Empty literal :\")\n else:\n self.__state = 1\n return Token(Tag_type.LIT, ''.join(map(str, self.__list_lexema)), self.row, self.column)\n\n if self.EOF == -1:\n self.__state = 1\n self.panic_mode(self.row, self.column,\n \"Invalid Character expected \\\"\", \"\\\"\", self.c)\n return None\n\n elif self.__state == 40:\n\n if self.c.isdigit() or self.c.isalpha() and self.c != \"'\":\n self.__list_lexema.append(self.c)\n self.__state = 40\n else:\n\n if self.c == \"'\":\n self.__state = 1\n if len(self.__list_lexema) > 1:\n self.panic_mode(\n self.row, self.column, \"literal must contain only one character\", \"'\", self.c)\n self.__list_lexema = []\n # print self.c\n else:\n return Token(Tag_type.CON_CHAR, ''.join(map(str, self.__list_lexema)), 
self.row, self.column)\n if self.EOF == -1:\n self.panic_mode(self.row, self.column,\n \"Invalid Character expected '\"\"\", \"'\", self.c)\n return None", "def parse(source_code):\n tokens = tokenize(source_code)\n return read(tokens)", "def tokenize(src):\n\n pass", "def _tokenize(source):\n lines = source.split(\"\\n\")\n print(\n \"{type:<10}{string:<25} {start:^12} {end:^12}\".format(\n type=\"Type\", string=\"String\", start=\"Start\", end=\"End\"\n )\n )\n print(\"-\" * 60)\n for line in lines:\n tokens = collect_tokens(line)\n for token in tokens:\n print(token)", "def _parse_source(self, src, python_version):\n f = moves.StringIO(src)\n defs_start = None\n open_type_comment_set = _TypeCommentSet.start(1)\n open_decorator = False\n last_function_definition = None\n open_variable_annotation = None\n for token in tokenize.generate_tokens(f.readline):\n tok = token.exact_type\n line = token.line\n lineno, col = token.start\n\n # Check for the first line with a top-level class or function definition.\n if defs_start is None and _CLASS_OR_FUNC_RE.match(line):\n defs_start = lineno\n\n # Process the token for decorators, function definitions, and comments.\n if tok == tokenize.AT:\n if _DECORATOR_RE.match(line):\n open_decorator = True\n elif tok == tokenize.NAME:\n if open_decorator and token.string in (\"class\", \"def\"):\n self.decorators.add(lineno)\n open_decorator = False\n if token.string == \"def\":\n last_function_definition = _FunctionDefinition.start(lineno)\n elif tok == tokenize.COMMENT:\n self._process_comment(line, lineno, col, open_type_comment_set)\n elif tok == tokenize.LPAR:\n if last_function_definition:\n last_function_definition.add_lpar(lineno)\n elif tok == tokenize.RPAR:\n if last_function_definition:\n last_function_definition.add_rpar(lineno)\n elif tok in (tokenize.NEWLINE, tokenize.ENDMARKER):\n if open_type_comment_set.type_comments:\n open_type_comment_set.end_line = lineno\n self._type_comments.append(open_type_comment_set)\n open_type_comment_set = _TypeCommentSet.start(lineno + 1)\n\n # Process the token for variable annotations.\n if last_function_definition and last_function_definition.contains(lineno):\n pass # ignore function annotations\n elif not open_variable_annotation:\n open_variable_annotation = _VariableAnnotation.start(lineno, token)\n elif tok in (tokenize.NEWLINE, tokenize.SEMI):\n # NEWLINE indicates the end of a *logical* line of Python code, allowing\n # us to handle annotations split over multiple lines.\n annotation = open_variable_annotation.annotation\n if annotation and open_variable_annotation.closed:\n # In 3.8+, the STORE_* opcode for a multiline variable assignment is\n # at the first line in the assignment; before that, it is at the last.\n if python_version >= (3, 8):\n assert open_variable_annotation.start_lineno\n annotation_lineno = open_variable_annotation.start_lineno\n else:\n annotation_lineno = lineno\n self._variable_annotations[annotation_lineno] = annotation\n open_variable_annotation = None\n else:\n open_variable_annotation.add_token(lineno, token)\n\n # Record docstrings.\n if _DOCSTRING_RE.match(line):\n self._docstrings.add(lineno)\n\n if defs_start is not None:\n disables = list(self._disables.items())\n # Add \"# type: ignore\" to the list of disables that we check.\n disables.append((\"Type checking\", self._ignore))\n for name, lineset in disables:\n lineno = lineset.get_disable_after(defs_start)\n if lineno is not None:\n self._errorlog.late_directive(self._filename, lineno, name)", "def _consume(self, 
block: str) -> None:\n self.buffer = []\n self.level = 0\n last_char = \"START\"\n for ch in char_iter(block):\n if ch in \"\\n\\r\":\n self._check_body_level()\n self.line_number += 1\n if self.in_comment:\n if ch == \"\\n\":\n self.in_comment = False\n elif ch in \" \\n\\t\\r\" and last_char in \" \\n\\t\\r\":\n pass\n elif ch == \"#\" and not self.in_value and not self.in_var_ref:\n self.in_comment = True\n elif not self.in_value and ch == \"(\":\n self._open_bracket(ch)\n elif not self.in_value and ch == \")\":\n self._close_bracket(ch)\n elif ch == '\"':\n self.in_value = not self.in_value\n self.buffer.append(ch)\n elif self.level == 1:\n if (\n ch == \"r\"\n and last_char == \"o\"\n and \"\".join(self.buffer).strip() == \"o\"\n ):\n self.n_ors += 1\n self.buffer = []\n elif ch == \"$\":\n self.param_buffer = self._parse_operator()\n self.in_var_ref = True\n self.buffer = [ch]\n elif self.in_var_ref and ch in \" \\n\\r\\t\":\n self._parse_var()\n elif ch == \">\" and last_char == \"-\":\n self._parse_label()\n else:\n self.buffer.append(ch)\n else:\n self.buffer.append(ch)\n last_char = ch\n if self.opened:\n raise ValueError(\n f\"Unmatched opening bracket \" f\"at line {self.line_number}\"\n )\n if self.n_ors and not self.is_union:\n raise ValueError(f'Missing \"or\" operator ' f\"at line {self.line_number}\")", "def get_end_brace(self):\n # Find the code to run\n\n brack_num, found_first = 0, False\n for iline, line in enumerate(self.file_ltxt[self.line_num:]):\n if '{' in line: brack_num += 1\n if '}' in line: brack_num -= 1\n\n if not found_first:\n if brack_num > 0: found_first = True\n else: continue\n\n if brack_num == 0: break\n\n else:\n self.print_error(\"Can't find the closing brace\")\n\n end_line = self.line_num + iline\n return end_line", "def raw_tokenize(src: str) -> Iterable[RawToken]:\n # Raw token handling; there is a later semantic mapping stage which\n # annotates atoms for the special handling of keywords and numbers.\n # We treat tokenization as an explicit state machine.\n # State transitions emit the previous block along with the previous state.\n state, start = None, 0\n\n for index, character in enumerate(src):\n next_state = None\n major_category = unicodedata.category(character) + character\n\n for (from_state, category_match), to_state in STATE_MACHINE.items():\n if (\n from_state == state and\n major_category.startswith(category_match)\n ):\n next_state = to_state\n break\n\n if next_state is None:\n raise ParseError(\n \"Unexpected '{0!r}'\".format(character),\n (index, index + 1),\n )\n\n if next_state != state:\n if start != index:\n assert state is not None\n\n yield RawToken(\n kind=state,\n value=src[start:index],\n location=(start, index),\n )\n start = index\n state = next_state\n\n if start != len(src):\n assert state is not None\n\n yield RawToken(\n kind=state,\n value=src[start:],\n location=(start, index + 1),\n )", "def rehydrate_indented_code_block(self, next_token):\n self.block_stack.append(next_token)\n return \"\"", "def CheckBraces(fn, filename, clean_lines, linenum, error):\n line = clean_lines.elided[linenum]\n if Match(r'^(.*){(.*)}.*$', line):\n # Special case when both braces are on the same line together, as is the\n # case for one-line getters and setters, for example, or rows of a multi-\n # dimenstional array initializer.\n pass\n else:\n # Line does not contain both an opening and closing brace.\n m = Match(r'^(.*){(.*)$', line)\n if m and not (IsBlankLine(m.group(1))):\n # Line contains a starting brace and is not empty, 
uh oh.\n if \"=\" in line and Match(r'\\)( *){$', line):\n # Opening brace is permissable in case of an initializer.\n pass\n else:\n error(filename, linenum, 'whitespace/braces', 4,\n 'when starting a new scope, { should be on a line by itself')\n m = Match(r'^(.*)}(.*)$', line)\n if m and (not IsBlankLine(m.group(1)) or not IsBlankLine(m.group(2))):\n if m.group(2) != \";\":\n error(filename, linenum, 'whitespace/braces', 4,\n '} should be on a line by itself')\n pass", "def parse_cst(self):\n stack = []\n self.tokenizer.next().must_be('{')\n for token in self.tokenizer:\n stack += [ token ] # Build a stack to process\n if token.text == \".\":\n # We've got a rule to process. Start by determining correct syntax.\n stack[1].must_be(':')\n ## Name analysis\n stack[0].assert_symbol_name()\n production_elements = stack[2:-1]\n for element in production_elements:\n element.assert_symbol_name()\n if stack[0].text in self.GlobalSymbolDict: # Redefined lexical sym or add a new production?\n existingSymbol = self.GlobalSymbolDict[stack[0].text]\n if existingSymbol.is_gla:\n raise Exception(\"Lexical Symbol %s redefined at %d,%d. Originally at %d,%d\" % \\\n (stack[0].text, stack[0].line, stack[0].col, \\\n existingSymbol.defining_token.line, existingSymbol.defining_token.col))\n existingSymbol.productions += [Production(existingSymbol,production_elements)]\n else: # Brand new symbol occurrence\n s = Symbol(stack[0])\n s.is_gla = False\n s.productions = [Production(s,production_elements)]\n self.GlobalSymbolDict[stack[0].text] = s\n stack = []\n elif token.text == \"{\":\n raise Exception(\"Unexpected %s\" % token)\n elif token.text == \"}\":\n if len(stack) > 1: raise Exception(\"Unfinished lexical specification beginning with %s\" % stack[0])\n #pp = pprint.PrettyPrinter()\n #pp.pprint(self.GlobalSymbolDict)\n return\n else: pass", "def scan(self):\n self.tokfile = open(self.tokfile_path, 'w')\n word = ''\n for line in open(self.srcfile):\n for ch in line:\n if ch in alphanum: \n word += ch\n else:\n if word:\n try:\n self.print_tok('$int', int(word))\n except ValueError:\n if word in self.reserved: \n self.print_tok('$' + word)\n else:\n self.print_tok('$id', word)\n if ch in special:\n self.print_tok(ch)\n word = ''\n self.tokfile.close()", "def __init__(self, file):\n self.__in_file = file\n self.__token = \"\" # will contain the current token\n self.__tokens = [] # will contain all tokens\n self.__lines = file.readlines()\n self.__lines = \"\".join(self.__lines) # all lines as one string, in order to make the iteration easier.\n self.__i = 0\n self.build_tokens()", "def __init__(self, current_char, source):\n self.current_char = current_char\n self.source = source", "def scheme_read(src):\n if src.current() is None:\n raise EOFError\n if val == 'nil':\n return nil\n elif val not in DELIMITERS: # ( ) ' .\n return val\n elif val == '(':\n return read_tail(src)\n else:\n raise SyntaxError('unexpected token: {0}'.format(val))", "def iter_function(source):\n is_scr = False\n is_str = False\n is_dsp = False\n begin = 0\n tokens = []\n marks = []\n\n # @NOTE: can nhac viec viet lai ham nay bang C/D/C++\n for current, c in enumerate(source):\n if is_scr is True:\n if c == '\\\"' and source[current:current + 3] == '\\\"\\\"\\\"':\n is_scr = False\n tokens.append(('bash', source[begin + 3:current]))\n begin = current + 3\n elif is_dsp is True:\n if c == '\\n':\n is_dsp = False\n begin = current + 1\n else:\n continue\n elif is_str is True:\n if c == '\\\"':\n tokens.append(('string', 
source[begin:current]))\n begin = current + 1\n is_str = False\n elif c in ['=', ',', '\\n', '#']:\n if c == '=':\n tokens.append(('variable', source[begin:current]))\n elif c == '#':\n is_dsp = True\n continue\n elif c == ',':\n if current - begin > 1 and len(source[begin:current].strip()) > 0:\n is_okey = False\n try:\n value = int(source[begin:current])\n is_okey = True\n except ValueError:\n pass\n try:\n if is_okey is False:\n value = float(source[begin:current])\n is_okey = True\n except ValueError:\n pass\n try:\n if is_okey is False:\n value = bool(source[begin:current])\n is_okey = True\n except ValueError:\n pass\n tokens.append(('value', source[begin:current]))\n begin = current + 1\n elif c in ['[', '(', '{', '\\\"', ']', ')', '}']:\n if c == '{':\n tokens.append('dict')\n elif c == '(':\n tokens.append(('function', source[begin:current]))\n elif c == '[':\n tokens.append('list')\n\n if c in [']', ')', '}', '\\\"']:\n if c == '}' and marks[-1] == '{':\n tokens.append('enddict')\n elif c == ']' and marks[-1] == '[':\n tokens.append('endlist')\n elif c == '\\\"' and marks[-1] == '\\\"':\n pass\n elif c == ')' and marks[-1] == '(':\n tokens.append('endfunction')\n elif c == '\\\"' and is_str is False:\n if source[current:current + 3] == '\\\"\\\"\\\"':\n is_scr = True\n else:\n is_str = True\n begin = current + 1\n continue\n else:\n raise StandardError('')\n\n marks.pop()\n else:\n marks.append(c)\n begin = current + 1\n\n # @NOTE: sau khi lay duoc cac token, chung ta chuyen token thanh\n # list cac function\n bundle_of_functions = None\n current_variable = None\n current_dict = None\n current_list = None\n function = None\n status = 0\n\n for token in tokens:\n if isinstance(token, tuple):\n if token[0] == 'function':\n # @NOTE: neu function ben trong 1 function thi no can phai duoc\n # parse truoc va load vao ben trong function cha\n\n if status > 0 and not function is None:\n bundle_of_functions.append({\n 'current_variable': current_variable,\n 'current_dict': current_dict,\n 'current_list': current_list,\n 'function': function\n })\n elif status <= 0 or bundle_of_functions == None:\n bundle_of_functions = []\n\n current_variable = None\n current_dict = None\n current_list = None\n status += 1\n function = {\n 'function': token[1].strip(),\n 'variables': []\n }\n elif token[0] == 'variable':\n function['variables'].append({token[1].strip(): ''})\n current_variable = len(function['variables']) - 1\n elif token[0] in ['bash', 'string', 'value']:\n if token[0] in ['string', 'value']:\n packed = token[1]\n else:\n packed = token\n\n if not current_list is None:\n current_list.append(packed)\n elif not current_dict is None:\n current_dict.append(packed)\n elif current_variable is None:\n function['variables'].append(packed)\n else:\n variable_name = list(function['variables'][current_variable].keys())[0]\n function['variables'][current_variable][variable_name] = packed\n current_variable = None\n elif token == 'endfunction':\n # @NOTE: chung ta da toi diem ket thuc cua 1 function, can kiem tra xem\n # cac function da duoc load hoan tat chua\n\n if not bundle_of_functions is None and status > 1:\n # @NOTE: function nay duoc goi boi mot function khac\n packed = function\n\n current_variable = bundle_of_functions[-1]['current_variable']\n current_dict = bundle_of_functions[-1]['current_dict']\n current_list = bundle_of_functions[-1]['current_list']\n function = bundle_of_functions[-1]['function']\n\n if packed['function'] == function:\n bundle_of_functions.pop()\n\n if not 
current_list is None:\n current_list.append(packed)\n elif not current_dict is None:\n current_dict.append(packed)\n elif current_variable is None:\n function['variables'].append(packed)\n else:\n variable_name = list(function['variables'][current_variable].keys())[0]\n function['variables'][current_variable][variable_name] = packed\n\n if len(bundle_of_functions) == 0:\n bundle_of_functions = None\n\n status -= 1\n if status == 0:\n bundle_of_functions = None\n yield function\n elif token == 'list':\n current_list = []\n elif token == 'dict':\n current_dict = []\n elif token == 'endlist':\n if current_variable is None:\n function['variables'].append(current_list)\n else:\n variable_name = list(function['variables'][current_variable].keys())[0]\n function['variables'][current_variable][variable_name] = current_list\n current_variable = None\n current_list = None\n elif token == 'enddict':\n if current_variable is None:\n function['variables'].append(current_dict)\n else:\n variable_name = list(function['variables'][current_variable].keys())[0]\n function['variables'][current_variable][variable_name] = current_dict\n current_variable = None\n current_list = None", "def create_token_generator(self):\n while self.current_char is not None:\n # handle whitespaces\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n # handle integer and float numbers\n if self.current_char.isdigit():\n yield self.number()\n continue\n\n # handle identifiers e.g variable names\n if self.current_char.isalpha():\n yield self.identifier()\n continue\n\n # handle strings e.g \"Hello, World\"\n if self.current_char == '\"':\n self.advance() # skip opening quote\n yield self.string()\n self.advance() # skip closing quote\n continue\n\n # handle single characters e.g symbols\n if self.current_char in self.tokentype.keys():\n char = self.current_char\n self.advance()\n yield self.generic_token(char)\n continue\n # add token to indicate end of file (EOF)\n yield Token(self.tokentype['EOF'], None)", "def read_from_tokens(tokens):\n if len(tokens) == 0:\n raise SyntaxError(\"unexpected EOF while reading\")\n token = tokens.pop(0)\n if \"(\" == token:\n res = []\n while tokens[0] != \")\":\n res.append(read_from_tokens(tokens))\n tokens.pop(0) # pop off ')'\n return res\n elif \")\" == token:\n raise SyntaxError(\"unexpected )\")\n else:\n return atom(token)", "def structure_parse(source):\r\n return structure_grammar().parseString(source)", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def substr_brace(s: str, left_brace_index: int) -> str:\n assert s[left_brace_index] == \"{\"\n depth = 1\n for i, c in enumerate(s[left_brace_index + 1:]):\n if c == \"{\":\n depth += 1\n elif c == \"}\":\n depth -= 1\n if depth == 0:\n right = left_brace_index + i + 2\n return s[left_brace_index:right]\n\n raise ValueError(f\"Unexpected EOF on '{s[left_brace_index:]}'\")", "def __init__(self, input_file):\n file_content = None\n self.input_file_name = input_file\n\n # opening the input jack file\n print(\"file is \"+input_file)\n file = open(input_file, \"r\")\n if file.mode == \"r\":\n # extracting the entire text\n self.file_content = file.read()\n\n # removing jack documentation\n self.file_content = re.sub(r\"(/{2}.*?\\n)\", \"\",file_content)\n 
self.file_content = re.sub(r\"/\\*{1,2}(.|\\n)*?\\*/\", \"\", file_content, re.DOTALL)\n print(file_content)\n file.close()\n\n # creating a list and filtering the list from empty strings\n if self.file_content is not None:\n\n # setting other variables\n self.token_type = None\n self.token = None\n self.inside_string = False\n self.word = \"\"\n self.key_words = {\"class\", \"constructor\", \"function\", \"method\",\n \"field\", \"static\", \"var\", \"int\", \"char\", \"true\",\n \"boolean\", \"void\", \"false\", \"null\", \"this\",\n \"let\", \"do\", \"if\", \"else\", \"while\", \"return\"}\n\n self.symbols = {\"{\", \"}\", \"(\", \")\", \"[\", \"]\", \".\", \",\", \";\", \"+\",\n \"-\", \"*\", \"/\", \"&\", \"|\", \"<\", \">\", \"=\", \"~\"}\n self.double_symbols = {\"<=\", \">=\", \"!=\"}#todo", "def tokenize(source_code):\n delimiters = '();'\n for delimiter in delimiters:\n source_code = source_code.replace(delimiter, ' '+delimiter+' ')\n return source_code.split()", "def ast_parse(self, source, *args, **kwargs):\n compiled = syntaxerr_memoized_parse_block(source)\n return super(CoconutCompiler, self).ast_parse(compiled, *args, **kwargs)", "def __parse_blocks_pass(self):\n\n self.stack = [DocumentStackToken()]\n\n self.tokenized_document = []\n token_to_use = self.source_provider.get_next_line()\n did_start_close = False\n did_started_close = False\n requeue = []\n ignore_link_definition_start = False\n POGGER.debug(\"---$---\", token_to_use)\n POGGER.debug(\"---\")\n self.__parse_properties.pragma_lines = {}\n line_number = 1\n try:\n (\n token_to_use,\n line_number,\n requeue,\n ) = self.__process_front_matter_header_if_present(\n token_to_use, line_number, requeue\n )\n did_start_close = token_to_use is None\n keep_on_going = True\n while keep_on_going:\n POGGER.debug(\"next-line>>$\", token_to_use)\n POGGER.debug(\"stack>>$\", self.stack)\n POGGER.debug(\"current_block>>$\", self.stack[-1])\n POGGER.debug(\"line_number>>$\", line_number)\n POGGER.debug(\"---\")\n\n position_marker = PositionMarker(line_number, 0, token_to_use)\n parser_state = ParserState(\n self.stack,\n self.tokenized_document,\n TokenizedMarkdown.__close_open_blocks,\n self.__handle_blank_line,\n )\n if did_start_close:\n POGGER.debug(\"\\n\\ncleanup\")\n\n was_link_definition_started_before_close = self.stack[\n -1\n ].was_link_definition_started\n\n did_started_close = True\n (\n tokens_from_line,\n requeue_line_info,\n ) = TokenizedMarkdown.__close_open_blocks(\n parser_state,\n self.tokenized_document,\n include_block_quotes=True,\n include_lists=True,\n caller_can_handle_requeue=True,\n was_forced=True,\n )\n if tokens_from_line and not self.tokenized_document:\n self.tokenized_document.extend(tokens_from_line)\n\n if not (requeue_line_info and requeue_line_info.lines_to_requeue):\n keep_on_going = False\n else:\n assert was_link_definition_started_before_close\n assert not requeue_line_info.lines_to_requeue[0]\n\n del requeue_line_info.lines_to_requeue[0]\n line_number -= 1\n\n did_start_close = False\n tokens_from_line = None\n else:\n POGGER.debug(\">>>>$\", self.tokenized_document)\n\n if not token_to_use or not token_to_use.strip():\n POGGER.debug(\"call __parse_blocks_pass>>handle_blank_line\")\n (\n tokens_from_line,\n requeue_line_info,\n ) = self.__handle_blank_line(\n parser_state,\n token_to_use,\n from_main_transform=True,\n position_marker=position_marker,\n )\n else:\n POGGER.debug(\"\\n\\nnormal lines\")\n (\n tokens_from_line,\n _,\n _,\n requeue_line_info,\n _,\n ) = 
ContainerBlockProcessor.parse_line_for_container_blocks(\n parser_state,\n position_marker,\n ignore_link_definition_start,\n self.__parse_properties,\n None,\n )\n\n POGGER.debug(\"<<<<$\", self.tokenized_document)\n\n if keep_on_going:\n line_number, ignore_link_definition_start = TokenizedMarkdown.__xx(\n line_number, requeue_line_info, requeue\n )\n\n POGGER.debug(\n \"---\\nbefore>>$\",\n self.tokenized_document,\n )\n POGGER.debug(\"before>>$\", tokens_from_line)\n if tokens_from_line:\n self.tokenized_document.extend(tokens_from_line)\n POGGER.debug(\n \"after>>$\",\n self.tokenized_document,\n )\n if requeue:\n POGGER.debug(\"requeue>>$\", requeue)\n POGGER.debug(\"---\")\n\n (\n token_to_use,\n did_start_close,\n did_started_close,\n ) = self.__determine_next_token_process(\n requeue, did_start_close, did_started_close\n )\n except AssertionError as this_exception:\n error_message = f\"A project assertion failed on line {line_number} of the current document.\"\n raise BadTokenizationError(error_message) from this_exception\n\n if self.__parse_properties.pragma_lines:\n self.tokenized_document.append(\n PragmaToken(self.__parse_properties.pragma_lines)\n )\n return self.tokenized_document", "def collapse_braces(self, src):\r\n result = []\r\n nesting = 0;\r\n\r\n for c in src:\r\n if not nesting:\r\n result.append(c)\r\n if c == '{':\r\n nesting += 1\r\n elif c == '}':\r\n nesting -= 1\r\n result.append(c)\r\n \r\n return ''.join(result)" ]
[ "0.59238863", "0.58185357", "0.5810777", "0.5810427", "0.5761932", "0.5747912", "0.542546", "0.5393044", "0.5324538", "0.530029", "0.5290811", "0.5258126", "0.5244535", "0.52285373", "0.5211549", "0.517982", "0.51321423", "0.51276994", "0.51232326", "0.51212436", "0.5091131", "0.50712776", "0.5049046", "0.5031744", "0.5010702", "0.50086176", "0.50045997", "0.5001782", "0.49633548", "0.4949883" ]
0.71761155
0
Receives a char and returns whether it is a left or right brace
def should_lex(cls, char): return char == '{' or char == '}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bracketed (phrase,bracketing='()'):\r\n\r\n level = 0\r\n left_point = None\r\n right_point = None\r\n \r\n\r\n for count,char in enumerate(phrase):\r\n\r\n if char == bracketing[0]:\r\n if level == 0:\r\n left_point = count\r\n level+=1\r\n if char == bracketing[1]:\r\n level-=1\r\n if level == 0:\r\n right_point = count\r\n if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False", "def bracketed (self,phrase):\r\n\r\n level = 0\r\n left_point = None\r\n right_point = None\r\n \r\n\r\n for count,char in enumerate(phrase):\r\n\r\n if char == '(':\r\n if level == 0:\r\n left_point = count\r\n level+=1\r\n if char == ')':\r\n level-=1\r\n if level == 0:\r\n right_point = count\r\n if not (left_point is None) and (not right_point is None) and left_point == 0 and right_point == len(phrase)-1:\r\n return True\r\n return False", "def _match_brace(string, start_pos, pair='[]'):\n depth = 1\n if string[start_pos] != pair[0]:\n return None\n for index, char in enumerate(string[start_pos + 1:]):\n if char == pair[0]:\n depth += 1\n elif char == pair[1]:\n depth -= 1\n if depth == 0:\n return start_pos + index + 1\n return None", "def bracket_match(bracket):\n if bracket == '(':\n return ')'\n if bracket == '[':\n return ']'\n if bracket == '{':\n return '}'", "def es_parentesis(caracter):\n if len(caracter) != 1:\n raise TypeError(str(caracter) + ' no es un parentesis')\n elif caracter in '()': # caracter == '(' or caracter == ')':\n return 'Es parentesis'\n return 'No es parentesis'", "def is_char_token(c: str) -> bool:\n return c in [\"+\", \"-\", \"*\", \"/\", \"(\", \")\"]", "def character(x):\n if (x==\"a\"or x==\"A\"or x==\"e\"or x==\"E\"or x==\"i\"or x==\"I\"or x==\"o\"or x==\"O\"or x==\"u\"or x==\"U\"):\n return('True')\n else:\n return('False')", "def _is_control(char):\n if char == '\\t' or char == '\\n' or char == '\\r':\n return False\n cat = unicodedata.category(char)\n if cat.startswith('C'):\n return True\n return False", "def substr_brace(s: str, left_brace_index: int) -> str:\n assert s[left_brace_index] == \"{\"\n depth = 1\n for i, c in enumerate(s[left_brace_index + 1:]):\n if c == \"{\":\n depth += 1\n elif c == \"}\":\n depth -= 1\n if depth == 0:\n right = left_brace_index + i + 2\n return s[left_brace_index:right]\n\n raise ValueError(f\"Unexpected EOF on '{s[left_brace_index:]}'\")", "def check_type(character: str):\n if character.isupper():\n return 'upper'\n elif character.islower():\n return 'lower'\n elif character.isspace():\n return 'space'\n elif character in string.punctuation:\n return 'punc'\n else:\n return 'digit'", "def check_type(character: str):\n if character.isupper():\n return 'upper'\n elif character.islower():\n return 'lower'\n elif character.isspace():\n return 'space'\n elif character in string.punctuation:\n return 'punc'\n else:\n return 'digit'", "def end_marker(data):\n if ord(data[-1]) == 10 and data[-2] == '}':\n return True", "def is_balanced(content):\n\n if content.find('{') == -1:\n return False\n stack = []\n push_chars, pop_chars = '({', ')}'\n for c in content:\n if c in push_chars:\n stack.append(c)\n elif c in pop_chars:\n if not len(stack):\n return False\n else:\n stack_top = stack.pop()\n balancing_bracket = push_chars[pop_chars.index(c)]\n if stack_top != balancing_bracket:\n return False\n return not stack", "def checkForNOrGap(character):\n if character == \"-\" or character == \"N\":\n return False\n else:\n return True", "def 
checkForNOrGap(character):\n if character == \"-\" or character == \"N\":\n return False\n else:\n return True", "def isbracket(token):\n\n # Token is a bracket\n return token == \"[\"", "def valid_ring_character(self, character):\n if character in self._charset:\n return character\n else:\n raise RotorRingCharacterError(character)", "def has_balanced_parens(string):", "def isBalanced(s):\n left = list(s)\n right = []\n\n result = 'YES'\n# breakpoint()\n while left or right: # if not empty, enter loop\n if (not left) and right:\n # left is empty but right is not empty, we have a problem\n result = 'NO'\n break\n\n else:\n x = left.pop()\n if x in set('})]'):\n right.append(x)\n elif x == '{' and not right:\n # cannot have hanging opening brackets; right.pop()\n # when right is empty generates error\n result = 'NO'\n break\n\n elif x == '{' and right[-1] == '}':\n right.pop()\n elif x == '(' and not right:\n result = 'NO'\n break\n\n elif x == '(' and right[-1] == ')':\n right.pop()\n elif x == '[' and not right:\n result = 'NO'\n break\n elif x == '[' and (right[-1] == ']'):\n right.pop()\n else:\n result = 'NO'\n break\n print(result)\n return result", "def validator(self, char):\r\n if curses.ascii.isprint(char):\r\n return char\r\n if char == curses.ascii.TAB:\r\n char = curses.KEY_DOWN\r\n if char in [curses.KEY_DOWN, curses.KEY_UP]:\r\n self.result = char\r\n return curses.ascii.BEL\r\n if char in [10, 13, curses.KEY_ENTER, curses.ascii.BEL]:\r\n self.result = 10\r\n return curses.ascii.BEL\r\n if char in [27, curses.KEY_F10]:\r\n self.result = -1\r\n return curses.ascii.BEL\r\n return char", "def is_valid_char(t_char):\r\n eax = 1 # mi preparo il flag \"invalido\" per il carattere\r\n \r\n # se il carattere e' un operatore, un operando o uno spazio\r\n # il carattere e' valido\r\n if is_operator(t_char) == 0:\r\n # e' operatore\r\n eax = 0\r\n \r\n if is_operand(t_char) == 0:\r\n # e' operando\r\n eax = 0\r\n \r\n if ord(t_char) == 32:\r\n # e' uno spazio\r\n eax = 0\r\n\r\n return eax", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def match(str1, str2):\n if ( (str2 == '[' and str1 == ']')\n or (str2 == '{' and str1 == '}')\n or (str2 == '(' and str1 == ')')):\n return True\n return False", "def check_matching_brackets(s, opening=\"([{\", closing=\")]}\"):\n\n stack = Stack()\n\n for item in s:\n # Stores any opening character\n if item in opening:\n stack.push(item)\n else:\n # if a closing bracket is found and we try to remove the matching opening bracket from the top of the stack\n # in case the top of the stack is not a matching bracket, the string is not properly nested\n if stack.length() < 1 or stack.pop() is not opening[closing.index(item)]:\n return 0\n\n return 0 if 
stack.length() > 0 else 1", "def is_matched(expression):\n\n balance = []\n\n for char in expression:\n if char == \"{\" or char == \"[\" or char == \"(\":\n balance.append(char)\n\n elif char == \"}\":\n if balance[-1] == \"{\":\n balance.pop()\n else:\n return False\n\n elif char == \"]\":\n if balance[-1] == \"[\":\n balance.pop()\n else:\n return False\n\n elif char == \")\":\n if balance[-1] == \"(\":\n balance.pop()\n else:\n return False\n\n if len(balance) == 0:\n return True", "def has_balanced_parens(string):\n\n parens = 0\n\n for char in string:\n if char == \"(\":\n parens += 1\n elif char == \")\":\n parens -= 1\n\n if parens < 0:\n return False\n\n return parens == 0", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False # pragma: no cover\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True # pragma: no cover\n return False", "def balanced_string(string):\n stack = create_stack()\n pos = 0\n while pos < len(string):\n if string[pos] in '[{(':\n stack.push(string[pos])\n elif string[pos] in ']})':\n pair = stack.pop()\n if not match(string[pos], pair):\n return False\n pos = pos+1\n #return stack.length()\n if stack.length() == 0:\n return True\n else:\n return False" ]
[ "0.6396085", "0.6267263", "0.6178741", "0.61347187", "0.6007452", "0.6006558", "0.59606045", "0.5951733", "0.59003645", "0.58945894", "0.58871955", "0.58813554", "0.58139944", "0.58129483", "0.58129483", "0.5799351", "0.57966834", "0.5772036", "0.57385767", "0.57300985", "0.56983304", "0.5643672", "0.5643672", "0.5630539", "0.5613133", "0.5607284", "0.555153", "0.55403095", "0.55057716", "0.5471864" ]
0.72300553
0
Crop graph image Crops the desired image by it's type.
def crop_image(self): image_data = Image.open(self.img_path) return image_data.crop(self.data_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_image(self, img):\n img.crop_image(self._center, 1.1 * self._radius)", "def crop(image, dimX, dimY):\n # TODO\n return image", "def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def doCrop(image, x, y, w, h):\n\tcrop_height = int((config.FACE_HEIGHT / float(config.FACE_WIDTH)) * w)\n\tmidy = y + h/2\n\ty1 = max(0, midy-crop_height/2)\n\ty2 = min(image.shape[0]-1, midy+crop_height/2)\n\treturn image[y1:y2, x:x+w]", "def crop_img(image, bound):\n scale = 1.01 # 1%\n return image.crop((bound.vertices[0].x // scale, bound.vertices[0].y // scale,\n int(bound.vertices[2].x * scale), int(bound.vertices[2].y) * scale))", "def crop(img):\n new_shape = min(img.shape[0], img.shape[1])\n \n return img[0:new_shape, 0:new_shape, ...]", "def crop_face_from_data(cfg, is_inference, data):\r\n raise NotImplementedError()", "def crop_image (filename):\n from PIL import Image\n image = Image.open(filename)\n for edge in 'NSWE':\n image = _crop(image, edge)\n image.save(filename)", "def crop_image(image):\r\n return image[40:-20, :]", "def crop_image(image):\n delta = .05\n rand_top_ratio = random.uniform(default_top_ratio - delta,\n default_top_ratio + delta)\n rand_bot_ratio = random.uniform(default_bot_tatio - delta,\n default_bot_tatio + delta)\n image = preprocess(image, top_ratio=rand_top_ratio, bot_ratio=rand_bot_ratio)\n\n return image", "def crop_object_from_image(saving_folder,root_folder_path,root_folder_name,row_info):\n class_name=row_info['class']\n file_id=row_info['file_id']\n img_type=row_info['type']\n xmin=row_info['x_min']\n xmax=row_info['x_max']\n ymin=row_info['y_min']\n ymax=row_info['y_max']\n\n\n origin_img_path=os.path.join(root_folder_path,root_folder_name,img_type,file_id+\".png\")\n crop_img_path=os.path.join(saving_folder,file_id+\"_\"+class_name+\".png\")\n\n origin_img=cv2.imread(origin_img_path)\n crop_img=origin_img[ymin:ymax-1,xmin:xmax-1]\n\n # If width or height only contain 1 pixel, do not crop.\n if xmax-xmin<=2 or ymax-ymin<=2:\n print(\"Only one pixel, pass!\")\n return 0\n # print(origin_img.shape)\n # print(xmin,xmax,ymin,ymax)\n # print(crop_img.shape)\n # print(crop_img_path)\n cv2.imwrite(crop_img_path,crop_img)", "def crop_image(input_image, output_image, start_x, start_y, 
width, height):\n box = (start_x, start_y, start_x + width, start_y + height)\n output_img = img.crop(box)\n output_img.save(output_image +\".png\")", "def crop_image_to_shape(image, x, y, shape):\n return crop_image(image, x, y, shape[0], shape[1])", "def crop_roi_image(data_dir):\n images = list()\n labels = list()\n\n csv_dir = data_dir\n images_dir = data_dir.split(\"_\")[0] + \"_png\"\n\n df = pd.read_csv('/'.join(csv_dir.split('/')[:-1]) + '/data_description.csv', header=None)\n\n for row in df.iterrows():\n # Skip normal cases.\n if str(row[1][4]) == 'nan':\n continue\n if str(row[1][4]) == '*NOT':\n continue\n\n # Process image.\n image = preprocess_image(images_dir + '/' + row[1][0] + '.png')\n\n # Abnormal case: crop around tumour.\n y2 = 0\n x2 = 0\n if row[1][2] != 'NORM':\n y1 = image.shape[1] - int(row[1][5]) - 112\n if y1 < 0:\n y1 = 0\n y2 = 224\n if y2 != 224:\n y2 = image.shape[1] - int(row[1][5]) + 112\n if y2 > image.shape[1]:\n y2 = image.shape[1]\n y1 = image.shape[1] - 224\n x1 = int(row[1][4]) - 112\n if x1 < 0:\n x1 = 0\n x2 = 224\n if x2 != 224:\n x2 = int(row[1][4]) + 112\n if x2 > image.shape[0]:\n x2 = image.shape[0]\n x1 = image.shape[0] - 224\n\n # Normal case: crop around centre of image.\n else:\n y1 = int(image.shape[1] / 2 - 112)\n y2 = int(image.shape[1] / 2 + 112)\n x1 = int(image.shape[0] / 2 - 112)\n x2 = int(image.shape[0] / 2 + 112)\n\n # Get label from CSV file.\n label = \"normal\"\n if str(row[1][3]) == 'B':\n label = \"benign\"\n elif str(row[1][3]) == 'M':\n label = \"malignant\"\n\n # Append image and label to lists.\n images.append(image[y1:y2, x1:x2, :])\n labels.append(label)\n\n return images, labels", "def crop(self, image):\n\t\treturn image.copy()[self.ymin:self.ymax,self.xmin:self.xmax]", "def crop(img, x, y, w, h):\n check_type(img)\n return img.crop((x, y, x + w, y + h))", "def crop (*args, **kwargs):\n return compute('crop', inputs=list(args), args=kwargs)", "def basic_crop(data):\n return data['crop'];", "def crop_image(image_to_crop, year):\r\n\timg = Image.open(image_to_crop)\r\n\t#The dimensions of just the US in the image\r\n\timg = img.crop((80, 240, 800, 615))\r\n\r\n\tfile_destination = \"images/cropped_images/\" + str(year) + \".png\"\r\n\r\n\timage_file = open(file_destination, 'wb')\r\n\timg.save(image_file, 'png')\r\n\timage_file.close()", "def Crop_Image(img, mask, x, y, width, height):\n img = img[y:y+height, x:x+width,:]\n mask = mask[y:y+height, x:x+width,:]\n return img, mask", "def crop(img: 'np.ndarray', x: int, y: int, width: int, height: int) -> 'np.ndarray':\n return img[y:y+height, x:x+width]", "def crop(self,channel,center_coord,crop_size,z_coord=None,z_size=1): \n x1=center_coord[0]-int(crop_size/2)\n x2=x1+crop_size\n y1=center_coord[1]-int(crop_size/2)\n y2=y1+crop_size\n img_crop=MicImage()\n img_crop._metaData={**self._metaData}\n img_crop.xml=self.xml\n\n\n if z_coord is not None and z_size>1:\n z1=z_coord-int(z_size/2)\n if z1<0:\n z1=0\n z2=z1+z_size\n if (z_coord is not None and z_size==1):\n z1=z_coord\n z2=z1+1\n if z_coord is None:\n z1=0\n z2=-1\n\n img_crop.pixels= self.pixels[z1:z2,x1:x2,y1:y2,channel]\n \n if img_crop.pixels.shape[0]==1:\n img_crop.pixels=np.squeeze(img_crop.pixels)\n img_crop.sumprj=np.squeeze(img_crop.pixels)\n img_crop.maxprj=np.squeeze(img_crop.pixels)\n else:\n img_crop.prj(\"max\")\n img_crop.prj(\"sum\")\n img_crop._metaData.update({\"size_x\": crop_size})\n img_crop._metaData.update({\"size_x\": crop_size})\n\n return img_crop", "def Save_Image_Crop(img, x, y, width, 
height, filename = None, path = 'Predictions'):\n img = img[y:y+height, x:x+width,:]\n\n if filename is not None:\n try: \n os.mkdir(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(img)\n plt.tight_layout()\n plt.savefig(path + '/' + filename + '_Crop.png')", "def _crop(image, offset_height, offset_width, crop_height, crop_width):\n original_shape = tf.shape(image)\n\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3),\n ['Rank of image must be equal to 3.'])\n with tf.control_dependencies([rank_assertion]):\n cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])\n\n size_assertion = tf.Assert(\n tf.logical_and(\n tf.greater_equal(original_shape[0], crop_height),\n tf.greater_equal(original_shape[1], crop_width)),\n ['Crop size greater than the image size.'])\n\n offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))\n\n # Use tf.strided_slice instead of crop_to_bounding box as it accepts tensors\n # to define the crop size.\n with tf.control_dependencies([size_assertion]):\n image = tf.strided_slice(image, offsets, offsets + cropped_shape,\n strides=tf.ones_like(offsets))\n return tf.reshape(image, cropped_shape)", "def __crop_img(img, cx, cy, max_axis, padding=0):\n\n new_height = max_axis\n new_width = max_axis\n\n cy -= new_height // 2\n cx -= new_width // 2\n\n if (cy + new_height) > img.shape[0]:\n shift = (cy + new_height) - img.shape[0]\n cy -= shift\n\n if (cx + new_width) > img.shape[1]:\n shift = (cx + new_width) - img.shape[1]\n cx -= shift\n\n cy = max(0., cy)\n cx = max(0., cx)\n\n cx = padding if cx == 0 else cx\n cy = padding if cy == 0 else cy\n\n cropped_img = img[cy - padding:cy + new_height + padding, cx - padding:cx + new_width + padding, :]\n\n return cropped_img", "def __randomCrop(self, img):\n limit = self.PROCESSING_DIM - self.INPUT_DIM\n # pick 2 random integers less than this limit as the origin of the cropped image\n x_start = np.random.randint(limit)\n y_start = np.random.randint(limit)\n return img.crop((x_start, y_start, x_start + self.INPUT_DIM, y_start + self.INPUT_DIM))", "def convert_crop(node, **kwargs):\n\n name, inputs, attrs = get_inputs(node, kwargs)\n\n start = np.array([0, 0, 0, 0], dtype=np.int) # index是int类型\n\n export_nodes = []\n\n start_node = create_helper_tensor_node(start, name + '__starts', kwargs)\n export_nodes.extend(start_node)\n start_node = start_node[-1].name\n shape_node = create_helper_shape_node(inputs[1], inputs[1] + '__shape')\n export_nodes.extend(shape_node)\n shape_node = shape_node[-1].name\n\n crop_node = onnx.helper.make_node(\n \"Slice\",\n inputs=[inputs[0], name + '__starts', inputs[1] + '__shape'], # data、start、end\n outputs=[name],\n name=name\n )\n\n logging.warning(\n \"Using an experimental ONNX operator: Crop. \" \\\n \"Its definition can change.\")\n export_nodes.extend([crop_node])\n\n return export_nodes" ]
[ "0.66398174", "0.6505323", "0.6446542", "0.63750404", "0.63750404", "0.63555723", "0.6341775", "0.633309", "0.62684494", "0.626365", "0.623445", "0.6195632", "0.61726093", "0.61628", "0.6140623", "0.6068601", "0.60564905", "0.6035729", "0.60079396", "0.6002261", "0.5993988", "0.59885573", "0.594257", "0.5935405", "0.5908762", "0.5878006", "0.58662486", "0.58571565", "0.5855612", "0.58521116" ]
0.6803471
0
Transform Image into array Transform cropped image into an numpy multidimensional array.
def np_image_matrix(self): return np.array(self.crop_image())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_to_array(self, img):\n x = np.asarray(img, dtype=self.dtype)\n if len(x.shape) == 3:\n if self.channels_first:\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if self.channels_first:\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x", "def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr", "def convert_image_to_array(image):\n w, h = image.size\n\n # we support black and white images only.\n if \"P\" in image.getbands():\n #a = image.split()\n #a = image.tostring()\n img_array = np.fromstring(image.tostring(), dtype=np.uint8)\n return img_array.reshape((w, h))\n else:\n raise TypeError(\"Wrong Image type. Only B&W images are supported\")", "def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def array_from_img(image):\n return np.array(image)", "def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def to_array(self):\n return np.array(self.to_image())", "def get_array(self, scale=1):\n array = cv2.imread(str(self.path), self.read_type)\n\n # resize original image so it can be be scaled without fractions\n x_extra = array.shape[0] % self.scaling\n y_extra = array.shape[1] % self.scaling\n\n x_extra = self.scaling - x_extra if x_extra != 0 else x_extra\n y_extra = self.scaling - y_extra if y_extra != 0 else y_extra\n\n padded_array = cv2.resize(\n array, (int(array.shape[1] + y_extra), int(array.shape[0] + x_extra))\n )\n\n # scale image\n resized_array = cv2.resize(\n padded_array,\n (int(padded_array.shape[1] * scale), int(padded_array.shape[0] * scale)),\n )\n\n # cv2 reads in array as BGR, tensorboard shows as RGB\n if not self.greyscale:\n x = np.copy(resized_array)\n resized_array[:, :, 0] = x[:, :, 2]\n resized_array[:, :, 2] = x[:, :, 0]\n\n # cv2.imshow('image',array)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n if self.greyscale:\n resized_array = np.expand_dims(resized_array, 2)\n return resized_array", "def resize_img_to_array(img, img_shape=(244, 244)):\n # Code from https://gist.github.com/yinleon/8a6bf8c2f3226a521680f930f3d4339f\n img_array = np.array(\n img.resize(\n img_shape,\n Image.ANTIALIAS\n )\n )\n\n return img_array", "def process_image(self, image_path):\n\n img = load_img(image_path, target_size=IMAGE_SIZE)\n img_array = img_to_array(img)\n # Create a batch by increase dimensions\n img_array = expand_dims(img_array, 0)\n print(img_array.shape)\n return img_array", "def preprocess(self, image):\n return img_to_array(image, data_format=self.dataFormat)", "def vtk_image_to_numpy(image):\n data = vtk_to_numpy(image.GetPointData().GetScalars())\n data.shape = 
get_vtk_image_shape(image)\n return data", "def convert_image_to_1d_array(x):\r\n\r\n #x = cv2.imread(img, cv2.IMREAD_GRAYSCALE)\r\n if x is None:\r\n print( \"ERROR: loading image ' + img + ' failed.\")\r\n return None\r\n \r\n x = cv2.threshold(x, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n if x is None:\r\n print (\"ERROR: thresholding image ' + img + ' failed.\")\r\n return None\r\n\r\n return x.flatten()", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def convert_canvas_image_to_array(image_data):\n # Split the image data from the metadata\n split_str = b'base64,'\n index = image_data.find(split_str) + len(split_str)\n # Decode the image data from base64\n png_data = base64.b64decode(image_data[index:])\n # Create a PIL image from the bytes\n image = PIL.Image.open(BytesIO(png_data))\n # Resize image\n image = image.resize([IMAGE_SIZE, IMAGE_SIZE], PIL.Image.LANCZOS)\n # Reshape into a 4D numpy tensor\n pix = np.array(image, dtype='float32')\n pix = pix[..., 3]\n pix = pix.reshape(1, 28, 28, 1)\n # Normalize from [0-255] -> [0,1]\n pix /= 255\n return pix", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def image2array(im):\n\n arr = numpy.zeros(im.size)\n\n for x in xrange(im.size[0]):\n for y in xrange(im.size[1]):\n arr[x,y] = im.getpixel((x,y))\n\n return arr", "def load_image_into_numpy_array(self, path):\n \n return np.array(Image.open(path))", "def get_correction_array(self):\n import numpy\n\n # Select the first datablock and rewind all the categories\n self.cbf_handle.select_datablock(0)\n self.cbf_handle.select_category(0)\n self.cbf_handle.select_column(2)\n self.cbf_handle.select_row(0)\n\n # Check the type of the element to ensure it's a binary\n # otherwise raise an exception\n type = self.cbf_handle.get_typeofvalue()\n if type.find('bnry') > -1:\n\n # Read the image data into an array\n image_string = self.cbf_handle.get_integerarray_as_string()\n image = numpy.fromstring(image_string, numpy.int32)\n\n # Get the array parameters\n parameters = self.cbf_handle.get_integerarrayparameters_wdims()\n image_size = (parameters[10], parameters[9])\n\n # Resize the image\n image.shape = (image_size)\n\n else:\n raise TypeError('Can\\'t find image')\n\n # Return the image\n return image", "def _grey_img_to_arr(image, verbose=False):\n try:\n w, h = image.size\n arr = np.array(image.getdata())\n arr = _rgb_to_grey(arr, (h, w), verbose=verbose)\n if verbose:\n print(\"Converted from RGB to grayscale\")\n except:\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(height, width)\n return arr", "def flattenImage(input_array):\r\n shp = np.size(input_array)\r\n return np.reshape(input_array, (shp,))", "def get_array(self) -> numpy.array:\r\n \r\n return self.pic_array", "def get_image_array(self):\n with picamera.array.PiRGBArray(self.camera) as output:\n self.camera.resolution = (640, 480)\n self.camera.capture(output, 'rgb')\n logging.info(\"Captured image of size {0}x{1}x{2}\".format(\n output.array.shape[0], output.array.shape[1], output.array.shape[2]))\n output.truncate(0)\n return output.array\n # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)\n # self.stream.seek(0)\n # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)\n # self.stream.seek(0)\n # self.stream.truncate()\n # self.camera.close()", "def imagefile_to_array(imagefname):\n with 
Image.open(imagefname) as image: \n im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)\n rows = image.size[1]\n cols = image.size[0]\n no_channels = int(len(im_arr)/rows/cols)\n im_arr = im_arr.reshape((rows, cols, no_channels))\n im_arr = np.rollaxis(im_arr,-1)\n return im_arr", "def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images", "def load_image_as_array(filename):\n im = Image.open(filename)\n arr = np.asarray(im)\n return arr", "def read_image_to_array(image_name='default.png'):\n\timg = Image.open(image_name)\n\tres_arr = []\n\n\tfor y in range(img.height):\n\t\tres_arr.append([])\n\n\t\tfor x in range(img.width):\n\t\t\tpixel = img.getpixel((x,y))\n\t\t\tres_arr[y].append(is_there_life(pixel))\n\treturn res_arr", "def preprocess(img: 'np.ndarray') -> 'np.ndarray':\n return cv2.resize(img, (45, 45), interpolation=cv2.INTER_AREA)[:,:,:3]", "def crop(self):\n return np.array([f.crop() for f in self])", "def load_image_into_numpy_array(path):\r\n \r\n return np.array(Image.open(path))" ]
[ "0.722699", "0.6876687", "0.68659633", "0.68560636", "0.6853701", "0.65742826", "0.64895433", "0.64520043", "0.64459306", "0.63613623", "0.6347112", "0.6313576", "0.6297982", "0.6266448", "0.6222398", "0.6181755", "0.6175415", "0.6161334", "0.6153839", "0.6122482", "0.6093073", "0.6092047", "0.6090698", "0.59680563", "0.5928888", "0.5917064", "0.5899322", "0.58880806", "0.58759046", "0.586882" ]
0.6892566
1
Find Blue pixels Finds all blue pixels inside the graph area, which represents the desired points of the graph. The method generates a numpy 2d array with these pixels relative positions.
def blue_matrix(self): return np.vstack(np.where(self.np_image_matrix() == 2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_blue(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2]", "def get_blue(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_blue()", "def blue_channel(img):\n\n blue = np.zeros(img.shape,dtype=float)\n\n blue[:,:,0] = np.copy(img[:,:,0])\n\n return blue", "def to_blue(array):\n result = np.zeros(array.shape)\n\n for row in range(result.shape[0]):\n for pixel in range(result.shape[1]):\n result[row][pixel][2] = array[row][pixel][2]\n\n return result", "def blue_channel(image: Image) -> Image:\n new_image = copy(image)\n # filter the intensities of every component in every pixel.\n for x, y, (r, g, b) in image:\n blue = create_color(0,0,b)\n set_color(new_image, x, y, blue)\n return new_image", "def neighbour_pixels(x, y):\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y ), (x, y ), (x + 1, y ),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)\n ]", "def get_pixel_obs(self):\n delta = self.side / (self.pixel_side - 1)\n bd1 = -self.side / 2\n bd2 = self.side / 2 + delta\n x, y = np.meshgrid(np.arange(bd1, bd2, delta), np.arange(bd2, bd1, -delta))\n if self.robot.sensor.lower() == \"rgb\":\n obs1, obs2, obs3 = np.zeros(x.shape), np.zeros(x.shape), np.zeros(x.shape)\n # Color humans:\n for human in self.humans:\n robot_distance = np.sqrt(\n (human.px - self.robot.px) ** 2 + (human.py - self.robot.py) ** 2\n )\n if robot_distance < self.robot.horizon:\n obs1[\n np.nonzero((x - human.px) ** 2 + (y - human.py) ** 2 <= human.radius ** 2)\n ] = 1\n # Color goal:\n obs2[\n np.nonzero(\n (x - self.robot.gx) ** 2 + (y - self.robot.gy) ** 2 <= self.goal_radius ** 2\n )\n ] = 1\n # Color robot:\n obs3[\n np.nonzero(\n (x - self.robot.px) ** 2 + (y - self.robot.py) ** 2 <= self.robot.radius ** 2\n )\n ] = 1\n obs = np.concatenate(\n (np.expand_dims(obs1, 0), np.expand_dims(obs2, 0), np.expand_dims(obs3, 0)), axis=0\n )\n return np.float32(np.expand_dims(obs, 0))\n elif self.robot.sensor.lower() == \"gray\":\n obs = np.zeros(x.shape)\n # Color humans:\n for human in self.humans:\n robot_distance = np.sqrt(\n (human.px - self.robot.px) ** 2 + (human.py - self.robot.py) ** 2\n )\n if robot_distance < self.robot.horizon:\n obs[\n np.nonzero((x - human.px) ** 2 + (y - human.py) ** 2 <= human.radius ** 2)\n ] = (1.0 / 3)\n # Color goal:\n obs[\n np.nonzero(\n (x - self.robot.gx) ** 2 + (y - self.robot.gy) ** 2 <= self.goal_radius ** 2\n )\n ] = (2.0 / 3)\n # Color robot:\n obs[\n np.nonzero(\n (x - self.robot.px) ** 2 + (y - self.robot.py) ** 2 <= self.robot.radius ** 2\n )\n ] = 1.0\n return np.float32(np.expand_dims(np.expand_dims(obs, 0), 0))\n else:\n raise ValueError(\"Robot sensor incompatible with pixel observation.\")", "def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer", "def find_reddest_pixel(img):\n # HINTS/ADVICE-------------\n # Use a nested for loop here.\n #\n # BE CAREFUL DOING ARITHMETIC WITH UNSIGNED INTEGERS: \n # >>> a = np.array([2], dtype='uint8')\n # >>> b = np.array([3], dtype='uint8')\n # >>> a - b\n # array([255], dtype=uint8)\n #\n # Reminder:\n # numpy arrays have a \"shape\" attribute that stores the layout:\n # img.shape[0] - rows\n # img.shape[1] - columns\n # img.shape[2] - color channels\n\n max_redness = 0\n max_x = 0\n max_y = 0\n \n img = np.array(img, dtype = 'int32')\n for r in range(img.shape[0]):\n for c in range(img.shape[1]):\n red = img[r, c, 2]\n green = img[r, c, 1]\n blue = img[r, c, 0] \n redness = (red - green) + (red - blue)\n\n if redness > max_redness:\n 
max_redness = redness\n max_x = c\n max_y = r\n \n return (max_x, max_y)", "def blue_channel(image: Image) -> Image:\r\n new_image = copy(image)\r\n for x, y, (r, g, b) in image:\r\n r, g, b = 0, 0, b\r\n blue = create_color(r, g, b)\r\n set_color(new_image, x, y, blue)\r\n return new_image", "def pixelcode(self):\n\n maxX, maxY = self.size()\n result = bitmap((2*maxX, 2*maxY))\n for x in range(maxX):\n for y in range(maxY):\n pixel = self.get(x,y)\n result.set(2*x,2*y, pixel)\n result.set(2*x,2*y+1, not pixel)\n result.set(2*x+1,2*y, not pixel)\n result.set(2*x+1,2*y+1, pixel)\n return result", "def pixelPoints(img, cnt):\n\tm = np.zeros(grayscale(img).shape, np.uint8)\n\tcv2.drawContours(m, [cnt], 0, 255, -1)\n\tpixelpoints = cv2.findNonZero(m)\n\treturn pixelpoints", "def getFillPoints(image):\n image = cv.CloneMat(image)\n retList = []\n _, maxVal, _ , maxLoc = cv.MinMaxLoc(image)\n while maxVal > 0:\n retList.append(maxLoc)\n cv.FloodFill(image, maxLoc, 0)\n _, maxVal, _, maxLoc = cv.MinMaxLoc(image)\n return retList", "def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels", "def get_neighbor_values(training_image, row, col):\n #patches.append((img[i-1:i+2,j-1:j+2],(i,j)))\n return training_image[row-1:row+2,col-1:col+2]", "def find_pixel_edges(img):\n \n transparency_cells = img.T[3].T # Ignores rbg, returns only transparency. 
\n # Values are either:\n # 0 (fully transparent) or \n # 1 (not at all transparent)\n right_up = np.rot90(transparency_cells)\n bottom_up = np.rot90(right_up)\n left_up = np.rot90(bottom_up)\n\n y_top = count_from_top(transparency_cells)\n x_left = count_from_top(left_up)\n y_bottom = count_from_top(bottom_up)\n x_right = count_from_top(right_up)\n #print(f'\\ty_top:{y_top}\\nx_left:{x_left}\\t\\tx_right:{x_right}\\n\\ty_bottom:{y_bottom}')\n return y_top, y_bottom, x_left, x_right", "def find_neighbor_pixels(pix_x, pix_y, rad):\n\n points = np.array([pix_x, pix_y]).T\n indices = np.arange(len(pix_x))\n kdtree = KDTree(points)\n neighbors = [kdtree.query_ball_point(p, r=rad) for p in points]\n for nn, ii in zip(neighbors, indices):\n nn.remove(ii) # get rid of the pixel itself\n return neighbors", "def filterBankEdges(img):\n imgE = Views.extendBorder(img)\n opTop = as2DKernel(imgE, [-1]*3 + [0]*3 + [1]*3)\n opBottom = as2DKernel(imgE, [1]*3 + [0]*3 + [-1]*3)\n opLeft = as2DKernel(imgE, [-1, 0, 1] * 3)\n opRight = as2DKernel(imgE, [1, 0, -1] * 3)\n return [opTop, opBottom, opLeft, opRight]", "def getPixel(self,x,y) :\n # check the bounds to make sure we are in the correct area\n if x<0 or x>self.m_width :\n print \"error x out of bounds\\n\"\n return\n if y<0 or y>self.m_height :\n print \"error y our of bounds\\n\"\n return\n # now calculate the index into the 1D array of data\n index=(y*self.m_width*4)+x*4\n # grab the pixels\n red = self.getUcharArrayItem(self.charPixelPtr,index)\n green = self.getUcharArrayItem(self.charPixelPtr,index+1)\n blue = self.getUcharArrayItem(self.charPixelPtr,index+2)\n alpha=self.getUcharArrayItem(self.charPixelPtr,index+3)\n return (red,green,blue,alpha)", "def GetBlue(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetBlue(self)", "def find_pixels(self):\n ref_image=Image.open('sample0000.png')\n imarray=np.array(ref_image)\n ref_image.close()\n self.number_of_pix=imarray.shape\n print self.number_of_pix\n ref_image=None\n imarray=None", "def get_palace_board_blue(self):\n\n return self._palace_board_blue", "def get_pixel(self, x, y):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n return self.get_led( y * 16 + x)\n else:\n return self.get_led((y-8) * 16 + (x+8))", "def GetBlue(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetBlue(self)", "def img_pixels(self, img_debut):\n pixels = np.array(img_debut)\n pixels2 = np.array(img_debut)\n\n xv, yv = self.img_matrix_x, self.img_matrix_y\n\n yv[yv >= self.axe_Y] = -2 # locate pixels outside of the image\n xv[xv >= self.axe_X] = -2\n\n pixels2 = pixels[yv, xv] # apply the black hole deformation\n\n pixels2[xv == -1] = [0, 0, 0] # color the black hole in black\n pixels2[yv == -2] = [255, 192, 203] # color pixels outside\n pixels2[xv == -2] = [255, 192, 203]\n\n img2 = Image.fromarray(pixels2.astype('uint8'), 'RGB')\n return img2", "def get_patch(i,j,im,h=H): #X\n print(i,j)\n return im[(i-h):(i+h+1),(j-h):(j+h+1)]", "def getPixel (self, x, y):\r\n return self.image [y][x]", "def get_general_position_blue(self):\n\n return self._general_position_blue", "def getpixels(self,x,y,dx,dy,Nx,Ny):\n \n Dx = (Nx*dx)\n Dy = (Ny*dy)\n\n # Not Nx + 1 to account for rounding\n pX = (x/dx + (Nx + 2)/2.).astype(int)\n pY = (y/dy + (Ny + 2)/2.).astype(int)\n pixels = pX + pY*Nx\n pixels[((pX < 0) | (pX >= Nx)) | ((pY < 0) | (pY >= Ny))] = -1\n\n # here we do use Nx + 1 as you want the precise float value of the pixel.\n return pixels,x/dx + (Nx + 1)/2., y/dx + (Nx + 
1.)/2.", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours" ]
[ "0.6495526", "0.64614147", "0.62514365", "0.60262513", "0.5827481", "0.57433754", "0.5736232", "0.5696482", "0.56894076", "0.5617795", "0.5553846", "0.55003446", "0.54996854", "0.54855716", "0.5472395", "0.54648805", "0.5462373", "0.5456809", "0.5430799", "0.5417668", "0.54154146", "0.5409806", "0.54066896", "0.54026204", "0.5384328", "0.5373544", "0.53523195", "0.5351756", "0.53456", "0.5329961" ]
0.6844396
0
clean repeated j pixels Find the first item of each row and gets the pixels with the lowest j value, which represents the biggest real value of the y axis of the graph, crossed with x axis.
def clean_double_values(self): trans_blue = self.blue_matrix().transpose() b_array = [] for i in trans_blue: min_col = [i[0], i[1]] for j in trans_blue[0:]: if j[1] == min_col[1]: if j[0] < min_col[0]: min_col[0] = j[0] if min_col not in b_array: b_array.append(min_col) return sorted(b_array, key=lambda i: i[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_smaller_neighbour(plots, i, j):\n n = len(plots)\n neighbours = []\n if i > 0:\n neighbours.append((i-1, j))\n if i < n-1:\n neighbours.append((i+1, j))\n if j > 0:\n neighbours.append((i, j-1))\n if j < n-1:\n neighbours.append((i, j+1))\n\n min_elevation = plots[i][j]\n min_elevation_plot = None\n for m in neighbours:\n if plots[m[0]][m[1]] <= min_elevation:\n min_elevation = plots[m[0]][m[1]]\n min_elevation_plot = m\n\n return min_elevation_plot", "def reduce_mini(minigrid):\n row = []\n for i in range(3):\n for j in range(3):\n row.append(minigrid[i][j])\n for i in range(9):\n if len(row[i]) == 1:\n for j in range(9):\n if i != j:\n if row[i] in row[j]:\n chunks = row[j].split(row[i])\n row[j] = chunks[0] + chunks[1]\n\n count_dict = {}\n for i in range(9):\n for char in row[i]:\n if char in count_dict:\n count_dict[char] = \"X\"\n else:\n count_dict[char] = i\n\n for key in count_dict:\n if count_dict[key] != \"X\":\n row[count_dict[key]] = key\n\n for i in range(3):\n for j in range(3):\n minigrid[i][j] = row[(i*3)+j]\n\n return minigrid", "def addHigherPiks(self, PiksList):\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1]\n for kol in range(position1, position2): ##for each column checking all pixels over finden group\n line=0\n while not((kol, line) in PiksList):\n if self.getPixel(kol,line)==0: ##if they are black add them to PiksList\n PiksList.append((kol,line))\n line+=1\n PiksList.sort()## at the end sort the PiksList with number of column\n return PiksList", "def get_patch(i,j,im,h=H): #X\n print(i,j)\n return im[(i-h):(i+h+1),(j-h):(j+h+1)]", "def task4(x: List[List[int]]) -> int:\n ###\n ###\n row = len(x)\n max_circle = 0\n i = 0\n while i < row:\n for j in range(i, row):\n if x[i][j] != 1:\n if i == j - 1:\n max_circle += 1\n j += 1\n i = j - 1\n break\n if j == row - 1 and x[i-1][j] == 0:\n max_circle += 1\n if j == row - 1:\n i += 1\n\n return max_circle\n #a b c d", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def find_local_minima(black_pixels_per_column_in_line):\n\n #array che contiene i minimi locali nella forma di coppie (coordinata x, numero di pixel neri)\n local_minima = []\n\n # un punto e' minimo locale se e' minore dei suoi punti adiacenti;\n # nello specifico, diciamo che deve essere minore strettamente almeno di uno dei due\n # (mentre puo' essere minore o uguale all'altro)\n\n local_minima.append(0)\n\n for i in range(1, len(black_pixels_per_column_in_line)-1):\n\n is_local_minimum = ((black_pixels_per_column_in_line[i] <= black_pixels_per_column_in_line[i-1] and\n black_pixels_per_column_in_line[i] < black_pixels_per_column_in_line[i+1]) or\n (black_pixels_per_column_in_line[i] < black_pixels_per_column_in_line[i-1] and\n black_pixels_per_column_in_line[i] <= black_pixels_per_column_in_line[i+1]))\n\n if is_local_minimum:\n local_minima.append(i)\n\n local_minima.append(len(black_pixels_per_column_in_line)-1)\n\n return np.array(local_minima)", "def remove_border(src): #---- remove blank border\r\n rows = src.shape[0]; VMIN= 0; VMAX= rows; \r\n cols = src.shape[0]; UMIN= 0; UMAX= cols;\r\n for ky in range(1,rows):\r\n sum0 = np.sum(src[ky,:,:]);\r\n sum1 = np.sum(src[rows-ky-1,:,:]);\r\n if sum0== 0 
and VMIN== ky-1: VMIN= ky;\r\n if sum1== 0 and VMAX== rows-ky+1: VMAX= rows-ky;\r\n for kx in range(1,cols):\r\n sum0 = np.sum(src[:,kx,:]);\r\n sum1 = np.sum(src[:,cols-kx-1,:]);\r\n if sum0== 0 and UMIN== kx-1: UMIN= kx;\r\n if sum1== 0 and UMAX== cols-kx+1: UMAX= cols-kx;\r\n #--- --- \r\n DV = np.minimum(VMIN, rows-VMAX);\r\n DU = np.minimum(UMIN, cols-UMAX);\r\n return src[DV:(rows-DV), DU:(cols-DU), :];", "def fermionic_cells(self):\n cells = self.cells()\n cells_and_circles = self.all_cells()\n circles = [x for x in cells_and_circles if x not in cells]\n coords = [(i, jprime)\n for iprime, jprime in circles\n for i, j in circles\n if iprime > i\n ]\n coords.sort()\n return coords", "def remove_dbledge(img):\n\t(ny,nx) = np.shape(img)\n\tfor i in range(nx):\n\t\tidx = np.array(np.nonzero(img[:,i]))\n\t\tif np.size(idx) == 0:\n\t\t\tcontinue\n\t\tidx = idx[0][-1]\n\t\timg[idx-1::-1,i] = 0\n\treturn img", "def get_max_point(image):\r\n max_value= 0\r\n better_point= None\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n if image[line][column]>max_value:\r\n max_value= image[line][column]\r\n better_point = [line,column]\r\n return better_point", "def get_tile(board):\n t = [[0,0]]\n for i in range(board.shape[0] -1):\n for j in range(board.shape[1] -1):\n if board[i, j] == board[i +1, j]:\n t.append([i +1, j])\n if board[i, j] == board[i, j+1]:\n t.append([i, j+1])\n if board[i, j] == board[i+1, j+1]:\n t.append([i+1, j+1])\n # print(t)\n t = list(np.unique(t, axis=0))\n return t", "def get_ij_neighbors(self, i, j):\n ii, jj = np.mgrid[i-1:i+2, j-1:j+2]\n ii, jj = ii.ravel(), jj.ravel()\n filtr = (ii >= 0) & (ii < self.height) & (jj >= 0) & (jj < self.width)\n ij_neighbors = set(zip(ii[filtr], jj[filtr]))\n ij_neighbors.remove((i, j))\n return ij_neighbors", "def remove_patch(i,j,im,h=H): #X\n imn= im.copy()\n imn[(i-h):(i+h+1),(j-h):(j+h+1)]=DEAD\n return imn,get_patch(i,j,im)", "def get_sorted_pixels(self):\n # (but make sure you have lots of memory before you do that on huge images).\n # [::-1] reverses order\n return sorted(self.get_colors(), key=lambda x: x[0])[::-1]", "def denoise(grouped, width, height, reach, beta):\n new_pixels = []\n new_pixels = copy.deepcopy(grouped)\n for row in range(height):\n for column in range(width):\n neighbors = locate_neighbors(grouped, row, column, width, height, reach)\n insertion_sort(neighbors)\n med = neighbors[len(neighbors)//2]\n row_num = (row * width) + column\n original = new_pixels[row_num][0]\n if abs(original - med) / (original + 0.1) > beta:\n new_pixels[row_num][0] = int(med)\n new_pixels[row_num][1] = int(med)\n new_pixels[row_num][2] = int (med)\n return new_pixels", "def fill_holes(img):\n neg = 1 - img\n s = ndimage.generate_binary_structure(3,1) # iterate structure\n labeled_array, numpatches = ndimage.label(neg,s) # labeling\n sizes = ndimage.sum(neg,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n max_size = sizes_list[-1]\n max_label = np.where(sizes == max_size)[0] + 1\n component = labeled_array == max_label\n return 1 - component", "def get_neighbor_values(training_image, row, col):\n #patches.append((img[i-1:i+2,j-1:j+2],(i,j)))\n return training_image[row-1:row+2,col-1:col+2]", "def remove_stuck(traj,size):\n from numpy import sqrt, where\n \n r_min = traj.groupby('particle').first()\n r_max = traj.groupby('particle').last()\n\n pos_columns = ['x','y']\n dist = r_min[pos_columns] - r_max[pos_columns]\n dist_eu = 
sqrt(dist['x']**2+dist['y']**2)\n\n index_remove = dist_eu.index[where(dist_eu < size)]\n \n traj_new = traj\n for i in range(len(index_remove)):\n traj_new = traj_new[(traj_new['particle'] != index_remove[i])]\n \n return traj_new", "def get_nullify_idxs(original_size, border_size):\n\tassert border_size < original_size/2, \"Border too large to be removed from image of this size\"\n\ttmp = np.zeros((original_size, original_size), dtype=int)\n\ttmp[:border_size,:] = 1\n\ttmp[-border_size:,:] = 1\n\ttmp[:,-border_size:] = 1\n\ttmp[:,:border_size] = 1\n\ttmp = tmp.reshape(tmp.shape[0]*tmp.shape[1])\n\treturn np.where(tmp==1)[0]", "def count_from_top(img):\n pixel_count = 0\n for row in img:\n unique_pixel_vals = np.unique(row)\n if 255 not in unique_pixel_vals: # ignore shading (values between 0-255)\n pixel_count += 1\n else:\n return pixel_count", "def umrandung_zeichnen(img):\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n if img[zeilen_index][spalten_index] == 1:\n #Markiere benachbarte Pixel\n umrandung_pixel(img, zeilen_index, spalten_index)", "def find_reddest_pixel_fast(img): \n img = np.array(img, dtype = 'int32')\n location = cv2.minMaxLoc((img[:, :, 2] - img[:, :, 1]) + (img[:, :, 2] - img[:, :, 0]))[3]\n return location", "def get_min_pixel_seperation(pix_x, pix_y):\n # dx = pix_x[1] - pix_x[0] <=== Not adjacent for DC-SSTs!!\n # dy = pix_y[1] - pix_y[0]\n\n dx = pix_x - pix_x[0]\n dy = pix_y - pix_y[0]\n pixsep = np.min(np.sqrt(dx ** 2 + dy ** 2)[1:])\n return pixsep", "def removeMinimum(img, x, y, w, h):\n minmax = cvMinMaxLoc(img)\n cvRectangle(img, cvPoint(x-int(w/2), y-int(h/2)), cvPoint(x+int(w/2), y+int(h/2)), cvScalar(minmax[1], 0, 0, 0), CV_FILLED)", "def _remove_blanks(all_icons: List[numpy.ndarray]) -> List[numpy.ndarray]:\n filtered_icons = []\n for icon in all_icons:\n if icon[20:60, 20:60, 2].min() > 100:\n continue\n filtered_icons.append(icon)\n return filtered_icons", "def correct_artefacts(wsh):\n unique, count = np.unique(wsh, return_counts=True)\n to_remove = unique[count<=3]\n for rem in to_remove:\n rem_im = wsh==rem\n rem_cont = dilation(rem_im) & ~rem_im\n vals, val_counts = np.unique(wsh[rem_cont], return_counts=True)\n replace_val = vals[np.argmax(val_counts)]\n if replace_val != 0:\n wsh[rem_im] = int(replace_val)\n return wsh", "def _uni_pnts(a):\r\n _, idx = np.unique(a, return_index=True, axis=0)\r\n a = np.concatenate((a[np.sort(idx)], [a[0]]))\r\n return a", "def get_min_max_intensity_lines(self, folder_name, columns, froms, tos, non_zero=True):\n minimum_pixel_values = []\n maximum_pixel_values = []\n photo_list = self.get_photo_list(folder_name)\n\n for i, name in enumerate(photo_list):\n pixels = []\n file_name = folder_name + '/' + name\n image = cv2.imread(file_name, cv2.IMREAD_ANYDEPTH)\n pixels.extend(image[froms[0]:tos[0], columns[0]])\n pixels.extend(image[froms[1]:tos[1], columns[1]])\n pixels.extend(image[froms[2]:tos[2], columns[2]])\n pixels.extend(image[froms[3]:tos[3], columns[3]])\n pixels.extend(image[froms[4]:tos[4], columns[4]])\n pixels.extend(image[froms[5]:tos[5], columns[5]])\n pixels.extend(image[froms[6]:tos[6], columns[6]])\n\n if non_zero:\n pixels.sort()\n # need to make the 'pixels' variable into a np.ndarray i think\n new_pixels = np.full((len(pixels), 1), 1)\n for k, j in enumerate(pixels):\n new_pixels[k, :] = j\n\n # this is a really fiddly and stupid way to get around how bad this is implemented for lists\n non_zero_pixels = 
new_pixels[np.nonzero(new_pixels)]\n minimum_pixel_values.append(non_zero_pixels[0])\n\n else:\n minimum_pixel_values.append(np.amin(pixels))\n\n maximum_pixel_values.append(np.amax(pixels))\n\n folder_minimum_pixel_value = np.amin(minimum_pixel_values)\n folder_maximum_pixel_value = np.amax(maximum_pixel_values)\n\n print(\"The min is: \" + str(folder_minimum_pixel_value) + \" The max is: \" + str(folder_maximum_pixel_value))\n return folder_minimum_pixel_value, folder_maximum_pixel_value" ]
[ "0.56734896", "0.56369495", "0.56135774", "0.5611539", "0.5562747", "0.5554404", "0.55534256", "0.55487216", "0.55104053", "0.5484114", "0.5463305", "0.5457158", "0.54555976", "0.5414113", "0.5403876", "0.5402138", "0.5381725", "0.53808093", "0.53107697", "0.53102785", "0.5303458", "0.5302751", "0.5302173", "0.5279688", "0.5239448", "0.52373105", "0.5236387", "0.5236031", "0.521332", "0.52080774" ]
0.57108814
0
Saves csv file into image folder saves generated data by class into a csv file with the name, plus the type of data. This method keeps track if the file was generate, and replace it with a new one
def save_values(self): f_name = self.img_path.split('.')[0] + '_{}_'.\ format(self.data_type_name) + '.csv' dir_name = os.path.join(self.base_dir, f_name) if not os.path.exists(dir_name): for data_list in self.converted_values(): with open(f_name, 'a') as f: wr = csv.writer(f, delimiter=';') wr.writerow(data_list) else: os.remove(f_name) for data_list in self.converted_values(): with open(f_name, 'a') as f: wr = csv.writer(f, delimiter=';') wr.writerow(data_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def save(self):\n assert self.data is not None\n with open(self._csv_path, mode=\"w\", encoding=\"utf-8\") as spp_file:\n # We don't want to save the index, as it's not especially meaningful, and makes life harder when trying to\n # restore the binary version from the csv (the index column would be imported and then need to be dropped).\n self.data.to_csv(spp_file, index=False)", "def save_file(self):\n\n file_save_path = QFileDialog.getSaveFileName(self, 'Save CSV', \"\", 'CSV(*.csv)')\n\n if file_save_path[0]:\n with open(file_save_path[0], 'w', newline=\"\") as csv_file:\n writer = csv.writer(csv_file)\n # Add the header row explicitly\n writer.writerow(self.column_headers)\n for row in range(self.csv_data_table.rowCount()):\n row_data = []\n for column in range(self.csv_data_table.columnCount()):\n\n # Check if the current column is set to be visible, if not skip it\n if self.csv_data_table.isColumnHidden(column):\n continue\n\n item = self.csv_data_table.item(row, column)\n if item is not None:\n row_data.append(item.text())\n else:\n row_data.append('')\n writer.writerow(row_data)\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.set_save_enabled(False)\n\n # TODO: add a better variant of message box compared to about like sucess, critical, warning etc according to context\n QMessageBox.about(self, \"Success!\", \"Your file has been saved successfully.\")", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def generate_data(self, file_name, data, header=None):\n with open(f'{self.path_file}/{file_name}.csv', 'w') as csvfile:\n if header:\n csvfile.write(header)\n csvfile.writelines(data)\n return True", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def writeFile(shorten=True):\n # first create a folder containing data\n # get time\n now = datetime.now()\n day = now.strftime(\"%m_%d\")\n current_time = now.strftime(\"%H_%M_%S\")\n\n # generate data folder for save result\n if not 
os.path.exists(\"Image_Processing/Data\"):\n os.mkdir(\"Image_Processing/Data\")\n\n # global picFolder\n # picFolder = \"Image_Processing/Data/{}_{}\".format(day, current_time)\n #\n # if not os.path.exists(picFolder):\n # os.mkdir(picFolder)\n\n # create a csv file if already doesn't exist\n if not os.path.exists(\"Image_Processing/Data/Data.csv\"):\n openLog(f'Image_Processing/Data/Data.csv')\n\n # csv header\n fieldnames = [\"漢字\", \"言葉\", \"読み方 (音|訓)\", \"英語\", \"意味\", \"例文\"]\n\n with open('Image_Processing/Data/Data.csv', 'w', encoding='UTF8', newline='') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n\n # openLog(f'Image_Processing/Data/{day}_{current_time}/Data.csv')\n\n # find the file name for all images needed for extracting data\n image = os.listdir(\"Image_Processing/Images/\")\n\n # remove file called DS_Store\n if '.DS_Store' in image:\n image.remove('.DS_Store')\n\n # initialize df\n df = pd.read_csv(\"Image_Processing/Data/Data.csv\")\n # extract data for each image\n for i in range(len(image)):\n # initialize list\n list_data = []\n\n # find the image folder\n image_path = f\"Image_Processing/Images/{image[i]}\"\n\n # extract data\n data = extractData(image_path, shorten).items()\n\n # append it to list\n data = [string[1] for string in data]\n\n # if kanji, then second should be blank, else the first should be blank\n if len(data[0]) == 1:\n data.insert(1, '')\n else:\n data.insert(0, '')\n\n # showMessage(data)\n\n df2 = pd.DataFrame([data], columns=[\"漢字\", \"言葉\", \"読み方 (音|訓)\", \"英語\", \"意味\", \"例文\"], index=[i+0.5])\n df = df.append(df2, ignore_index=False)\n df = df.sort_index().reset_index(drop=True)\n\n df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], axis=1, inplace=True)\n\n df.to_csv(\"Image_Processing/Data/Data.csv\")\n\n # now move the image from images to used_images\n\n\n\n\n # df2 = pd.DataFrame(ls, columns=[\"漢字\", \"言葉\", \"読み方 (音|訓)\", \"英語\", \"意味\", \"例文\"], index=[nindx])\n\n # # write data into file\n # writeLog(data)\n\n # write data into list\n list_data.append(data)\n\n # # csv header\n # fieldnames = [\"\", \"漢字\", \"言葉\", \"読み方 (音|訓)\", \"英語\", \"意味\", \"例文\"]\n #\n # with open(f'Image_Processing/Data/{day}_{current_time}/Data.csv', 'w', newline='') as f:\n # writer = csv.DictWriter(f, fieldnames=fieldnames)\n # writer.writeheader()\n # writer.writerows(list_data)", "def save_clean_data(self):\n for data in self.clean_data:\n file_name = \"../data/clean_data/\" + data.file.name + data.file.extension\n data.save_csv(file_name)", "def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def saveData(self):\n pdIds = pd.DataFrame.from_dict(self.pathIds, orient='index')\n pdCrr = pd.DataFrame.from_dict(self.pathCrr, orient='index', columns=['cid'])\n mergedData = pd.concat([pdIds, pdCrr['cid']], axis=1, ignore_index=False)\n\n # Create the save dialog box\n name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File',\n '', 'csv files (*.csv)', 'csv file (*.csv)')\n\n if not name:\n return\n # Check the extension when saving\n if self.csvExt in name:\n mergedData.to_csv(name, header=False, index=True)\n else:\n message = 'Error saving file {}.'.format(name)\n self.messageBox(message)", "def 
init_csv_file(self):\n folder = \"/home/pi/data/\" + datetime.now().strftime(\"%Y_%m_%d\") + \"/\"\n if not os.path.isdir(folder):\n # append 'a' to the folder name until we find a name that does not exist\n while os.path.exists(folder):\n folder = folder[:-1] + \"a\" + \"/\"\n os.mkdir(folder)\n filename = folder + 'particledata_' + datetime.now().strftime (\"%H-%M-%S\") \n while os.path.exists(filename):\n filename = filename + '_a'\n filename += '.csv'\n log.info('Writing data to: ' + filename)\n self.file = open(filename, \"w\")\n self.file.write('Unix Time;Human Readable Time;pm 2.5;pm 10;Has Fix;Longitude;Latitude;Altitude;GPS Unix Time\\n')\n self.file.flush()\n self.synced_time = False", "def save(self, **kwargs):\n self.remove_file()\n if not self.image:\n self.generate(save=False)\n else:\n self.image.name = self.file()\n super(FormatedPhoto, self).save(**kwargs)", "def save_csv(self):\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')\n\n if not path:\n return\n\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n\n writer.writerow(self.headers.keys())\n\n for row in range(self.rowCount()):\n row_data = []\n for column in range(self.columnCount()):\n item = self.item(row, column)\n if item:\n row_data.append(str(item.text()))\n else:\n row_data.append('')\n writer.writerow(row_data)", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def save_waveform_csv(self, waveform, csv_filename):\n\n # Get the modules data directory\n data_dir = self.module.getString(\"directory\")\n\n # All CSV files within the waves directory are automatically recognized\n # by the AWG module\n wave_dir = os.path.join(data_dir, \"awg\", \"waves\")\n\n # The data directory is created by the AWG module and should always exist.\n # If this exception is raised, something might be wrong with the file system.\n if not os.path.isdir(wave_dir):\n raise Exception(f\"AWG module wave directory {wave_dir} does not exist or is not a directory\")\n\n if not csv_filename.endswith(\".csv\"):\n self.hd.log.warn(f\"CSV filename {csv_filename} did not end in .csv, appending it to filename.\")\n csv_filename += \".csv\"\n\n # Save waveform data to CSV\n csv_file = os.path.join(wave_dir, csv_filename)\n np.savetxt(csv_file, waveform)", "def save_annotation(self):\n\n if self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n self.play_video() # Pause the video if it is playing.\n \n if self.filename != None and self.videoOpened:\n with open(self.filename.replace(\".mp4\",\".csv\"), \"w\", newline=\"\\n\") as file:\n writer = csv.writer(file)\n writer.writerow([\"Time (ms)\", \"Engagement (%)\"])\n\n if self.xValues != [] and self.yValues != []:\n for n in range(len(self.xValues)):\n writer.writerow([self.xValues[n], self.yValues[n]])\n self.colors[n] = self.saveColor\n\n self.savedRecently = True\n message = QMessageBox()\n message.setWindowTitle(\"Success!\")\n message.setText(\"The annotation is saved successfully as a csv-file. It is saved in the same directory as the source video file. 
\\n \\nThe directory is: \"+ self.filename.replace(\".mp4\",\".csv\"))\n x = message.exec_() # this will show our messagebox\n\n else:\n message = QMessageBox()\n message.setWindowTitle(\"Fail!\")\n message.setText(\"There is no annotation to save.\")\n x = message.exec_() # this will show our messagebox\n if not self.videoOpened:\n message = QMessageBox()\n message.setWindowTitle(\"Fail!\")\n message.setText(\"No video has been opened yet.\")\n x = message.exec_() # this will show our messagebox", "def _save_log(self, save_dir, data):\n date = datetime.datetime.today().strftime('%Y-%m-%d')\n file_dir = os.path.join(save_dir, date + \".csv\")\n with open(file_dir, 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(data)", "def save(self, data, outpath):\n data.to_csv(outpath)", "def save_csv(self, save_path=''):\n if not save_path:\n time = datetime.now()\n time = datetime.strftime(time, '%Y-%m-%d_%H:%M:%S')\n filename = time + '.csv'\n save_path = os.path.join(os.path.abspath(os.curdir), filename)\n data = self._get_data()\n with open(save_path, 'wb') as f:\n for line in data:\n f.write(line + '\\n')", "def save_csv(data, name):\n\n data_shp = np.asarray(np.shape(data))\n data_shp[0] = data_shp[0] + 1\n\n new_array = np.zeros(data_shp)\n new_array[1:, :] = data\n\n csv_array = new_array.astype(str)\n\n # Format the first row with legend\n leg = ['Energy (MeV)', 'Attentuation (cm^-1)']\n\n csv_array[0, :] = leg\n\n np.savetxt(os.path.join(directory, f'{name}.csv'), csv_array, delimiter=',', fmt='%s')", "def dataSave():\n # NR5G = gui_reader()\n try: #Python3\n f = open(__file__ + \".csv\",'wt', encoding='utf-8')\n except:\n f = open(__file__ + \".csv\",'wb')\n f.write('%s,'%(entryCol.entry0.get()))\n f.write('%s,'%(entryCol.entry1.get()))\n f.write('%s,'%(entryCol.entry2.get()))\n f.write('%s,'%(entryCol.entry3.get()))\n f.close()\n print(\"DataSave: File Saved\")", "def save_data_csv(self, filename):\n #add masked entry as last column\n fields = numpy.r_[self.colLabels, ['masked']]\n\n #add dynamic expression to column headers\n for k, col in enumerate(self.dynamic_cols):\n fields[col] += \" [%s]\"%self.dynamic_expressions[k] if self.dynamic_expressions[k] else ''\n\n #add custom labels to field names \n for col, fieldname in enumerate(fields):\n custom_label = self.column_labels_custom.get(col)\n fields[col] += \" (%s)\"%custom_label if custom_label else ''\n\n fields[col] += \" {*}\" if (col in self.colsel and (fieldname.find('user')==0 or col in self.dynamic_cols)) else ''\n \n #add options\n \n \n #don't save last two lines\n data = numpy.c_[self.data[:-2], self.rowmask[:-2]]\n\n with open(filename, 'wb') as f:\n import csv\n writer = csv.writer(f)\n writer.writerow(fields)\n #writer.writerows(data)\n for row in data:\n r = [entry.encode('latin_1') if type(entry) is types.UnicodeType else entry for entry in row]\n writer.writerow(r)\n self.modified = False", "def create_file(self, type_simu=0, simulation_counter=0):\n portion_duration = self.individuals[0].portion_duration\n nbr_portions = self.individuals[0].nbr_portions\n arriveeX = self.individuals[0].arriveeX*nbr_portions*4.0/20\n arriveeY = self.individuals[0].arriveeY*nbr_portions*4.0/20\n\n if type_simu == 0:\n repertoire = (f\"simulation_data/Simu {simulation_counter} : \"\n f\"type_simu=0, nbr_individuals={self.nbr_individuals}, \"\n f\"mutation_factor={self.mutation_factor}, portion_duration={portion_duration}, \"\n f\"nbr_portions={nbr_portions}, arrivee=({arriveeX},{arriveeY})\")\n else:\n repertoire = 
(f\"simulation_data/Simu {simulation_counter} : \"\n f\"type_simu=1, nbr_individuals={self.nbr_individuals}, \"\n f\"mutation_factor={self.mutation_factor}, portion_duration={portion_duration}, \"\n f\"nbr_portions={nbr_portions}, accepted_radius={self.accepted_radius}, \"\n f\"arrivee=({arriveeX},{arriveeY})\")\n\n fname = \"data_simu.csv\"\n \n try: \n os.makedirs(repertoire)\n except OSError:\n if not os.path.isdir(repertoire):\n Raise\n\n with open(repertoire+\"/\"+fname, \"a\") as file:\n list_col = [\"Génération\", 'Individu', 'Score']\n for portion in range(self.individuals[0].nbr_portions):\n for roue in range(4):\n list_col.append('Gene_' + str(portion) + 'roue_' + str(roue))\n if type_simu == 1:\n list_col.append('individuals_under_threshold')\n writer = csv.writer(file)\n writer.writerow(list_col)\n\n return repertoire", "def save(self):\r\n\r\n for video_name, video_data in self.data.items():\r\n save_path = os.path.join(\r\n self.features_dir, video_name + \".\" + self.file_type\r\n )\r\n write_df(\r\n df=video_data.fillna(0), file_type=self.file_type, save_path=save_path\r\n )\r\n print(\"Created additional ROI features for {}...\".format(video_name))\r\n self.timer.stop_timer()\r\n stdout_success(\r\n msg=\"Created additional ROI features for files within the project_folder/csv/features_extracted directory\",\r\n elapsed_time=self.timer.elapsed_time_str,\r\n )", "def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save(self):\n\n # TODO:Find place to save data, write logic to save images(Filter out video?)" ]
[ "0.6624201", "0.6549683", "0.6435749", "0.64239025", "0.6376977", "0.62230694", "0.61884254", "0.6154384", "0.6124744", "0.6118921", "0.6071223", "0.60594875", "0.59954077", "0.5983704", "0.5972146", "0.5940008", "0.59392995", "0.59351414", "0.5909594", "0.5909318", "0.5908452", "0.58978677", "0.5883957", "0.5882847", "0.58588415", "0.5853181", "0.5848755", "0.58372855", "0.5829908", "0.58188033" ]
0.7220325
0
Run an across-subject classification. Decode responses on each hand separately from CPRO data. Limit to ROIs within SMN network
def conditionDecodings(data, rois, ncvs=100, effects=False, motorOutput=False,confusion=False, decoder='similarity', nproc=5): ncond = data.shape[1] # two motor outputs nSubjs = data.shape[2] nsamples = nSubjs * ncond stats = np.zeros((len(rois),nsamples)) rmatches = np.zeros((len(rois),)) rmismatches = np.zeros((len(rois),)) # Label array for supervised learning labels = np.tile(range(ncond),nSubjs) subjarray = np.repeat(range(nSubjs),ncond) # Run SVM classifications on network-level activation patterns across subjects confusion_mats = [] roicount = 0 for roi in rois: roi_ind = np.where(glasser2==roi+1)[0] nfeatures = len(roi_ind) roi_ind.shape = (len(roi_ind),1) svm_mat = np.zeros((nsamples,roi_ind.shape[0])) samplecount = 0 for scount in range(nSubjs): roidata = np.squeeze(data[roi_ind,:,scount]) svm_mat[samplecount:(samplecount+ncond),:] = roidata.T samplecount += ncond # Spatially demean matrix across features # samplemean = np.mean(svm_mat,axis=1) # samplemean.shape = (len(samplemean),1) # svm_mat = svm_mat - samplemean scores, rmatch, rmismatch, confusion_mat = randomSplitLOOBaselineCV(ncvs, svm_mat, labels, subjarray, motorOutput=motorOutput, decoder=decoder, nproc=nproc) stats[roicount,:] = scores rmatches[roicount] = np.mean(rmatch) rmismatches[roicount] = np.mean(rmismatch) confusion_mats.append(confusion_mat) roicount += 1 if effects and confusion: return stats, rmatch, rmismatch, confusion_mats if effects and not confusion: return stats, rmatch, rmismatch if confusion and not effects: return stats, confusion_mats else: return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self, requests):\n responses = []\n for request in requests:\n infer_outputs = pb_utils.get_input_tensor_by_name(\n request, self.input_names[0])\n im_infos = pb_utils.get_input_tensor_by_name(request,\n self.input_names[1])\n ori_imgs = pb_utils.get_input_tensor_by_name(request,\n self.input_names[2])\n\n infer_outputs = infer_outputs.as_numpy()\n im_infos = im_infos.as_numpy()\n ori_imgs = ori_imgs.as_numpy()\n\n results = self.postprocessor.run([infer_outputs], im_infos)\n batch_rec_texts = []\n batch_rec_scores = []\n batch_box_list = []\n for i_batch in range(len(results)):\n\n cls_labels = []\n cls_scores = []\n rec_texts = []\n rec_scores = []\n\n box_list = fd.vision.ocr.sort_boxes(results[i_batch])\n image_list = []\n if len(box_list) == 0:\n image_list.append(ori_imgs[i_batch])\n else:\n for box in box_list:\n crop_img = get_rotate_crop_image(ori_imgs[i_batch], box)\n image_list.append(crop_img)\n\n batch_box_list.append(box_list)\n\n cls_pre_tensors = self.cls_preprocessor.run(image_list)\n cls_dlpack_tensor = cls_pre_tensors[0].to_dlpack()\n cls_input_tensor = pb_utils.Tensor.from_dlpack(\n \"x\", cls_dlpack_tensor)\n\n inference_request = pb_utils.InferenceRequest(\n model_name='cls_pp',\n requested_output_names=['cls_labels', 'cls_scores'],\n inputs=[cls_input_tensor])\n inference_response = inference_request.exec()\n if inference_response.has_error():\n raise pb_utils.TritonModelException(\n inference_response.error().message())\n else:\n # Extract the output tensors from the inference response.\n cls_labels = pb_utils.get_output_tensor_by_name(\n inference_response, 'cls_labels')\n cls_labels = cls_labels.as_numpy()\n\n cls_scores = pb_utils.get_output_tensor_by_name(\n inference_response, 'cls_scores')\n cls_scores = cls_scores.as_numpy()\n\n for index in range(len(image_list)):\n if cls_labels[index] == 1 and cls_scores[\n index] > self.cls_threshold:\n image_list[index] = cv2.rotate(\n image_list[index].astype(np.float32), 1)\n image_list[index] = np.astype(np.uint8)\n\n rec_pre_tensors = self.rec_preprocessor.run(image_list)\n rec_dlpack_tensor = rec_pre_tensors[0].to_dlpack()\n rec_input_tensor = pb_utils.Tensor.from_dlpack(\n \"x\", rec_dlpack_tensor)\n\n inference_request = pb_utils.InferenceRequest(\n model_name='rec_pp',\n requested_output_names=['rec_texts', 'rec_scores'],\n inputs=[rec_input_tensor])\n inference_response = inference_request.exec()\n if inference_response.has_error():\n raise pb_utils.TritonModelException(\n inference_response.error().message())\n else:\n # Extract the output tensors from the inference response.\n rec_texts = pb_utils.get_output_tensor_by_name(\n inference_response, 'rec_texts')\n rec_texts = rec_texts.as_numpy()\n\n rec_scores = pb_utils.get_output_tensor_by_name(\n inference_response, 'rec_scores')\n rec_scores = rec_scores.as_numpy()\n\n batch_rec_texts.append(rec_texts)\n batch_rec_scores.append(rec_scores)\n\n out_tensor_0 = pb_utils.Tensor(\n self.output_names[0],\n np.array(\n batch_rec_texts, dtype=np.object_))\n out_tensor_1 = pb_utils.Tensor(self.output_names[1],\n np.array(batch_rec_scores))\n out_tensor_2 = pb_utils.Tensor(self.output_names[2],\n np.array(batch_box_list))\n inference_response = pb_utils.InferenceResponse(\n output_tensors=[out_tensor_0, out_tensor_1, out_tensor_2])\n responses.append(inference_response)\n return responses", "def forward(self, *args): # noqa: R0914\r\n encoder_out, (hn, cn) = self.unified_encoder(*args)\r\n device = hn.device\r\n non_sequential_cont_decoded = 
self.mlp_non_seq_cont(hn)\r\n non_sequential_cat_decoded = []\r\n for mlp_non_seq_cat in self.mlp_non_seq_cat_list:\r\n non_sequential_cat_decoded.append(mlp_non_seq_cat(hn))\r\n\r\n hn = torch.unsqueeze(hn, 0)\r\n cn = torch.unsqueeze(cn, 0)\r\n # decoded is the output prediction of timestep i-1 of the decoder\r\n decoded = torch.zeros(encoder_out.shape[0], int(\r\n self.unified_encoder.seq_cont_count + self.unified_encoder.no_of_embs_seq), device=device)\r\n seq_cont_decoded = torch.Tensor(device=device)\r\n seq_cat_decoded = []\r\n for _ in range(self.unified_encoder.seq_cat_count):\r\n seq_cat_decoded.append(torch.Tensor(device=device))\r\n\r\n for _ in range(encoder_out.shape[1]):\r\n decoded, (hn, cn), out_cont, out_cat = self.decoder(decoded, (hn, cn))\r\n # Predict all categorical columns\r\n out_cat_onehot = []\r\n if self.unified_encoder.seq_cat_count != 0:\r\n for idx, out in enumerate(out_cat):\r\n out_cat_onehot.append(torch.argmax(out, dim=1).unsqueeze(-1))\r\n seq_cat_decoded[idx] = torch.cat(\r\n [seq_cat_decoded[idx], out.view(out.shape[0], 1, -1)], dim=1)\r\n out_cat_onehot = torch.cat(out_cat_onehot, -1)\r\n out_cat_embedding = self.unified_encoder.seq_emb_layers(out_cat_onehot)\r\n decoded = torch.cat([out_cat_embedding, out_cont], dim=-1)\r\n else:\r\n decoded = out_cont\r\n seq_cont_decoded = torch.cat(\r\n [seq_cont_decoded, out_cont.view(out_cont.shape[0], 1, -1)], dim=1)\r\n\r\n return non_sequential_cont_decoded, non_sequential_cat_decoded, seq_cont_decoded, seq_cat_decoded", "def compute_classifications2(depc, gid_list, config=None):\n logger.info('[ibs] Process Image Classifications2')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n depc = ibs.depc_image\n if config['classifier_two_algo'] in ['cnn']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (192, 192),\n }\n # depc.delete_property('thumbnails', gid_list, config=config_)\n thumbnail_list = depc.get_property('thumbnails', gid_list, 'img', config=config_)\n result_list = ibs.generate_thumbnail_class2_list(thumbnail_list, **config)\n elif config['classifier_two_algo'] in ['rf']:\n from wbia.algo.detect.rf import classify\n\n config_ = {'algo': 'resnet'}\n vector_list = depc.get_property('features', gid_list, 'vector', config=config_)\n classifier_weight_filepath = config['classifier_weight_filepath']\n result_list = classify(vector_list, weight_filepath=classifier_weight_filepath)\n elif config['classifier_two_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails', gid_list, 'img', config=config_, read_extern=False, ensure=True\n )\n config_ = {\n 'classifier_weight_filepath': config['classifier_two_weight_filepath'],\n }\n result_list = densenet.test(\n thumbpath_list,\n ibs=ibs,\n gid_list=gid_list,\n return_dict=True,\n multiclass=True,\n **config_,\n )\n result_list = list(result_list)\n for index in range(len(result_list)):\n best_score, best_key, scores = result_list[index]\n classes = [best_key]\n result_list[index] = (\n scores,\n classes,\n )\n else:\n raise ValueError(\n 'specified classifier_two algo is not supported in config = {!r}'.format(\n config\n )\n )\n\n # yield detections\n for result in result_list:\n yield result", "def coco_metrics(pred_data, anno_json, config):\n from pycocotools.coco import COCO\n from pycocotools.cocoeval import COCOeval\n num_classes = 
config.num_classes\n\n #Classes need to train or test.\n val_cls = config.classes\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n predictions = []\n img_ids = []\n\n for sample in pred_data:\n pred_boxes = sample['boxes']\n box_scores = sample['box_scores']\n img_id = sample['img_id']\n h, w = sample['image_shape']\n\n final_boxes = []\n final_label = []\n final_score = []\n img_ids.append(img_id)\n\n for c in range(1, num_classes):\n class_box_scores = box_scores[:, c]\n score_mask = class_box_scores > config.min_score\n class_box_scores = class_box_scores[score_mask]\n class_boxes = pred_boxes[score_mask] * [h, w, h, w]\n\n if score_mask.any():\n nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, config.max_boxes)\n class_boxes = class_boxes[nms_index]\n class_box_scores = class_box_scores[nms_index]\n\n final_boxes += class_boxes.tolist()\n final_score += class_box_scores.tolist()\n final_label += [classs_dict[val_cls_dict[c]]] * len(class_box_scores)\n\n for loc, label, score in zip(final_boxes, final_label, final_score):\n res = {}\n res['image_id'] = img_id\n res['bbox'] = [loc[1], loc[0], loc[3] - loc[1], loc[2] - loc[0]]\n res['score'] = score\n res['category_id'] = label\n predictions.append(res)\n if not os.path.exists('./eval_out'):\n os.makedirs('./eval_out')\n with open('./eval_out/predictions.json', 'w') as f:\n json.dump(predictions, f)\n\n coco_dt = coco_gt.loadRes('./eval_out/predictions.json')\n E = COCOeval(coco_gt, coco_dt, iouType='bbox')\n E.params.imgIds = img_ids\n E.evaluate()\n E.accumulate()\n E.summarize()\n return E.stats[0]", "def test_service_api_predict_multiple_raw_classified(service_app):\n response = service_app.post('/predict',\n data=json.dumps(data),\n content_type='application/json')\n response_data = json.loads(response.data)\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert response_data['message'] == 'Records successfully classified'\n assert len(response_data['prediction'].keys()) == 102\n assert response_data['prediction']['business_outcome'] == [4, 5]\n assert response_data['prediction']['phat'] == [0.8228085289874678, 0.753958838418463]\n assert all(len(value) == 2 for value in response_data['prediction'].values())", "def yield_prediction():\n ## input code here\n if request.method == \"POST\":\n re = request.get_json()\n city = re[\"city\"]\n state = re[\"state\"]\n ## convert into lower case\n state = state.lower()\n city = city.lower()\n model_crop = re[\"crop\"]\n model_crop = model_crop.lower()\n model_season = re[\"season\"]\n model_season = model_season.lower()\n model_area = re[\"area\"]\n model_area = int(model_area)\n\n ## store name of crop for the graph\n crop = model_crop\n ## preprocesss the code\n\n try:\n state_le = load(\"static/labelencoder/state_le.joblib\")\n district_le = load(\"static/labelencoder/district_le.joblib\")\n season_le = load(\"static/labelencoder/season_le.joblib\")\n crop_le = load(\"static/labelencoder/crop_le.joblib\")\n model_crop = crop_le.transform([model_crop])[0]\n model_season = season_le.transform([model_season])[0]\n model_state = state_le.transform([state])[0]\n model_city = district_le.transform([city])[0]\n except:\n response_dict = {\n \"status\": False,\n \"message\": \"Enter Valid Data\"\n }\n return 
jsonify(response_dict)\n\n model_city = int(model_city)\n model_state = int(model_state)\n model_crop = int(model_crop)\n model_season = int(model_season)\n model_para = [model_state, model_city, model_season, model_crop, model_area]\n\n ## prediction code here\n\n import requests\n # NOTE: you must manually set API_KEY below using information retrieved from your IBM Cloud account.\n API_KEY = \"S30qFHkYTHMDO81ijSRiGSiE1jOfnlt01Vtn9UBU2KqL\"\n token_response = requests.post('https://iam.cloud.ibm.com/identity/token',\n data={\"apikey\": API_KEY, \"grant_type\": 'urn:ibm:params:oauth:grant-type:apikey'})\n mltoken = token_response.json()[\"access_token\"]\n\n header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}\n\n # NOTE: manually define and pass the array(s) of values to be scored in the next line\n payload_scoring = {\"input_data\": [\n {\"fields\": [\"State_Name\", \"District_Name\", \"Season\", \"Crop\", \"Area\"], \"values\": [model_para]}]}\n\n response_scoring = requests.post(\n 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/180fe5c1-a652-4e59-8b33-781326790706/predictions?version=2021-07-16',\n json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})\n\n output = response_scoring.json()\n\n ## retrive the output\n\n pred_yield = output[\"predictions\"][0]['values'][0][0]\n pred_production = pred_yield * model_area\n\n ## PIE CHART\n try:\n kharif_value = kharif_yield.query.filter_by(crop_name=crop).first()\n kharif_values = kharif_value.yield_value\n except:\n kharif_values = 0\n try:\n rabi_value = rabi_yield.query.filter_by(crop_name=crop).first()\n rabi_values = rabi_value.yield_value\n except:\n rabi_values = 0\n\n try:\n summer_value = summer_yield.query.filter_by(crop_name=crop).first()\n summer_values = summer_value.yield_value\n except:\n summer_values = 0\n\n try:\n winter_value = winter_yield.query.filter_by(crop_name=crop).first()\n winter_values = winter_value.yield_value\n except:\n winter_values = 0\n\n try:\n autumn_value = autumn_yield.query.filter_by(crop_name=crop).first()\n autumn_values = autumn_value.yield_value\n except:\n autumn_values = 0\n\n try:\n whole_year_value = whole_year_yield.query.filter_by(crop_name=crop).first()\n whole_year_values = whole_year_value.yield_value\n except:\n whole_year_values = 0\n\n season_name = ['kharif', 'rabi', 'summer', 'winter', 'autumn', 'whole year']\n yield_list = [kharif_values, rabi_values, summer_values, winter_values, autumn_values, whole_year_values]\n\n season_yield_dict = dict()\n pie_list = list()\n for season, value in zip(season_name, yield_list):\n if value == 0:\n pass\n else:\n season_yield_dict[season] = round(value, 2)\n pie_list.append(round(value, 2))\n bar_graph_label = list(season_yield_dict.keys())\n pie_final_list = list()\n sum_list = sum(pie_list)\n for val in pie_list:\n suceess = val / sum_list\n suceess = round(suceess, 2)\n pie_final_list.append(suceess * 100)\n\n ## reponse dict here\n response_dict = {\n \"predYield\": pred_yield,\n \"predProduction\": pred_production,\n \"barGraphLabel\": bar_graph_label,\n \"barGraphvalue\": yield_list,\n \"pieChartLabel\": bar_graph_label,\n \"pieChartValue\": pie_final_list\n }\n return jsonify(response_dict)", "def decode_results(self, outputs):\n ...", "def Preprocess_MR(path=\"datasets/raw/rt10662\"):\n\n output_path = \"datasets/preprocessed/MR_Data\"\n\n # load positive and negative data\n with io.open(os.path.join(path, \"rt-polarity.pos\"), encoding='latin-1') as f:\n pos_data = f.readlines()\n 
pos_data = [sentence.strip() for sentence in pos_data]\n with io.open(os.path.join(path, \"rt-polarity.neg\"), encoding='latin-1') as f:\n neg_data = f.readlines()\n neg_data = [sentence.strip() for sentence in neg_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def get_mc_pred(output,eng,frame,nImg):\n preds_2d = [];\n preds_3d = [];\n \n c,s = ut.calc_cent_scale(frame);\n \n center = matlab.double([list(c)],(1,2));\n scale = matlab.double([s],(1,1));\n \n for i in tqdm(range(0,nImg)):\n preds_2d.append(eng.transformMPII(output[\"W_final\"][2*i:2*(i+1)],center,scale,matlab.double([64,64],(1,2)),1));\n preds_3d.append(output[\"S_final\"][3*i:3*i + 3]);\n \n print(\"Converting estimates to Python format...\");\n preds_2d = np.array(preds_2d);\n preds_3d = np.array(preds_3d);\n \n preds_2d = preds_2d.swapaxes(1, 2);\n preds_3d = preds_3d.swapaxes(1, 2);\n \n return preds_2d, preds_3d;", "def voc_pred_process(pred_data, val_cls, recs):\n num_classes = config.num_classes\n cls_img_ids = {}\n cls_bboxes = {}\n cls_scores = {}\n classes = {}\n cls_npos = {}\n for cls in val_cls:\n if cls == 'background':\n continue\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == cls]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n cls_npos[cls] = npos\n classes[cls] = class_recs\n cls_img_ids[cls] = []\n cls_bboxes[cls] = []\n cls_scores[cls] = []\n\n for sample in pred_data:\n pred_boxes = sample['boxes']\n box_scores = sample['box_scores']\n img_id = sample['img_id']\n h, w = sample['image_shape']\n\n final_boxes = []\n final_label = []\n final_score = []\n\n for c in range(1, num_classes):\n class_box_scores = box_scores[:, c]\n score_mask = class_box_scores > config.min_score\n class_box_scores = class_box_scores[score_mask]\n class_boxes = pred_boxes[score_mask] * [h, w, h, w]\n\n if score_mask.any():\n nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, config.max_boxes)\n class_boxes = class_boxes[nms_index]\n 
class_box_scores = class_box_scores[nms_index]\n\n final_boxes += class_boxes.tolist()\n final_score += class_box_scores.tolist()\n final_label += [c] * len(class_box_scores)\n\n for loc, label, score in zip(final_boxes, final_label, final_score):\n cls_img_ids[val_cls[label]].append(img_id)\n cls_bboxes[val_cls[label]].append([loc[1], loc[0], loc[3], loc[2]])\n cls_scores[val_cls[label]].append(score)\n return classes, cls_img_ids, cls_bboxes, cls_scores, cls_npos", "def prep_metrics(ap_data, dets, gt, gt_masks, h, w, num_crowd):\n gt_boxes = torch.Tensor(gt[:, :4])\n gt_boxes[:, [0, 2]] *= w\n gt_boxes[:, [1, 3]] *= h\n gt_classes = list(gt[:, 4].astype(int))\n gt_masks = torch.Tensor(gt_masks).view(-1, h*w)\n\n if num_crowd > 0:\n split = lambda x: (x[-num_crowd:], x[:-num_crowd])\n crowd_boxes , gt_boxes = split(gt_boxes)\n crowd_masks , gt_masks = split(gt_masks)\n crowd_classes, gt_classes = split(gt_classes)\n\n classes, scores, boxes, masks = postprocess(dets, w, h, crop_masks=True, score_threshold=0)\n\n classes = list(classes.cpu().numpy().astype(int))\n scores = list(scores.cpu().numpy().astype(float))\n masks = masks.view(-1, h*w)\n \n num_pred = len(classes)\n num_gt = len(gt_classes)\n\n mask_iou_cache = mask_iou(masks, gt_masks)\n bbox_iou_cache = bbox_iou(boxes.float(), gt_boxes.float())\n\n if num_crowd > 0:\n crowd_mask_iou_cache = mask_iou(masks, crowd_masks, iscrowd=True)\n crowd_bbox_iou_cache = bbox_iou(boxes.float(), crowd_boxes.float(), iscrowd=True)\n else:\n crowd_mask_iou_cache = None\n crowd_bbox_iou_cache = None\n\n iou_types = [\n ('box', lambda i,j: bbox_iou_cache[i, j].item(), lambda i,j: crowd_bbox_iou_cache[i,j].item()),\n ('mask', lambda i,j: mask_iou_cache[i, j].item(), lambda i,j: crowd_mask_iou_cache[i,j].item())\n ]\n\n for _class in set(classes + gt_classes):\n ap_per_iou = []\n num_gt_for_class = sum([1 for x in gt_classes if x == _class])\n \n for iouIdx in range(len(iou_thresholds)):\n iou_threshold = iou_thresholds[iouIdx]\n\n for iou_type, iou_func, crowd_func in iou_types:\n gt_used = [False] * len(gt_classes)\n\n ap_obj = ap_data[iou_type][iouIdx][_class]\n ap_obj.add_gt_positives(num_gt_for_class)\n\n for i in range(num_pred):\n if classes[i] != _class:\n continue\n \n max_iou_found = iou_threshold\n max_match_idx = -1\n for j in range(num_gt):\n if gt_used[j] or gt_classes[j] != _class:\n continue\n \n iou = iou_func(i, j)\n\n if iou > max_iou_found:\n max_iou_found = iou\n max_match_idx = j\n \n if max_match_idx >= 0:\n gt_used[max_match_idx] = True\n ap_obj.push(scores[i], True)\n else:\n # If the detection matches a crowd, we can just ignore it\n matched_crowd = False\n\n if num_crowd > 0:\n for j in range(len(crowd_classes)):\n if crowd_classes[j] != _class:\n continue\n \n iou = crowd_func(i, j)\n\n if iou > iou_threshold:\n matched_crowd = True\n break\n\n # All this crowd code so that we can make sure that our eval code gives the\n # same result as COCOEval. 
There aren't even that many crowd annotations to\n # begin with, but accuracy is of the utmost importance.\n if not matched_crowd:\n ap_obj.push(scores[i], False)", "def compositionalActflowDecodings(data, nov_actflow_data, prac_actflow_data, effects=False, featsel=True, ncvs=1, nproc=5):\n\n nSubjs = data.shape[2]\n stats = np.zeros((1,))\n \n ncond = data.shape[1]\n\n nsamples = nSubjs * ncond\n nfeatures = data.shape[0]\n\n # Label array for supervised learning\n labels = np.tile(range(ncond),nSubjs)\n subjarray = np.repeat(range(nSubjs),ncond)\n\n svm_mat = np.zeros((nsamples,nfeatures))\n nov_svm_mat = np.zeros((nsamples,nfeatures))\n prc_svm_mat = np.zeros((nsamples,nfeatures))\n samplecount = 0\n scount = 0\n for subj in range(nSubjs):\n origdata = data[:,:,scount]\n nov_data = nov_actflow_data[:,:,scount]\n prc_data = prac_actflow_data[:,:,scount]\n svm_mat[samplecount:(samplecount+ncond),:] = origdata.T\n nov_svm_mat[samplecount:(samplecount+ncond),:] = nov_data.T\n prc_svm_mat[samplecount:(samplecount+ncond),:] = prc_data.T\n\n scount += 1\n samplecount += ncond\n\n # Spatially demean matrix across features\n# samplemean = np.mean(svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# svm_mat = svm_mat - samplemean\n#\n# samplemean = np.mean(actflow_svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# actflow_svm_mat = actflow_svm_mat - samplemean\n\n scores, rmatch, rmismatch = compositionalActflowRandomSplitLOOBaselineCV(ncvs, svm_mat, nov_svm_mat, prc_svm_mat, labels, subjarray, featsel=featsel, nproc=nproc)\n# stats = np.mean(scores)\n stats = scores \n if effects: \n return stats, rmatch,rmismatch\n else:\n return stats", "def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred 
image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()", "def chainercv_postprocess_change_labels(results):\n bboxes, labels, scores = results\n # loop over the results and add them to the list of\n # returned predictions\n classes = []\n boxes = []\n confs = []\n for index, bbox in enumerate(bboxes[0]):\n classes.append(str(voc_bbox_label_names[int(labels[0][index])]))\n boxes.append([bbox[0], bbox[1], bbox[2], bbox[3]])\n confs.append(scores[0][index])\n\n return (boxes, classes, confs)", "def actflowDecodings(data, actflow_data, effects=False, featsel=True, ncvs=1, permutation=False, confusion=False, nproc=5):\n\n nSubjs = data.shape[2]\n stats = np.zeros((1,))\n \n ncond = data.shape[1]\n\n nsamples = nSubjs * ncond\n nfeatures = data.shape[0]\n\n # Label array for supervised learning\n labels = np.tile(range(ncond),nSubjs)\n subjarray = np.repeat(range(nSubjs),ncond)\n\n svm_mat = np.zeros((nsamples,nfeatures))\n actflow_svm_mat = np.zeros((nsamples,nfeatures))\n samplecount = 0\n scount = 0\n for subj in range(nSubjs):\n roidata = data[:,:,scount]\n actflow_roidata = actflow_data[:,:,scount]\n svm_mat[samplecount:(samplecount+ncond),:] = roidata.T\n actflow_svm_mat[samplecount:(samplecount+ncond),:] = actflow_roidata.T\n\n scount += 1\n samplecount += ncond\n\n # Spatially demean matrix across features\n# samplemean = np.mean(svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# svm_mat = svm_mat - samplemean\n#\n# samplemean = np.mean(actflow_svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# actflow_svm_mat = actflow_svm_mat - samplemean\n\n scores, rmatch, rmismatch, confusion_mat = actflowRandomSplitLOOBaselineCV(ncvs, svm_mat, actflow_svm_mat, labels, subjarray, featsel=featsel, permutation=permutation, nproc=nproc)\n# stats = np.mean(scores)\n stats = scores \n\n if effects and confusion:\n return stats, rmatch, rmismatch, confusion_mat\n if effects and not confusion:\n return stats, rmatch, rmismatch\n if confusion and not effects:\n return stats, confusion_mat\n else:\n return stats", "def postprocess(self, frame, outs):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n classIds = []\n confidences = []\n boxes = []\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. 
Assign the box's class label as the class with the highest score.\n # your code here\n # loop over each of the layer output (I guess the outs is the number of anchor boxes)\n for output in outs:\n # loop over each of the detection\n for detection in output:\n # extract the class ID and confidence of the current object detection\n # the detection is an array of [bx, by, bw, bh, Pc, c1, c2, ..., c80]\n # Pc is the probability that there is an object\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n \n if confidence > self.confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n \n classIds.append(classID)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n \n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences.\n # your code here\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)\n \n # get the bounding bxoes after performing non maximum suppression\n # your code here\n output_boxes = []\n if len(idxs) > 0:\n for i in idxs.flatten(): # idxs = [[1],[2],[5],...], idxs.flatten() = [1,2,5,...]\n output_boxes.append(boxes[i])\n left = boxes[i][0]\n top = boxes[i][1]\n width = boxes[i][2]\n height = boxes[i][3]\n right = left + width\n bottom = top + height\n frame = self.drawPred(frame, classIds[i], confidences[i], left, top, right, bottom)\n \n output_image = frame\n return output_image, output_boxes", "def prep_metrics(ap_data, dets, img, gt, gt_masks, h, w, num_crowd, image_id, detections: Detections = None):\n if not args.output_coco_json:\n with timer.env('Prepare gt'):\n gt_boxes = torch.Tensor(gt[:, :4])\n gt_boxes[:, [0, 2]] *= w\n gt_boxes[:, [1, 3]] *= h\n gt_classes = list(gt[:, 4].astype(int))\n gt_masks = torch.Tensor(gt_masks).view(-1, h * w)\n\n if num_crowd > 0:\n split = lambda x: (x[-num_crowd:], x[:-num_crowd])\n crowd_boxes, gt_boxes = split(gt_boxes)\n crowd_masks, gt_masks = split(gt_masks)\n crowd_classes, gt_classes = split(gt_classes)\n\n with timer.env('Postprocess'):\n classes, scores, boxes, masks = postprocess(dets, w, h, crop_masks=args.crop,\n score_threshold=args.score_threshold)\n\n if classes.size(0) == 0:\n return\n\n classes = list(classes.cpu().numpy().astype(int))\n if isinstance(scores, list):\n box_scores = list(scores[0].cpu().numpy().astype(float))\n mask_scores = list(scores[1].cpu().numpy().astype(float))\n else:\n scores = list(scores.cpu().numpy().astype(float))\n box_scores = scores\n mask_scores = scores\n masks = masks.view(-1, h * w).cuda()\n boxes = boxes.cuda()\n\n if args.output_coco_json:\n with timer.env('JSON Output'):\n boxes = boxes.cpu().numpy()\n masks = masks.view(-1, h, w).cpu().numpy()\n for i in range(masks.shape[0]):\n # Make sure that the bounding box actually makes sense and a mask was produced\n if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] - boxes[i, 0]) > 0:\n detections.add_bbox(image_id, classes[i], boxes[i, :], box_scores[i])\n detections.add_mask(image_id, classes[i], masks[i, :, :], mask_scores[i])\n return\n\n with timer.env('Eval Setup'):\n num_pred = len(classes)\n num_gt = len(gt_classes)\n\n mask_iou_cache = _mask_iou(masks, gt_masks)\n bbox_iou_cache = _bbox_iou(boxes.float(), gt_boxes.float())\n\n if num_crowd > 0:\n crowd_mask_iou_cache = _mask_iou(masks, 
crowd_masks, iscrowd=True)\n crowd_bbox_iou_cache = _bbox_iou(boxes.float(), crowd_boxes.float(), iscrowd=True)\n else:\n crowd_mask_iou_cache = None\n crowd_bbox_iou_cache = None\n\n box_indices = sorted(range(num_pred), key=lambda i: -box_scores[i])\n mask_indices = sorted(box_indices, key=lambda i: -mask_scores[i])\n\n iou_types = [\n ('box', lambda i, j: bbox_iou_cache[i, j].item(),\n lambda i, j: crowd_bbox_iou_cache[i, j].item(),\n lambda i: box_scores[i], box_indices),\n ('mask', lambda i, j: mask_iou_cache[i, j].item(),\n lambda i, j: crowd_mask_iou_cache[i, j].item(),\n lambda i: mask_scores[i], mask_indices)\n ]\n\n timer.start('Main loop')\n for _class in set(classes + gt_classes):\n ap_per_iou = []\n num_gt_for_class = sum([1 for x in gt_classes if x == _class])\n\n for iouIdx in range(len(iou_thresholds)):\n iou_threshold = iou_thresholds[iouIdx]\n\n for iou_type, iou_func, crowd_func, score_func, indices in iou_types:\n gt_used = [False] * len(gt_classes)\n\n ap_obj = ap_data[iou_type][iouIdx][_class]\n ap_obj.add_gt_positives(num_gt_for_class)\n\n for i in indices:\n if classes[i] != _class:\n continue\n\n max_iou_found = iou_threshold\n max_match_idx = -1\n for j in range(num_gt):\n if gt_used[j] or gt_classes[j] != _class:\n continue\n\n iou = iou_func(i, j)\n\n if iou > max_iou_found:\n max_iou_found = iou\n max_match_idx = j\n\n if max_match_idx >= 0:\n gt_used[max_match_idx] = True\n ap_obj.push(score_func(i), True)\n else:\n # If the detection matches a crowd, we can just ignore it\n matched_crowd = False\n\n if num_crowd > 0:\n for j in range(len(crowd_classes)):\n if crowd_classes[j] != _class:\n continue\n\n iou = crowd_func(i, j)\n\n if iou > iou_threshold:\n matched_crowd = True\n break\n\n # All this crowd code so that we can make sure that our eval code gives the\n # same result as COCOEval. 
There aren't even that many crowd annotations to\n # begin with, but accuracy is of the utmost importance.\n if not matched_crowd:\n ap_obj.push(score_func(i), False)\n timer.stop('Main loop')", "def compute_classifications(depc, gid_list, config=None):\n logger.info('[ibs] Process Image Classifications')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n depc = ibs.depc_image\n if config['classifier_algo'] in ['cnn']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (192, 192),\n }\n thumbnail_list = depc.get_property('thumbnails', gid_list, 'img', config=config_)\n result_list = ibs.generate_thumbnail_class_list(thumbnail_list, **config)\n elif config['classifier_algo'] in ['svm']:\n from wbia.algo.detect.svm import classify\n\n config_ = {'algo': 'resnet'}\n vector_list = depc.get_property('features', gid_list, 'vector', config=config_)\n classifier_weight_filepath = config['classifier_weight_filepath']\n result_list = classify(vector_list, weight_filepath=classifier_weight_filepath)\n elif config['classifier_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails', gid_list, 'img', config=config_, read_extern=False, ensure=True\n )\n result_list = densenet.test(thumbpath_list, ibs=ibs, gid_list=gid_list, **config)\n elif config['classifier_algo'] in ['tile_aggregation', 'tile_aggregation_quick']:\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(';')\n\n assert len(classifier_weight_filepath) == 2\n classifier_algo_, model_tag_ = classifier_weight_filepath\n\n include_grid2 = config['classifier_algo'] in ['tile_aggregation']\n tid_list = ibs.scout_get_valid_tile_rowids(\n gid_list=gid_list, include_grid2=include_grid2\n )\n ancestor_gid_list = ibs.get_tile_ancestor_gids(tid_list)\n confidence_list = ibs.scout_wic_test(\n tid_list, classifier_algo=classifier_algo_, model_tag=model_tag_\n )\n\n gid_dict = {}\n for ancestor_gid, tid, confidence in zip(\n ancestor_gid_list, tid_list, confidence_list\n ):\n if ancestor_gid not in gid_dict:\n gid_dict[ancestor_gid] = []\n gid_dict[ancestor_gid].append(confidence)\n\n result_list = []\n for gid in tqdm.tqdm(gid_list):\n gid_confidence_list = gid_dict.get(gid, None)\n assert gid_confidence_list is not None\n best_score = np.max(gid_confidence_list)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['densenet+neighbors']:\n raise NotImplementedError\n # ut.embed()\n # classifier_weight_filepath = config['classifier_weight_filepath']\n\n # all_bbox_list = ibs.get_image_bboxes(gid_list)\n # wic_confidence_list = ibs.scout_wic_test(gid_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # ancestor_gid_list = list(set(ibs.get_tile_ancestor_gids(gid_list)))\n # all_tile_list = list(set(ibs.scout_get_valid_tile_rowids(gid_list=ancestor_gid_list)))\n # all_bbox_list = ibs.get_image_bboxes(all_tile_list)\n # all_confidence_list = ibs.scout_wic_test(all_tile_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # TODO: USE THRESHOLDED AVERAGE, NOT MAX\n # result_list = []\n # for gid, wic_confidence in zip(gid_list, 
wic_confidence_list):\n # best_score = wic_confidence\n # for aid in aid_list:\n # wic_confidence_ = aid_conf_dict.get(aid, None)\n # assert wic_confidence_ is not None\n # best_score = max(best_score, wic_confidence_)\n #\n # if wic_confidence < 0.5:\n # best_key = 'negative'\n # best_score = 1.0 - best_score\n # else:\n # best_key = 'positive'\n # if best_score > wic_confidence:\n # recovered += 1\n # result = (best_score, best_key, )\n # result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet']:\n import json\n\n json_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n assert exists(json_filepath)\n with open(json_filepath, 'r') as json_file:\n values = json.load(json_file)\n annotations = values.get('annotations', {})\n\n gpath_list = ibs.get_image_paths(gid_list)\n gname_list = [split(gpath)[1] for gpath in gpath_list]\n\n result_list = []\n for gname in gname_list:\n annotation = annotations.get(gname, None)\n assert annotation is not None\n\n best_score = 1.0\n if len(annotation) == 0:\n best_key = 'negative'\n else:\n best_key = 'positive'\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet_csv', 'scout_faster_rcnn_csv']:\n uuid_str_list = list(map(str, ibs.get_image_uuids(gid_list)))\n\n manifest_filepath = join(ibs.dbdir, 'WIC_manifest_output.csv')\n csv_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n\n assert exists(manifest_filepath)\n assert exists(csv_filepath)\n\n manifest_dict = {}\n with open(manifest_filepath, 'r') as manifest_file:\n manifest_file.readline() # Discard column header row\n manifest_line_list = manifest_file.readlines()\n for manifest_line in manifest_line_list:\n manifest = manifest_line.strip().split(',')\n assert len(manifest) == 2\n manifest_filename, manifest_uuid = manifest\n manifest_dict[manifest_filename] = manifest_uuid\n\n csv_dict = {}\n with open(csv_filepath, 'r') as csv_file:\n csv_file.readline() # Discard column header row\n csv_line_list = csv_file.readlines()\n for csv_line in csv_line_list:\n csv = csv_line.strip().split(',')\n assert len(csv) == 2\n csv_filename, csv_score = csv\n csv_uuid = manifest_dict.get(csv_filename, None)\n assert (\n csv_uuid is not None\n ), 'Test image {!r} is not in the manifest'.format(\n csv,\n )\n csv_dict[csv_uuid] = csv_score\n\n result_list = []\n for uuid_str in uuid_str_list:\n best_score = csv_dict.get(uuid_str, None)\n assert best_score is not None\n\n if config['classifier_algo'] in ['scout_detectnet_csv']:\n assert best_score in ['yes', 'no']\n best_key = 'positive' if best_score == 'yes' else 'negative'\n best_score = 1.0\n elif config['classifier_algo'] in ['scout_faster_rcnn_csv']:\n best_score = float(best_score)\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n else:\n raise ValueError\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in [\n 'lightnet',\n 'densenet+lightnet',\n 'densenet+lightnet!',\n ]:\n min_area = 10\n\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(',')\n\n if config['classifier_algo'] in ['lightnet']:\n assert len(classifier_weight_filepath) == 2\n weight_filepath, nms_thresh = classifier_weight_filepath\n wic_thresh = 0.0\n nms_thresh = float(nms_thresh)\n wic_confidence_list = [np.inf] * len(gid_list)\n wic_filter = False\n elif 
config['classifier_algo'] in ['densenet+lightnet', 'densenet+lightnet!']:\n assert len(classifier_weight_filepath) == 4\n (\n wic_model_tag,\n wic_thresh,\n weight_filepath,\n nms_thresh,\n ) = classifier_weight_filepath\n wic_thresh = float(wic_thresh)\n nms_thresh = float(nms_thresh)\n wic_confidence_list = ibs.scout_wic_test(\n gid_list, classifier_algo='densenet', model_tag=wic_model_tag\n )\n wic_filter = config['classifier_algo'] in ['densenet+lightnet']\n else:\n raise ValueError\n\n flag_list = [\n wic_confidence >= wic_thresh for wic_confidence in wic_confidence_list\n ]\n if wic_filter:\n gid_list_ = ut.compress(gid_list, flag_list)\n else:\n gid_list_ = gid_list[:]\n config = {\n 'grid': False,\n 'algo': 'lightnet',\n 'config_filepath': weight_filepath,\n 'weight_filepath': weight_filepath,\n 'nms': True,\n 'nms_thresh': nms_thresh,\n 'sensitivity': 0.0,\n }\n prediction_list = depc.get_property(\n 'localizations', gid_list_, None, config=config\n )\n prediction_dict = dict(zip(gid_list_, prediction_list))\n\n result_list = []\n for gid, wic_confidence, flag in zip(gid_list, wic_confidence_list, flag_list):\n if not flag:\n best_key = 'negative'\n best_score = 1.0 - wic_confidence\n else:\n prediction = prediction_dict.get(gid, None)\n assert prediction is not None\n\n best_score = 0.0\n if prediction is not None:\n score, bboxes, thetas, confs, classes = prediction\n for bbox, conf in zip(bboxes, confs):\n xtl, ytl, w, h = bbox\n area = w * h\n if area >= min_area:\n best_score = max(best_score, conf)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n else:\n raise ValueError(\n 'specified classifier algo is not supported in config = {!r}'.format(config)\n )\n\n # yield detections\n for result in result_list:\n yield result", "def do_predict(self):\n answer = []\n response = []\n\n for it_predictions in json.loads(request.data.decode('UTF-8')):\n prediction = it_predictions['score']\n for ite_clf in g_list_of_classifier:\n answer.append(ite_clf.predict(prediction))\n if answer.count(True) > answer.count(False):\n response.append({'answer' : True})\n else:\n response.append({'answer' : False})\n return json.dumps(response, indent=4)", "def loadMotorResponses(subj,hand='Right'):\n \n hands = {'Left':[0,1],'Right':[2,3]}\n\n x = tgp.loadTaskTiming(subj,'ALL')\n stimIndex = np.asarray(x['stimIndex'])\n ind = np.where(stimIndex=='motorResponse')[0]\n \n datadir = projectdir + 'data/postProcessing/hcpPostProcCiric/'\n h5f = h5py.File(datadir + subj + '_glmOutput_data.h5','r')\n data = h5f['taskRegression/ALL_24pXaCompCorXVolterra_taskReg_betas_canonical'][:].copy()\n data = data[:,ind].copy()\n h5f.close()\n \n # Isolate hand responses\n hand_ind = hands[hand]\n tmpdat = np.zeros((data.shape[0],2))\n if hand=='Right':\n #tmpdat[:,0] = data[:,3] #rmid -- need to flip this once glm is re-run -- check the new reffunc\n tmpdat[:,0] = np.real(data[:,2]) \n tmpdat[:,1] = np.real(data[:,3]) \n elif hand=='Left':\n tmpdat[:,0] = np.real(data[:,0]) #lmid\n tmpdat[:,1] = np.real(data[:,1]) #lind\n data = tmpdat.copy()\n\n return data", "def classify_encapsulated(audio_summary, track_info, pickle_file):\n\tf = open(pickle_file, 'r')\n\tfit = pickle.load(f)\n\tf.close()\n\trv = {}\n\t#print track_info.keys()\n\t#print track_info[audio_summary]['title'].keys()\n\trv[\"speechiness\"] = [audio_summary['audio_summary']['speechiness']]\n\trv[\"time_sig\"] = 
[audio_summary['audio_summary']['time_signature']]\n\trv[\"bpm\"] = [audio_summary['audio_summary']['tempo']]\n\trv[\"key\"] = [audio_summary['audio_summary']['key']]\n\trv[\"duration\"] = [audio_summary['audio_summary']['duration']]\n\trv[\"loudness\"] = [audio_summary['audio_summary']['loudness']]\n\trv[\"end_of_fade_in\"] = [track_info['track']['end_of_fade_in']]\n\trv[\"start_of_fade_out\"] = [track_info['track']['start_of_fade_out']]\n\trv[\"bpm_range\"] = [proc.bpm_range(track_info['beats'])]\n\trv[\"max_bpm_spike\"] = [proc.max_bpm_spike(track_info['beats'])]\n\ttry:\n\t\trv[\"num_keys\"] = [proc.num_keys(track_info['sections'])]\n\texcept:\n\t\trv[\"num_keys\"] = [1]\n\trv[\"sections\"] = [proc.num_sections(track_info['sections'])]\n\tnew_df = build_data_frame([rv],[\"Unknown\"])\n\tp = prob_category(new_df,fit)\n\trobjects.globalenv[\"pred\"] = p\n\tedm_prob = robjects.default_ri2py(p.rx(1))[0]\n\tfolk_prob = robjects.default_ri2py(p.rx(2))[0]\n\trap_prob = robjects.default_ri2py(p.rx(3))[0]\n\tcls = classify(new_df,fit)\n\treturn [(edm_prob,folk_prob,rap_prob),cls[0]]", "def makepredictions(self):\n data, sampling_rate = librosa.load(self.file)\n mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)\n x = np.expand_dims(mfccs, axis=1)\n x = np.expand_dims(x, axis=0)\n predictions = self.loaded_model.predict_classes(x)\n predict = self.convertclasstoemotion(predictions)\n print(\"Prediction is\", \" \", self.convertclasstoemotion(predictions))\n return predict", "def main(domain):\n\n filepath_train1 = '../../Non_covid_data_15oct/train_data_batch1_disregard_removed.pkl'\n filepath_test1 = '../../Non_covid_data_15oct/test_data_batch1_disregard_removed.pkl'\n filepath_train2 = '../../Covid_data_11nov/traindata_covidbatch.pkl'\n filepath_test2 = '../../Covid_data_11nov/testdata_covidbatch.pkl'\n\n df_train_nc, df_test_nc = createDataframe(filepath_train1, filepath_test1, domain, 'noncovid')\n df_train_c, df_test_c = createDataframe(filepath_train2, filepath_test2, domain, 'covid')\n #print(df_train)\n sen_reps_tr_nc, labels_tr_nc, sen_reps_te_nc, labels_te_nc = prepro(df_train_nc, df_test_nc)\n sen_reps_tr_c, labels_tr_c, sen_reps_te_c, labels_te_c = prepro(df_train_c, df_test_c)\n #print(labels_te)\n\n #Uncomment to combine training datasets \n #sen_reps_tr_c += sen_reps_tr_nc\n #labels_tr_c += labels_tr_nc\n\n #Uncomment to combine test datasets and test labels if necessary (if you do so, also combine test df's)\n #sen_reps_te_c += sen_reps_te_nc\n #labels_te_c += labels_te_nc\n #df_test = pd.concat([df_test_c, df_test_nc])\n\n #Feed selected train and test data to regression model\n predictions = get_predictions(sen_reps_tr_c, labels_tr_c, sen_reps_te_c)\n\n #Make dataframes of note id's and labels\n df_ann = make_note_df(df_test_c, labels_te_c)\n df_pred = make_note_df(df_test_c, predictions)\n\n #Evaluate on sentence level\n MSE, MAE, RMSE = evaluation(labels_te_c, predictions)\n\n print(\"MSE \"+domain, MSE)\n print(\"MAE \"+domain, MAE)\n print(\"RMSE \"+domain, RMSE)\n\n #Aggregate per note\n means_ann = means(df_ann)\n means_pred = means(df_pred)\n\n #Evaluate on note level\n MSE, MAE, RMSE = evaluation(means_ann, means_pred)\n\n print(\"MSE agg\"+domain, MSE)\n print(\"MAE agg\"+domain, MAE)\n print(\"RMSE agg\"+domain, RMSE)", "def chainercv_postprocess_pack_each_item(results):\n bboxes, labels, scores = results\n\n # loop over the results and add them to the list of\n # returned predictions\n predictions = []\n for index, bbox in 
enumerate(bboxes[0]):\n r = {\"class\": str(voc_bbox_label_names[int(labels[0][index])]),\n \"bbox\": {\n \"ymin\": str(bbox[0]),\n \"xmin\": str(bbox[1]),\n \"ymax\": str(bbox[2]),\n \"xmax\": str(bbox[3])\n },\n \"probability\": str(scores[0][index])\n }\n predictions.append(r)\n\n return predictions", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def classify(test, network, rbfknn=0, radio=0, spinvalue=0, convalue=0, num_board=1, verbose=1):\r\n value4 = spinvalue #value4 : K\r\n consensus=convalue #value5 : Min consensus of N\r\n if rbfknn==3:\r\n network.rbforknn=0\r\n elif rbfknn==4:\r\n network.rbforknn=1\r\n #print(rbfknn)\r\n #print(network.rbforknn)\r\n #print (network.neurons[0:11])\r\n # Train network\r\n patternID = []\r\n test_detail=[]\r\n total_detail = []\r\n i=1\r\n ID = 0\r\n UNC_c = 0\r\n UNC_i = 0\r\n UNK = 0\r\n incorrect=0\r\n cla_result=[]\r\n classification_result=0\r\n cat_acurracy=[]\r\n j=1\r\n for input in test:\r\n input_comps = [int(x) for x in input]\r\n test_context = input_comps[0]\r\n test_cat = input_comps[1]\r\n test_pattern = input_comps[2:]\r\n test_detail.append([input_comps[0],input_comps[1]])\r\n patternID.append(i)\r\n i+=1\r\n id_, unc, firing_neurons = network.broadcast(input_=test_pattern, new_gcr=test_context)\r\n #print(firing_neurons)\r\n temp=0\r\n temp2=0\r\n temp3=0\r\n temp4=0\r\n temp5=0\r\n cat=[]\r\n for firing_neuron in firing_neurons:\r\n cat.append(firing_neuron.cat)\r\n if len(cat)>=value4:\r\n iter=value4\r\n else :\r\n iter=len(cat)\r\n #print '=========================================='\r\n #print('network.read_cat() : ', cat)\r\n #print('iter : ', iter)\r\n\r\n #########Category Out#########\r\n #Best match\r\n if radio==5:\r\n best_neuron = firing_neurons[-1] if firing_neurons else None\r\n if best_neuron != None:\r\n classification_result = cat[0]\r\n else :\r\n classification_result = None\r\n #print('classification_result : ', classification_result)\r\n\r\n #Dominant\r\n elif radio==6:\r\n if iter>=1:\r\n for i in range(iter):\r\n if cat[i]==1:\r\n temp+=1\r\n 
elif cat[i]==2:\r\n temp2+=1\r\n elif cat[i]==3:\r\n temp3+=1\r\n elif cat[i]==4:\r\n temp4+=1\r\n # elif cat[i]==5:\r\n # temp5+=1\r\n list=[temp, temp2, temp3, temp4]\r\n value=max(list)\r\n\r\n if value==temp:\r\n classification_result = 1\r\n elif value == temp2:\r\n classification_result = 2\r\n elif value == temp3:\r\n classification_result = 3\r\n elif value == temp4:\r\n classification_result = 4\r\n # elif value==temp5:\r\n # classification_result = 5\r\n else: #iter==0\r\n classification_result = None\r\n #print('classification_result : ', classification_result)\r\n #Unanimity\r\n elif radio==7:\r\n if iter>=1:\r\n for i in range(iter):\r\n if cat[i] == test_cat:\r\n temp += 1\r\n else:\r\n temp -= 1\r\n if temp == (iter):\r\n classification_result = test_cat\r\n else:\r\n classification_result = None\r\n else: #iter==0\r\n classification_result = None\r\n #print('temp : ', temp)\r\n #print('classification_result : ', classification_result)\r\n #Min consensus of N(value5)\r\n elif radio==8:\r\n for i in range(iter):\r\n if cat[i] == 1:\r\n temp += 1\r\n elif cat[i] == 2:\r\n temp2 += 1\r\n elif cat[i] == 3:\r\n temp3 += 1\r\n elif cat[i] == 4:\r\n temp4 += 1\r\n # elif cat[i]==5:\r\n # temp5+=1\r\n list = [temp, temp2, temp3, temp4]\r\n value = max(list)\r\n if value >= consensus:\r\n if value == temp:\r\n classification_result = 1\r\n elif value == temp2:\r\n classification_result = 2\r\n elif value == temp3:\r\n classification_result = 3\r\n elif value == temp4:\r\n classification_result = 4\r\n # elif value==temp5:\r\n # classification_result=5\r\n else:\r\n classification_result=None\r\n #print('classification_result : ', classification_result)\r\n cla_result.append(classification_result)\r\n\r\n #Accuracy\r\n #print('test_cat == classification_result ?', test_cat, classification_result)\r\n l=len(cat)\r\n if l==1 or l==0:\r\n if classification_result == test_cat :\r\n ID += 1\r\n cat_acurracy.append([test_cat, 'ID'])\r\n elif classification_result == None:\r\n UNK += 1\r\n cat_acurracy.append([test_cat, 'UNK'])\r\n elif classification_result != test_cat :\r\n incorrect +=1\r\n elif l==2:\r\n if cat[0] == cat[1] :\r\n if classification_result == test_cat:\r\n ID += 1\r\n cat_acurracy.append([test_cat, 'ID'])\r\n else:\r\n if classification_result == None:\r\n UNK += 1\r\n cat_acurracy.append([test_cat, 'UNK'])\r\n elif classification_result != test_cat:\r\n UNC_i += 1\r\n cat_acurracy.append([test_cat, 'UNC_i'])\r\n elif classification_result == test_cat:\r\n UNC_c += 1\r\n cat_acurracy.append([test_cat, 'UNC_c'])\r\n elif l>=3:\r\n if cat[0] == cat[1] and cat[0] == cat[2]:\r\n if classification_result == test_cat:\r\n ID += 1\r\n cat_acurracy.append([test_cat, 'ID'])\r\n else:\r\n if classification_result == None:\r\n UNK += 1\r\n cat_acurracy.append([test_cat, 'UNK'])\r\n elif classification_result != test_cat:\r\n UNC_i += 1\r\n cat_acurracy.append([test_cat, 'UNC_i'])\r\n elif classification_result == test_cat:\r\n UNC_c += 1\r\n cat_acurracy.append([test_cat, 'UNC_c'])\r\n\r\n #sum=ID+UNC_i+UNC_c+UNK\r\n #print(\"ID, UNC_c, UNC_i, UNK\", j, ID, UNC_c, UNC_i, UNK, sum)\r\n #print(l)\r\n j+=1\r\n #table data\r\n #print(cat_acurracy)\r\n detail = []\r\n for firing_neuron in firing_neurons:\r\n detail.append([firing_neuron.cat,firing_neuron.dist, firing_neuron.id_ ])\r\n #print(detail)\r\n patternID = np.reshape(patternID, (1, -1))\r\n test_detail = np.reshape(test_detail, (1, -1))\r\n detail = np.reshape(detail, (1, -1))\r\n if classification_result != None:\r\n temp_detail 
= np.hstack((patternID, test_detail, detail))\r\n else:\r\n temp_detail = np.hstack((patternID, test_detail))\r\n test_detail= []\r\n patternID = []\r\n total_detail.append(temp_detail)\r\n #print(temp_detail)\r\n\r\n #print (ID/32.16)\r\n #print(UNC_c/32.16)\r\n #print(UNC_i/32.16)\r\n #print(UNK/32.16)\r\n return ID, UNC_c, UNC_i, UNK, total_detail, cla_result, cat_acurracy", "def _eval_predictions(self, tasks, predictions):\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n reverse_id_mapping = {\n v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()\n }\n for result in coco_results:\n category_id = result[\"category_id\"]\n assert (\n category_id in reverse_id_mapping\n ), \"A prediction has category_id={}, which is not available in the dataset.\".format(\n category_id\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\"Evaluating predictions ...\")\n for i, task in enumerate(list(sorted(tasks)) + ['segm_voc', 'segm_nvoc']):\n\n if task in ['segm_voc', 'segm_nvoc']:\n iou_type, ps_mode = task.split('_')\n else:\n iou_type = task\n ps_mode = None\n\n print('psmode', ps_mode, iou_type)\n coco_eval = (\n _evaluate_predictions_on_coco_ps(\n self._coco_api, coco_results, iou_type, ps_mode, kpt_oks_sigmas=self._kpt_oks_sigmas\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n if ps_mode == 'voc':\n class_names = list(np.asarray(self._metadata.get(\"thing_classes\"))[VOC_INDICES])\n elif ps_mode == 'nvoc':\n class_names = list(np.asarray(self._metadata.get(\"thing_classes\"))[NON_VOC_INDICES])\n else:\n class_names = self._metadata.get(\"thing_classes\")\n\n res = self._derive_coco_results(\n coco_eval, iou_type, class_names=class_names\n )\n self._results[task] = res", "def predict(image_path, wrapper):\n \"\"\"\n #Don't forget to store your prediction into ImgPred\n img_prediction = ImgPred(...)\n \"\"\"\n\n #This is where all of our code will probably go. Here are the steps to success\n\n \n #Step One: Make a list which will contain the locations of every character in our source Image.\n SymPredList = []\n\n #Step Two: Go down that list we just made and use the code from PA4 in conjunction with our new Model to analyze each character. George made this part.\n #This is the find a character part of the code. 
Max and George worked it out.\n im = cv2.imread(image_path,0)\n (thresh, imbw) = cv2.threshold(im,20,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #cv2.imwrite('clapfuck.jpg', imbw)\n im3,contours,hierarchy = cv2.findContours(imbw,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n idx = 0\n for cnt in contours:\n idx += 1\n x1,y1,w,h = cv2.boundingRect(cnt)\n roi=imbw[y1:y1+h,x1:x1+w]\n\n #Step Two.1: Make a Numpy Array of all the pixels starting from the top left corner of an identified character to the bottom right corner of the identified character.\n height, width = roi.shape\n if height >= width:\n padded = cv2.copyMakeBorder(roi,0,0,(height-width)//2,(height-width)//2,cv2.BORDER_CONSTANT,value=[0,0,0])\n else:\n padded = cv2.copyMakeBorder(roi,(width-height)//2,(width-height)//2,0,0,cv2.BORDER_CONSTANT,value=[0,0,0])\n Smol = cv2.resize(padded, (28, 28))\n (thresh, evaluateMe) = cv2.threshold(Smol, 20, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n #scipy.misc.imsave(os.path.basename(file), ree)\n #Step Two.2: Feed that numpy into our PA4 image analyzer converter thing but using our new trained model\n evaluateMeMe = numpy.reshape(evaluateMe, (1, 28, 28, 1))\n prediction = tf.argmax(y_conv,1)\n final_number = prediction.eval(feed_dict={x:evaluateMeMe,y_:numpy.zeros((1,40)), keep_prob:1.0})\n #Step Two.3: Record what we think it is as the prediction field of the SymPred we are currently on\n final_guess = wrapper.label_types[int(final_number)]\n DisSymPred = SymPred(final_guess,x1,y1,x1+w,y1-h)\n SymPredList.append(DisSymPred)\n\n #Step Three: Wrap that now complete SymPred list, in an ImgPred, fill out all the fields of that ImgPred, and then return that shit.\n img_prediction = ImgPred(os.path.basename(image_path), SymPredList)\n\n #Step Four: Were Donezo\n return img_prediction", "def StageSubjects(self,X_test, Y_test, Xrange):\r\n\r\n # subject prediction\r\n pred_sub = []\r\n expectation_sub = []\r\n\r\n # distribution of trajectory samples\r\n sampling_dist = []\r\n\r\n for biomarker in range(self.nrBiomk):\r\n sampling_dist.append([])\r\n for i in range(500):\r\n s_omega, m_omega, s, m, sigma, l, eps = self.unpack_parameters(self.parameters[biomarker])\r\n perturbation_zero_W = np.random.randn(int(2 * self.N_rnd_features)).reshape(\r\n [2 * self.N_rnd_features, 1])\r\n perturbation_zero_Omega = np.random.randn(int(self.N_rnd_features))\r\n Omega = 1 / np.sqrt(np.exp(l)) * self.perturbation_Omega\r\n W = np.multiply(perturbation_zero_W, np.sqrt(np.exp(s))) + m\r\n output = self.basis(Xrange, np.exp(sigma), Omega)\r\n sampling_dist[biomarker].append(np.dot(output, W))\r\n\r\n for sub in range(len(X_test[0])):\r\n print(\"predicting sub: \", sub, \"out of \", len(X_test[0]))\r\n pred_sub.append([])\r\n expectation_sub.append([])\r\n for pos_index,position in enumerate(Xrange):\r\n pred_sub[sub].append(0)\r\n for biomarker in range(self.nrBiomk):\r\n Y_test_biom = np.array(Y_test[biomarker][sub]).reshape([len(Y_test[biomarker][sub]),1])\r\n X_test_biom = np.array(X_test[biomarker][sub]).reshape([len(X_test[biomarker][sub]),1])\r\n\r\n X_test_biom = (X_test_biom - self.mean_std_X[biomarker][0]) / self.mean_std_X[biomarker][1]\r\n X_test_biom = X_test_biom / self.max_X[biomarker]\r\n\r\n Y_test_biom = (Y_test_biom - self.mean_std_Y[biomarker][0]) / self.mean_std_Y[biomarker][1]\r\n Y_test_biom = Y_test_biom / self.max_Y[biomarker]\r\n\r\n if len(X_test_biom > 0):\r\n X_to_test = position + X_test_biom\r\n for i in range(500):\r\n current_sample = sampling_dist[biomarker][i][pos_index:(pos_index 
+ len(Y_test_biom))]\r\n if (len(Y_test_biom) == len(current_sample)):\r\n pred_sub[sub][pos_index] = pred_sub[sub][pos_index] \\\r\n + np.sum((Y_test_biom - current_sample) ** 2)\r\n # - 0.5 * (np.log(2 * np.pi * np.exp(eps)) \\\r\n\r\n final_pred = []\r\n for sub in range(len(pred_sub)):\r\n invalid_indices = np.where(np.array(pred_sub[sub])==0)[0]\r\n # pred_sub[sub][pred_sub[sub] == 0] = 1e10\r\n # print('valid_indices', valid_indices, np.array(pred_sub[sub]).shape)\r\n # invalid_indices = np.logical_not(np.in1d(np.array(range(Xrange.shape[0])), valid_indices))\r\n # print(asds)\r\n # predictions = np.array(pred_sub[sub])[valid_indices]\r\n predictions = np.array(pred_sub[sub])\r\n final_pred.append([])\r\n final_pred[sub] = np.exp(-predictions/500)/ np.sum(np.exp(-predictions/500))\r\n final_pred[sub][invalid_indices] = 0\r\n final_pred[sub] /= np.sum(final_pred[sub])\r\n scaling = self.mean_std_X[biomarker][1]*self.max_X[biomarker]\r\n #expectation_sub[sub] = np.sum(final_pred[sub] * Xrange.flatten()[valid_indices]) * scaling + self.mean_std_X[biomarker][0]\r\n # expectation_sub[sub] = np.sum(final_pred[sub] * (Xrange.flatten()[valid_indices] * scaling + self.mean_std_X[biomarker][0]))\r\n expectation_sub[sub] = np.sum(final_pred[sub] * (Xrange.flatten() * scaling + self.mean_std_X[biomarker][0]))\r\n return final_pred, expectation_sub" ]
[ "0.56707114", "0.56553566", "0.5601765", "0.55017805", "0.5460834", "0.5439918", "0.54097337", "0.537334", "0.53697425", "0.5360478", "0.53484106", "0.5340918", "0.52992934", "0.5270834", "0.5248631", "0.52458835", "0.5234071", "0.5232353", "0.52291584", "0.52209944", "0.52191246", "0.5216457", "0.5216363", "0.52039564", "0.5202222", "0.51861346", "0.51855004", "0.51838213", "0.51728195", "0.5156003" ]
0.6313217
0
Returns a dictionary containing all diagnostic light curves. The dictionary will provide a light curve for each matrix in the design matrix collection.
def _create_diagnostic_lightcurves(self): if self.coefficients is None: raise ValueError("you need to call `correct()` first") lcs = {} for idx, submatrix in enumerate(self.dmc.matrices): # What is the index of the first column for the submatrix? firstcol_idx = sum([m.shape[1] for m in self.dmc.matrices[:idx]]) submatrix_coefficients = self.coefficients[ firstcol_idx : firstcol_idx + submatrix.shape[1] ] # submatrix_coefficients_err = self.coefficients_err[firstcol_idx:firstcol_idx+submatrix.shape[1], firstcol_idx:firstcol_idx+submatrix.shape[1]] # samples = np.asarray([np.dot(submatrix.values, np.random.multivariate_normal(submatrix_coefficients, submatrix_coefficients_err)) for idx in range(100)]).T # model_err = np.abs(np.percentile(samples, [16, 84], axis=1) - np.median(samples, axis=1)[:, None].T).mean(axis=0) model_flux = u.Quantity( submatrix.X.dot(submatrix_coefficients), unit=self.lc.flux.unit ) model_flux_err = u.Quantity( np.zeros(len(model_flux)), unit=self.lc.flux.unit ) lcs[submatrix.name] = LightCurve( time=self.lc.time, flux=model_flux, flux_err=model_flux_err, label=submatrix.name, ) return lcs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_materials_dict(self):\n c = 299792458.0\n w_mat = 2 * np.pi * c / self.l_mat - self.w0\n l2_mat = (self.l_mat * 1e6) ** 2\n\n n_air = 1 + 0.05792105 * l2_mat / (238.0185 * l2_mat - 1) + 0.00167917 * l2_mat / (57.362 * l2_mat - 1)\n air_ip = interp1d(w_mat, n_air, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['air'] = air_ip\n\n n_fs = np.sqrt(1 + 0.6961663 * l2_mat / (l2_mat - 0.0684043 ** 2) +\n 0.4079426 * l2_mat / (l2_mat - 0.1162414 ** 2) +\n 0.8974794 * l2_mat / (l2_mat - 9.896161 ** 2))\n fs_ip = interp1d(w_mat, n_fs, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['fs'] = fs_ip\n\n n_mgf2 = np.sqrt(1 + 0.48755108 * l2_mat / (l2_mat - 0.04338408 ** 2) +\n 0.39875031 * l2_mat / (l2_mat - 0.09461442 ** 2) +\n 2.3120353 * l2_mat / (l2_mat - 23.793604 ** 2))\n mgf2_ip = interp1d(w_mat, n_mgf2, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['mgf2'] = mgf2_ip\n\n n_sapphire_o = np.sqrt(1 + 1.4313493 * l2_mat / (l2_mat - 0.0726631 ** 2) +\n 0.65054713 * l2_mat / (l2_mat - 0.1193242 ** 2) +\n 5.3414021 * l2_mat / (l2_mat - 18.028251 ** 2))\n sapphire_o_ip = interp1d(w_mat, n_sapphire_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_o'] = sapphire_o_ip\n\n n_sapphire_e = np.sqrt(1 + 1.5039759 * l2_mat / (l2_mat - 0.0740288 ** 2) +\n 0.55069141 * l2_mat / (l2_mat - 0.1216529 ** 2) +\n 6.5927379 * l2_mat / (l2_mat - 20.072248 ** 2))\n sapphire_e_ip = interp1d(w_mat, n_sapphire_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_e'] = sapphire_e_ip\n\n n_bbo_o = np.sqrt(2.7405 + 0.0184 / (l2_mat - 0.0179) - 0.0155 * l2_mat)\n bbo_o_ip = interp1d(w_mat, n_bbo_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_o'] = bbo_o_ip\n\n n_bbo_e = np.sqrt(2.3730 + 0.0128 / (l2_mat - 0.0156) - 0.0044 * l2_mat)\n bbo_e_ip = interp1d(w_mat, n_bbo_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_e'] = bbo_e_ip\n\n materials_files = os.listdir(self.materials_path)\n logger.info(\"Found {0:d}\".format(materials_files.__len__()))\n for mat_file in materials_files:\n logger.debug(mat_file)\n self.read_material(''.join((self.materials_path, '/', mat_file)))", "def light_curve(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 4*8 # Detector mask\n + 4 # spare\n + 12 # Pixel mask\n + 1 # spare\n + 1 # Comp Schema light curve S\n + 3 # Comp Schema light curve K\n + 3 # Comp Schema light curve M\n + 1 # Comp Schema trigger S\n + 3 # Comp Schema trigger K\n + 3 # Comp Schema trigger M\n + 1 # Energy bin mask upper boundary\n + 4*8 # Energy bin mask lower boundray\n + 1*8 # Number of energies\n + num_energies*2*8 # Number data points\n + 2*8 # Number of data points\n + 2*8 # Number of data point\n )\n\n variable = (\n + num_energies*num_samples*8 # Compressed light curves\n + num_samples*8 # Compressed triggers\n + num_samples*8 # RCR\n )\n\n return fixed_header, variable", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def read_lightcurves(params,log):\n\n lightcurves = { 'g':None, 'r':None , 'i': None}\n\n for f in lightcurves.keys():\n\n lc_file = params['target_lc_file_'+f]\n\n if path.isfile(lc_file):\n\n lightcurves[f] = read_rbn_lightcurve(lc_file,log)\n\n return lightcurves", "def gradients(self):\n return {}", "async def Lights_Description() -> 
List[Dict[str, Any]]:\n result = []\n for index, light in enumerate(busylightapi.manager.lights):\n result.append(\n {\n \"light_id\": index,\n \"name\": light.name,\n \"info\": light.info,\n \"is_on\": light.is_on,\n \"color\": rgb_to_hex(*light.color),\n }\n )\n return result", "def show_materials(self):\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.DIELECTRIC)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.LOSS_TAN)\n return", "def curves(self):\n return self._curve_reg", "def show_lightcurve(self):\n\n time_array = self.exp_start_times\n\n fig = plt.figure()\n\n if self.transmission_spectroscopy:\n lc_model = self.generate_lightcurves(time_array,\n self.planet.calcTransitDepth())\n plt.ylabel(\"Transit Depth\")\n else:\n lc_model = np.ones_like(time_array)\n plt.ylabel(\"Unit Flux\")\n\n if self._visit_trend:\n trend_model = self._visit_trend.scale_factors\n # have to convert weird model format to flat array\n lc_model = trend_model * lc_model.T[0]\n\n plt.scatter(time_array, lc_model)\n plt.xlabel(\"Time (JD)\")\n plt.title(\"Normalised White Time Series of observation\")\n\n return time_array, lc_model", "def compute_diagnostics(self) -> Dict[str, Any]:\n return {}", "def get_sigma_dict(self):\n sigma = {}\n for c in self.cellLines:\n sigma[c] = {}\n for l in self.ligands:\n sigma[c][l] = self.aucs[c][l]['sigma']\n return sigma", "def contrast_matrices(self):\n \n p = len(self.formula.terms)\n matrices = {}\n for crep in self._contrast_reps:\n s = self.slices[crep]\n l = s.stop - s.start\n array = np.zeros((l,p), np.float)\n for i, j in enumerate(range(l)):\n array[i,j+s.start] = 1.\n matrices[crep] = array\n return matrices", "def realizations(self):\n return [ self.km(), self.kHLP(), self.affineSchur(), self.dual_k_Schur()]", "def get_light_list(self):\n return self.light_array", "def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n\n from Hlt2Lines.SingleMuon.Lines import SingleMuonLines\n d.update({SingleMuonLines : \n {'Prescale' : {\"Hlt2SingleMuon\" : 0.5, \n \"Hlt2SingleMuonLowPT\" : 0.1},\n 'HltReq' : {\"SingleMuon\" : \"HLT_PASS_RE('Hlt1TrackMuonDecision')\"},\n 'Common' : {'TrChi2' : 3, # Adimensional\n 'Pt' : 1300 * MeV },\n 'SingleMuon' : {'IP' : 0.0 * mm,\n 'IPChi2' : 16 }, # Adimensional\n 'HighPT' : {'HighPt' : 10000 *MeV },\n 'LowPT' : { 'HighPt' : 4800 * MeV }\n }\n })\n \n return d", "def get_mof_descriptors(self) -> dict:\n result_dict = OrderedDict(\n (\n (\"name\", self.name),\n (\"graph_hash\", self.graph_hash),\n (\"path\", self._filename),\n (\"density\", self.density),\n (\"has_oms\", self.has_oms),\n (\"has_carbon\", self.has_carbon),\n (\"has_hydrogen\", self.has_hydrogen),\n (\"has_atomic_overlaps\", self.has_atomic_overlaps),\n (\"has_overcoordinated_c\", self.has_overvalent_c),\n (\"has_overcoordinated_n\", self.has_overvalent_n),\n (\"has_overcoordinated_h\", self.has_overvalent_h),\n (\"has_undercoordinated_c\", self.has_undercoordinated_c),\n (\"has_undercoordinated_n\", self.has_undercoordinated_n),\n (\"has_metal\", self.has_metal),\n (\"has_lone_atom\", self.has_lone_atom),\n (\"has_lone_molecule\", self.has_lone_molecule),\n (\"has_high_charges\", self.has_high_charges),\n (\"has_undercoordinated_metal\", self.has_undercoordinated_metal),\n )\n )\n return result_dict", "def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n from 
Hlt2Lines.Commissioning.Lines import CommissioningLines \n d.update({CommissioningLines :\n {'Prescale' : {'Hlt2PassThrough' : 0.0001,\n 'Hlt2Forward' : 0.00001,\n 'Hlt2DebugEvent' : 0.000001},\n 'Postscale' : {'Hlt2ErrorEvent' : 'RATE(0.01)'},\n # do not want debug events on lumi-exclusive Hlt1 events...\n 'DebugEvent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\"},\n 'ErrorEvent' : {'Priority' : 254,\n 'VoidFilter' : '',\n 'HLT2' : \"HLT_COUNT_ERRORBITS_RE('^Hlt2.*',0xffff) > 0\"},\n 'PassThrough' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'NoBiasPassThrough' : {'HLT1' : \"HLT_PASS('Hlt1NoBiasPrescaledDecision')\",\n 'VoidFilter' : ''},\n 'Transparent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(ODIN.*|L0.*|MB.*|BeamGas.*|Velo.*|NZS.*|Incident|Tell1Error|ErrorEvent)Decision$')\",\n 'VoidFilter' : ''},\n 'Lumi' : {'HLT1' : \"HLT_PASS_SUBSTR('Hlt1Lumi')\",\n 'VoidFilter' : ''},\n 'KS0_DD' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'KS0_LL' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'Turbo' : ['KS0_DD', 'KS0_LL']\n }}\n )\n return d", "def lightcurve(self):\n return NGCLightCurve(self['corotid'])", "def generate_lightcurves(self, time_array, depth=False):\n\n # TODO (ryan) quick check if out of transit, in that case ones!\n\n # TODO (ryan) should this be in generate exposure?\n\n planet = self.planet\n star = self.planet.star\n\n P = float(planet.P.rescale(pq.day))\n a = float((planet.a / star.R).simplified)\n rp = float((planet.R / star.R).simplified)\n i = float(planet.i.rescale(pq.deg))\n e = planet.e\n W = float(planet.periastron)\n transittime = float(planet.transittime)\n\n if np.isnan(W):\n W = 0\n\n time_array = time_array.to(u.day).value\n # model for each resolution element.\n\n if depth:\n planet_spectrum = np.array(\n [depth]) # an array as we want to perform ndim later.\n else:\n planet_spectrum = self.planet_spectrum\n\n models = np.zeros((len(time_array), len(planet_spectrum)))\n\n planet_spectrum = np.sqrt(\n planet_spectrum) # pylc wants Rp/Rs not transit depth\n\n time_array = tools.jd_to_hjd(time_array,\n planet) # pylc wants hjd not jd\n\n logger.debug(\n \"Generating lightcurves with P={}, a={}, i={}, e={}, W={}, T14={},\"\n \" mean_depth={}\".format(\n P, a, i, e, W, transittime, np.mean(planet_spectrum)\n ))\n\n for j, spec_elem in enumerate(planet_spectrum):\n models[:, j] = pylc.transit('claret', self.ldcoeffs, spec_elem, P, a, e, i,\n W, transittime, time_array) - \\\n (\n 1. 
- pylc.eclipse(spec_elem ** 2, rp, P, a, e,\n i, W,\n transittime, time_array))\n\n return models", "def generate_visualization_dict(self):\n self._data = {}\n self._data['name'] = self.name\n self._data['type'] = self.__repr__()\n self._data['color'] = self._color_rgb\n\n try:\n self._data['simulation_matrix'] = \\\n self._visualization_matrix.tolist()\n\n except:\n #Not sure which error to call here.\n raise RuntimeError('''Please call the numerical\n transformation methods,\n before generating simulation dict ''')\n\n\n return self._data", "def get_lightcurves(product_type):\n\n # Build lists of paths to lightcurves\n results = session.query(Outputs).all()\n individual_lightcurves = set([os.path.join(result.individual_path, result.individual_filename) for result in results])\n composite_lightcurves = set([os.path.join(result.composite_path, result.composite_filename) for result in results if result.composite_filename != None])\n\n # Return appropriate list based on product_type\n if product_type == 'individual':\n lightcurves = individual_lightcurves\n elif product_type == 'composite':\n lightcurves = composite_lightcurves\n elif product_type == 'both':\n lightcurves = individual_lightcurves | composite_lightcurves\n\n return lightcurves", "def _supported_convergence_norms() -> Dict[Text, _NormType]:\n return {\n _monitor_key('convergence', 'l-1'): _NormType.L1,\n _monitor_key('convergence', 'l-2'): _NormType.L2,\n _monitor_key('convergence', 'l-inf'): _NormType.L_INF,\n _monitor_key('subiter-scalar', 'convergence_l-1'): _NormType.L1,\n _monitor_key('subiter-scalar', 'convergence_l-2'): _NormType.L2,\n _monitor_key('subiter-scalar', 'convergence_l-inf'): _NormType.L_INF,\n }", "def all_lightcurves():\n for fname in listdir(join(DATA_PATH, 'lightcurves')):\n try:\n locus_id, fmt = fname.split('.')\n assert fmt == 'lc'\n yield fetch_lightcurve(locus_id)\n except Exception as e:\n print(f'{repr(e)} fetching {fname}')", "def get_metal_descriptors(self) -> dict:\n if not self.has_metal:\n raise NoMetal\n return self._get_metal_descriptors()", "def sans_monitor() -> dict:\n return {**beamline.beamline(scatter=False), **tof.elastic_wavelength('tof')}", "def contrasts(self):\n if not hasattr(self, \"_contrasts\"):\n self._contrasts = {}\n self._contrast_reps = []\n self._formulae = []\n for expr in sorted(self.graded_dict.keys()):\n formulae = get_contributions(self.codings[expr],\n self.sorted_factors)\n for formula, srep in formulae:\n v = formula * Formula([expr])\n sexpr = \"I(%s)\" % str(expr)\n if srep:\n crep = ':'.join([sexpr,srep])\n else:\n crep = sexpr\n self._contrasts[crep] = v\n self._contrast_reps.append(crep)\n self._formulae.append(v)\n return self._contrasts", "def get_gradients(self):\n\n result_gradients = {}\n for signal in self.signals_list:\n net = self.networks[signal]\n\n list_gradients = []\n for w in net.parameters():\n list_gradients.append(w.grad.view(-1).clone())\n\n grad_vector = torch.cat(list_gradients, dim=0)\n result_gradients[signal] = grad_vector\n\n return result_gradients", "def loss_functions(self) -> dict:\n tmp_dict = dict()\n tmp_dict['From_Root'] = (('Cov', self.log_post_cov, self.constraint_cov),\n ('Beta', self.log_post_beta, self.constraint_sigma),\n ('Lambda', self.log_post_lambda, self.constraint_sigma),\n ('Psi', self.log_post_psi, self.constraint_psi),\n ('Theta', self.log_post_theta, self.constraint_theta),\n ('Tree', self.log_post_tree, self.constraint_tree))\n\n tmp_dict['Likelihood'] = (self.log_likelihood, ('Psi',\n 'Beta',\n 'Theta',\n 
'Lambda'))\n return tmp_dict", "def lCurve(self): \n\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. # for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n lcplt.figname = os.path.join(self.workpath, 'LightCurve.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.hline = [None, self.tsmin]\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [(min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale, min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [(max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n if len(flux) == 0:\n lcplt.mksize = [2, 2]\n lcplt.ymode = ['linear', 'linear']\n lcplt.color = ['gray', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [True, False]\n lcplt.multiplot(x = [ timeMJD[undet], timeMJD ],\n y = [ upperl/scale, ts ],\n xerr = [ lcTab['mjderr'][undet], lcTab['mjderr']],\n yerr = [ upperl/scale*0.1, None])\n else:\n lcplt.mksize = [2, 2, 2]\n lcplt.ymode = ['linear', 'linear', 'linear']\n lcplt.color = ['gray', 'black', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [[True, False], False]\n lcplt.multiplot(x = [ [timeMJD[undet], timeMJD[detect]], timeMJD ],\n y = [ [upperl/scale, flux/scale], ts ],\n xerr = [ [lcTab['mjderr'][undet], lcTab['mjderr'][detect]], lcTab['mjderr']],\n yerr = [ [upperl/scale*0.1, fluxerr/scale], None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return", "def _collect_legend(self) -> Dict[str, Any]:\n legend_labels = dict()\n for ax in self.axes_active:\n for handle, label in zip(*ax.get_legend_handles_labels()): # type: ignore\n if label in legend_labels:\n # TODO: Add warning/exception if color conflict is found\n pass\n else:\n legend_labels[label] = handle\n return legend_labels" ]
[ "0.60194796", "0.59146357", "0.5619035", "0.55798995", "0.55360436", "0.54979974", "0.54268354", "0.5380969", "0.53533584", "0.53345037", "0.5332291", "0.52693045", "0.52423155", "0.5225769", "0.52155113", "0.5174672", "0.51708114", "0.51582485", "0.51488817", "0.51368886", "0.51165", "0.5111178", "0.5093713", "0.50933754", "0.50664324", "0.5058357", "0.5046492", "0.5044474", "0.5043926", "0.5040329" ]
0.75933963
0
Produce diagnostic plots to assess the effectiveness of the correction.
def _diagnostic_plot(self): if not hasattr(self, "corrected_lc"): raise ValueError( "Please call the `correct()` method before trying to diagnose." ) with plt.style.context(MPLSTYLE): _, axs = plt.subplots(2, figsize=(10, 6), sharex=True) ax = axs[0] self.lc.plot(ax=ax, normalize=False, label="original", alpha=0.4) for key in self.diagnostic_lightcurves.keys(): ( self.diagnostic_lightcurves[key] - np.median(self.diagnostic_lightcurves[key].flux) + np.median(self.lc.flux) ).plot(ax=ax) ax.set_xlabel("") ax = axs[1] self.lc.plot(ax=ax, normalize=False, alpha=0.2, label="original") self.corrected_lc[self.outlier_mask].scatter( normalize=False, c="r", marker="x", s=10, label="outlier_mask", ax=ax ) self.corrected_lc[~self.cadence_mask].scatter( normalize=False, c="dodgerblue", marker="x", s=10, label="~cadence_mask", ax=ax, ) self.corrected_lc.plot(normalize=False, label="corrected", ax=ax, c="k") return axs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Diagnostic_plot2(self):\n\n probs = pd.read_csv(self.probfile)\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n plt.scatter(probs['f0'], probs['Pdet_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['Pdet_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['Pdet_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_Pdet' + self.ds.epic + '.pdf')\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'SNR')\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n #plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_SNR' + self.ds.epic + '.pdf')", "def diagnose(self):\n return self._diagnostic_plot()", "def galactic_correction_summary(self):\n t = self.GalacticCorrection(self).x\n dd = dict()\n for i,c in enumerate(t.T):\n dd[i] = dict(offset=c.mean()-1,std=c.std())\n df = pd.DataFrame(dd).T\n fig, ax = plt.subplots()\n ax.errorbar(x=range(8), y=df.offset.values, yerr=df['std'].values, fmt='o')\n ax.axhline(0, color='grey')\n ax.set(xlabel='energy band', ylabel='offset', title='galactic correction offsets')\n return fig", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n for ncl in self._ncl:\n diagUtilsLib.generate_ncl_plots(env, ncl)", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n for ncl in self._ncl:\n diagUtilsLib.generate_ncl_plots(env, ncl)", "def generate_plots(self, env):\n print(' Generating diagnostic plots for : {0}'.format(self.__class__.__name__))\n for ncl in self._ncl:\n diagUtilsLib.generate_ncl_plots(env, ncl)", "def summarize_diagnostics(self):\n # plot loss\n pyplot.subplot(211)\n pyplot.title('Cross Entropy Loss')\n pyplot.plot(self.history.history['loss'], color='blue', label='train')\n pyplot.plot(self.history.history['val_loss'], color='orange', label='test')\n # plot accuracy\n pyplot.subplot(212)\n pyplot.title('Classification Accuracy')\n pyplot.plot(self.history.history['accuracy'], color='blue', label='train')\n pyplot.plot(self.history.history['val_accuracy'], color='orange', label='test')\n # save plot to file\n pyplot.savefig(f'{self.project_home / \"o\"}/{self.model.name}_plot.png')\n pyplot.close()", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj 
== 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at 
%0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", 
"def reconstruction_errors(identifier, train_errors, vali_errors,\n generated_errors, random_errors):\n print(identifier)\n fig, axarr = plt.subplots(4, 1, sharex=True, figsize=(4, 8))\n axarr[0].hist(train_errors, normed=1, color='green', bins=50)\n axarr[0].set_title(\"train reconstruction errors\")\n axarr[1].hist(vali_errors, normed=1, color='blue', bins=50)\n axarr[1].set_title('vali reconstruction errors')\n axarr[2].hist(generated_errors, normed=1, color='pink', bins=50)\n axarr[2].set_title('generated reconstruction errors')\n axarr[3].hist(random_errors, normed=1, color='grey', bins=50)\n axarr[3].set_title('random reconstruction errors')\n for ax in axarr:\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.tick_params(bottom='off', left='off')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n axarr[3].set_xlim(0, 0.05)\n plt.tight_layout()\n plt.savefig('./experiments/plots/' + identifier + '_reconstruction_errors.png')\n return True", "def test_plotting():\n specs = _read_schrodinger('tests/test_data/{}.inp'.format(PROBLEM))\n xint, yint = _interpolate(specs['interpolxydecs'][:, 0],\n specs['interpolxydecs'][:, 1], specs['xopt'])\n\n energies = np.loadtxt('tests/test_data/energies_{}.ref'.format(PROBLEM))\n wfuncsdata = np.loadtxt('tests/test_data/wfuncs_{}.ref'.format(PROBLEM))\n potdata = np.vstack((xint, yint)).T\n\n expval = calculate_expval(xint, wfuncsdata[:, 1:].T)\n uncval = calculate_uncertainty(xint, wfuncsdata[:, 1:].T)\n\n expvaldata = np.vstack((expval, uncval)).T\n _write_data('tests/test_data', potdata, energies, wfuncsdata, expvaldata)\n\n # qm_plottings('tests/test_data', scale=0.5)\n # assert True", "def plot_proof_functions():\n\n def thm1_D(x):\n return abs(1 / (2 + x) - 3 / (5 + x)) + abs(1 / (2 + x) - 2 / (5 + x))\n\n def thm2_D(x):\n return abs(2 / (2 + x) - (1 / 2) / ((1 / 2) + x))\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 2, 1000)\n\n plt.plot(x, thm1_D(x))\n plt.xlim(0, 2)\n plt.ylim(0.15, 0.22)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 6, 0, 2, linestyles='dashed', colors='grey', alpha=0.5)\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm1_D.pdf', bbox_inches='tight')\n plt.xticks(range(3), range(3))\n plt.close()\n\n print(f'Saved plot to: plots/thm1_D.pdf')\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 5, 1000)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 3, 0, 5, linestyles='dashed', colors='grey', alpha=0.5)\n plt.plot(x, thm2_D(x))\n plt.xlim(0, 5)\n plt.ylim(0, 0.4)\n plt.xticks(range(6), range(6))\n\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm2_D.pdf', bbox_inches='tight')\n plt.close()\n\n print(f'Saved plot to: plots/thm2_D.pdf')", "def visualize(self):\n\t\tplt.figure(1)\n\t\tax1 = plt.add_suplot(1,2,1)\n\t\t# Plot free energy error\n\t\tax1.plot(self.FE_errors_GMM_CV_, linewidth=4, label='GMM with cross-validation')\n\t\tax1.plot(self.FE_errors_GMM_mix_models_, linewidth=4, label='GMM with mixture of models')\n\t\tplt.legend()\n\n\t\t# Plot density error\n\n\t\t# Plot log-likelihood of test set\n\n\t\t# Plot clustering score\n\n\t\tplt.show()\n\n\t\treturn", "def plot_spectrumxichange(self):\n countgood = 0 ; countbad = 0\n for idata in self.datarg:\n if idata[-1, 0] == 1.: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'b') \n countgood 
+= 1\n print countgood , 'good solution'\n else: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'r') \n print countbad, 'bad solution'\n countbad += 1\n print 'We found %g good solutions and %g tda startdistributions that broke down before xi = 1, we hope that\\'s what you expected' %(countgood,countbad)\n #Create custom artistsr[goodline,badline],['solution','breakdown']\n goodline = pl.Line2D((0,1),(0,0), color='b') \n badline = pl.Line2D((0,1),(0,0), color='r')\n self.layout(self.reader.depvar['depvar'] , r'energy spectrum (a.u.)' , tit = r'All tda start distributions $\\xi$' , legendhand = [goodline , badline] , legendlab = ['solution', 'breakdown'] )\n self.savefig('xispec')", "def Diagnostic_plot3(self):\n\n floc = glob.glob('/home/mxs191/Desktop/MathewSchofield/TRG/DetTest/DetTest1_results/Info2Save/*.csv')\n fig = plt.figure()\n plt.rc('font', size=18)\n #fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n gs = gridspec.GridSpec(1, 2, width_ratios=(4,1))\n ax = fig.add_subplot(gs[0])\n\n for idx, i in enumerate(floc):\n\n d = pd.read_csv(i)\n\n if idx == 0:\n fullpdet = d[['f0', 'Pdet_Kepler', 'Pdet_TESS365', 'Pdet_TESS27']]\n else:\n fullpdet = pd.concat([ fullpdet,\\\n d[['f0', 'Pdet_Kepler', 'Pdet_TESS365', 'Pdet_TESS27']] ])\n\n plt.scatter(d['f0'], d['Pdet_Kepler'], color='b',\\\n label=r\"$\\rm Kepler - 4\\ yrs$\" if idx == 0 else '')\n plt.scatter(d['f0'], d['Pdet_TESS365'], color='orange',\\\n label=r'$\\rm TESS - 1\\ yr$' if idx == 0 else '')\n plt.scatter(d['f0'], d['Pdet_TESS27'], color='g',\\\n label=r'$\\rm TESS - 27\\ days$' if idx == 0 else '')\n\n plt.axhline(fullpdet['Pdet_Kepler'].median(), color='b')\n plt.axhline(fullpdet['Pdet_TESS365'].median(), color='orange')\n plt.axhline(fullpdet['Pdet_TESS27'].median(), color='g')\n ax.legend(loc='lower right')\n plt.ylim([0,1])\n ax.set_ylabel(r'$P_{\\rm det}$')\n ax.set_xlabel(r'$\\nu / \\mu \\rm Hz$')\n\n bx = fig.add_subplot(gs[1])\n import seaborn as sns\n bw = 0.4\n sns.kdeplot(fullpdet['Pdet_Kepler'].values, shade=True, vertical=True, \\\n ax=bx, color='b', bw=bw)\n sns.kdeplot(fullpdet['Pdet_TESS365'].values, shade=True, vertical=True, \\\n ax=bx, color='orange', bw=bw)\n sns.kdeplot(fullpdet['Pdet_TESS27'].values, shade=True, vertical=True, \\\n ax=bx, color='g', bw=bw)\n bx.set_ylim([0.0,1.0])\n bx.set_xticks([])\n bx.set_yticks([])\n bx.set_xlabel(r'$\\rm Density$')\n plt.tight_layout()\n\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot3.pdf')\n sys.exit()", "def plotResults(results):\n e = results['eclean'] - results['eCTI']\n e1 = results['e1clean'] - results['e1CTI']\n e2 = results['e2clean'] - results['e2CTI']\n\n print 'Delta e, e_1, e_2:', np.mean(e), np.mean(e1), np.mean(e2)\n print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(e, bins=15, label='$e$', alpha=0.5)\n ax.hist(e1, bins=15, label='$e_{2}$', alpha=0.5)\n ax.hist(e2, bins=15, label='$e_{1}$', alpha=0.5)\n ax.set_xlabel(r'$\\delta e$ [no CTI - CDM03 corrected]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig('ellipticityDelta.pdf')\n plt.close()\n\n r2 = (results['R2clean'] - results['R2CTI'])/results['R2clean']\n print 'delta R2 / R2: mean, std ', np.mean(r2), np.std(r2)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(r2, bins=15, label='$R^{2}$')\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [no CTI - CDM03 corrected]')\n plt.legend(shadow=True, fancybox=True)\n 
plt.savefig('sizeDelta.pdf')\n plt.close()", "def plot_field_uncertainties():\n\n resize_size = (1000, 1000)\n\n\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n if drug_name == 'DMSO':\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/30_hours/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n\n def transform(x):\n if type(x) is np.ndarray:\n x = change_array_lims(x)\n x = np.log(x)\n return x\n\n F_unc_vmin = -7\n F_unc_vmax = -4\n sigma_vmin = -5\n sigma_vmax = 0 #0.4\n sigma_unc_vmin = -6\n sigma_unc_vmax = -2\n\n fig_Fs = [plt.figure() for _ in range(3)]\n fig_uncertainty = plt.figure()\n sigma_lists, F_arrays = [], []\n for idx_fig, dir in enumerate(dirs):\n\n p_list = _load_and_resize_list(dir+'p_list_0.pickle')\n D_list = _load_and_resize_list(dir+'D_list_0.pickle')\n U_array = pickle.load(open(dir+'U.pickle', 'rb'))\n U_array = cv2.resize(U_array, resize_size, interpolation = cv2.INTER_LINEAR)\n Gx, Gy = np.gradient(U_array, 26./resize_size[0], 26./resize_size[0]) # gradients with respect to x and y\n F_array = (Gx**2+Gy**2)**.5 # gradient magnitude\n F_array[np.isinf(F_array)] = np.nan\n F_array[p_list[-1]<1e-3]=np.nan # final PDF\n sigma_list = []\n for j in range(9):\n arr = D_list[2*j] # current PDF\n arr[p_list[j]<1e-3]=np.nan\n sigma_list.append(np.sqrt(2*arr))\n\n\n sigma_lists.append(sigma_list)\n F_arrays.append(F_array)\n\n ax = fig_Fs[idx_fig].add_subplot(111)\n ax.imshow(transform(F_array)[::-1, :], cmap = cmap, vmin = -4.6, vmax = -2)\n ax.set_title(dir)\n\n all_axes = [i for j in fig_Fs for i in j.axes]\n for ax in all_axes:\n ax.axis('off')\n\n # uncertainties\n\n std = np.std(F_arrays, axis = 0)\n ax = fig_uncertainty.add_subplot(121)\n ax.imshow(transform(std)[::-1, :], cmap = cmap, vmin = F_unc_vmin, vmax = F_unc_vmax)\n ax.set_title('F_uncertainty')\n\n fig_sigma = plt.figure()\n ax = fig_sigma.add_subplot(111)\n ax.imshow(transform(np.nanmean(sigma_lists[0], axis = 0))[::-1, :], cmap = cmap, vmin = sigma_vmin, vmax = sigma_vmax) # index 0 (i.3 'original' is corresponds to the landscapes in other figures)\n ax.set_title('sigma_mean')\n\n sigma_means = [np.nanmean(sigma_list, axis = 0) for sigma_list in sigma_lists]\n std_array = np.nanstd(sigma_means, axis = 0)\n ax = fig_uncertainty.add_subplot(122)\n ax.imshow(transform(std_array)[::-1, :], cmap = cmap, vmin = sigma_unc_vmin, vmax = sigma_unc_vmax)\n ax.set_title('sigma_uncertainty')\n\n fig_sigma.savefig(path_to_here+'/../outputs/{}_mean_sigma.png'.format(drug_name), dpi = 1200)\n fig_uncertainty.savefig(path_to_here+'/../outputs/{}_uncertainties.png'.format(drug_name), dpi = 1200)", "def quick_plot(solution):\n plt.suptitle('GNLSE solution')\n\n plt.subplot(1, 2, 1)\n plot_wavelength_vs_distance(solution)\n\n plt.subplot(1, 2, 2)\n plot_delay_vs_distance(solution)\n\n plt.show()", "def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')", "def robustnessPlot(ax):\n # Setup the range of avidity and ligand concentration we'll look at\n gnus = np.logspace(1, 3, 3, base=2, dtype=np.int)\n Los = np.logspace(start=-11, stop=-7, num=35, dtype=np.float)\n\n pp = pd.DataFrame(np.array(np.meshgrid(gnus, Los)).T.reshape(-1, 2),\n 
columns=['gnus', 'Los'])\n\n pp['CPredict'] = pp.apply(lambda x: InVivoPredict(x.values)[1], axis=1)\n\n # Change avidities to strings\n pp['gnus'] = pp['gnus'].apply(lambda gnu: r'$\\nu=' + str(int(gnu)) + '$')\n\n avcolors = dict(zip(pp['gnus'].unique(), sns.color_palette()[1:]))\n\n # Plot the calculated crossvalidation performance\n sns.FacetGrid(pp,\n hue='gnus',\n palette=sns.color_palette()[1:]).map(ax.semilogx, 'Los', 'CPredict')\n\n ax.legend(handles=Legend(pp['gnus'].unique(), avcolors, [], {}), bbox_to_anchor=(1, 1), loc=2)\n\n ax.set_xlabel('Assumed IC Conc. (M)')\n ax.set_ylabel('LOO Prediction R-Squared')\n ax.set_ylim(0.0, 1.0)", "def plot_metric_results():\n from run_metric_comparison_experiments import (\n PIVECTOR_TEMPLATE,\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE,\n NUM_TRAJECTORIES,\n NUM_COMPONENTS,\n NUM_REPETITIONS,\n REWARD_SCALES,\n ENVS\n )\n\n # Path-templates to each distance matrix to compare\n # BC = Behavioural Characteristication\n BC_DISTANCE_MATRIX_TEMPLATES = [\n PIVECTOR_DISTANCE_MATRIX_TEMPLATE,\n GAUSSIAN_DISTANCE_MATRIX_TEMPLATE,\n DISCRIMINATOR_DISTANCE_MATRIX_TEMPLATE,\n ENCODER_DISTANCE_MATRIX_TEMPLATE,\n DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE\n ]\n\n BC_LEGEND_NAMES = [\n \"Supervector\",\n \"Gaussian\",\n \"Discriminator\",\n \"Encoder\",\n \"Discretization\"\n ]\n\n BC_PLOT_COLORS = [\n \"C0\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\"\n ]\n\n fig, axs = pyplot.subplots(\n figsize=[4.8 * 3 * 0.75, 4.8 * 0.75],\n nrows=1,\n ncols=3,\n )\n\n def get_policy_names(env):\n policy_names = glob(PIVECTOR_TEMPLATE.format(env=env, num_traj=\"*\", num_components=\"*\", policy_name=\"*\", repetition_num=\"*\"))\n policy_names = [\"_\".join(os.path.basename(x).split(\"_\")[-4:-2]) for x in policy_names]\n policy_names = sorted(list(set(policy_names)))\n return policy_names\n\n # For each different distance measurement\n for distance_matrix_template, plot_legend_name, plot_color in zip(BC_DISTANCE_MATRIX_TEMPLATES, BC_LEGEND_NAMES, BC_PLOT_COLORS):\n # These will be NUM_TRAJECTORY length lists\n average_scores = np.ones((len(NUM_TRAJECTORIES),))\n std_scores = np.ones((len(NUM_TRAJECTORIES),))\n for num_traj_idx, num_traj in enumerate(NUM_TRAJECTORIES):\n # Average over environments, policies and repetitions\n scores = []\n for env_i, env in enumerate(ENVS):\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n min_reward, max_reward = REWARD_SCALES[env]\n policy_names = get_policy_names(env)\n\n for policy_name in policy_names:\n for repetition in range(1, NUM_REPETITIONS + 1):\n # Ugh bit of messing around because I did not think this through...\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n data = np.load(file_path)\n distance_matrix = data[\"distance_matrix\"]\n rewards = data[\"average_episodic_rewards\"]\n\n raveled_reward_distances = np.abs(rewards - rewards[:, None])\n # Take upper diagonal, skip diagonal\n raveled_reward_distances = 
raveled_reward_distances[np.triu_indices(raveled_reward_distances.shape[0], 1)]\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n\n # Score is correlation between the two\n correlation = np.corrcoef(raveled_distances, raveled_reward_distances)[0, 1]\n scores.append(correlation)\n\n scores = np.array(scores)\n average_score = np.mean(scores)\n std_score = np.std(scores)\n average_scores[num_traj_idx] = average_score\n std_scores[num_traj_idx] = std_score\n ax = axs[0]\n ax.plot(NUM_TRAJECTORIES, average_scores, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, average_scores, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # average_scores - std_scores,\n # average_scores + std_scores,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Correlation with return-distances\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Amount of error to \"ground truth\" result,\n # where \"ground truth\" is one of the results with 100 trajectories of data.\n # Because of wonkyness of this, store list [#num-traj] of lists,\n # each storing results for that num-traj run\n per_trajectory_relative_errors = [[] for i in NUM_TRAJECTORIES]\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # The \"ground truth\" distances, will be filled with first\n # result with 100 trajectories.\n anchor_distance = None\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n # Check if we use this as the zero-point or compute relative error to\n if anchor_distance is None:\n assert num_traj == 100\n anchor_distance = raveled_distances\n else:\n per_trajectory_relative_errors[traj_i].append(\n np.mean(np.abs(raveled_distances - anchor_distance) / np.abs(anchor_distance))\n )\n # Lists are not of equal length, so can not just change into an array\n mean_average_errors = np.array([np.mean(np.array(results) * 100) for results in per_trajectory_relative_errors])\n std_average_errors = np.array([np.std(np.array(results) * 100) for results in per_trajectory_relative_errors])\n ax = axs[1]\n ax.plot(NUM_TRAJECTORIES, mean_average_errors, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, mean_average_errors, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # mean_average_errors - std_average_errors,\n # mean_average_errors + std_average_errors,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Relative error to ground truth (%)\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n # Variation between results\n cv_means = np.ones((len(NUM_TRAJECTORIES,)))\n cv_stds = np.ones((len(NUM_TRAJECTORIES,)))\n for traj_i, num_traj in enumerate(NUM_TRAJECTORIES):\n traj_averaged_cvs = []\n for env in ENVS:\n if \"Bipedal\" in env and distance_matrix_template == DISCRETIZATION_DISTANCE_MATRIX_TEMPLATE:\n print(\"[Note] Skipping env {} for discretization distances (OOM)\".format(env))\n continue\n policy_names = get_policy_names(env)\n for policy_name in policy_names:\n # Compute std over repetitions\n distances = []\n for repetition in range(1, NUM_REPETITIONS + 1):\n if distance_matrix_template == PIVECTOR_DISTANCE_MATRIX_TEMPLATE:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, num_components=NUM_COMPONENTS, policy_name=policy_name, repetition_num=repetition)\n else:\n file_path = distance_matrix_template.format(env=env, num_traj=num_traj, policy_name=policy_name, repetition_num=repetition)\n\n distance_matrix = np.load(file_path)[\"distance_matrix\"]\n # Normalize to [0, 1]\n distance_matrix = (distance_matrix - distance_matrix.min()) / (distance_matrix.max() - distance_matrix.min())\n # Get only upper triangle as distance matrix is symmetric. 
Exlude diagonal\n raveled_distances = distance_matrix[np.triu_indices(distance_matrix.shape[0], 1)]\n distances.append(raveled_distances)\n distances = np.stack(distances)\n # Coefficient of variance (std / mean)\n average_cv = np.mean(np.std(distances, axis=0) / np.mean(distances, axis=0))\n traj_averaged_cvs.append(average_cv)\n traj_averaged_cvs = np.array(traj_averaged_cvs)\n cv_means[traj_i] = np.mean(traj_averaged_cvs)\n cv_stds[traj_i] = np.std(traj_averaged_cvs)\n\n ax = axs[2]\n ax.plot(NUM_TRAJECTORIES, cv_means, c=plot_color, label=plot_legend_name)\n ax.scatter(NUM_TRAJECTORIES, cv_means, c=plot_color)\n #ax.fill_between(\n # NUM_TRAJECTORIES,\n # cv_means - cv_stds,\n # cv_means + cv_stds,\n # alpha=0.2,\n # color=plot_color,\n # edgecolor=\"none\",\n # linewidth=0.0\n #)\n ax.set_xticks(NUM_TRAJECTORIES)\n ax.tick_params(axis='both', which='both', labelsize=\"x-large\")\n ax.set_ylabel(\"Coefficient of variance $\\\\sigma/\\\\mu$\", fontsize=\"x-large\")\n ax.set_xlabel(\"Number of trajectories\", fontsize=\"x-large\")\n ax.grid(alpha=0.2)\n\n axs[1].legend(prop={\"size\": \"large\"})\n pyplot.tight_layout()\n pyplot.savefig(\"figures/metric_comparison.pdf\", bbox_inches=\"tight\", pad_inches=0.0)", "def analysis_plot(predictions, ys):\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 5))\n\n residuals = ys - predictions\n\n # Plot 1 - Predicted vs Actual\n sns.scatterplot(predictions, ys, ax=ax1)\n ax1.set_title('Predicted vs Actual', fontsize=20)\n ax1.set(xlabel='Predicted Ys', ylabel='Actual Ys')\n\n # Plot 2 - Residuals PLot (predicted vs residuals)\n sns.scatterplot(predictions, residuals, ax=ax2)\n ax2.set_title('Residuals Plot', fontsize=20)\n ax2.set(xlabel='Predicted Ys', ylabel='Residuals')\n\n # Plot 3 - QQ Plot\n sm.qqplot(residuals, ax=ax3, line='s')\n ax3.set_title('QQ Plot- Distribution of Residuals', fontsize=20)\n\n plt.show();", "def compareRepeatability():\n fc,pc = refCylNoise()\n fp,pp = flatNoisePellicle()\n fcgh,pcgh = flatNoiseCGH()\n fpco,ppco = PCO1S12Noise()\n\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([fp,fc,fcgh,fpco],[pp,pc,pcgh,ppco])]\n plt.clf()\n \n plt.loglog(fp,pp/fp[0],label='Flat+Pellicle: %.2f' % midfreq[0])\n plt.loglog(fc,pc/fc[0],label='Cylinder+CGH: % .2f' % midfreq[1])\n plt.loglog(fcgh,pcgh/fcgh[0],label='Flat+CGH: %.2f' % midfreq[2])\n plt.loglog(fpco,ppco/fpco[0],label='PCO1S12: %.2f' % midfreq[3])\n plt.title('Repeatability Comparison')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')", "def small_signal_mobility_paperplot(fieldVector, freqVector, df):\n vcm = np.array(fieldVector)*1e-2\n n = utilities.calculate_density(df)\n lw = 1.5\n fig, ax = plt.subplots()\n for freq in freqVector:\n cond = []\n mu_3 = []\n for ee in fieldVector:\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(vcm, np.array(np.real(cond))/c.e/n*100**2, '-', label='{:.1f} GHz'.format(freq),linewidth=lw)\n ax.plot(vcm,mu_3,'-',label = 'Ohmic Mobility',linewidth=lw)\n plt.xlabel(r'Field ($\\rm V \\, cm^{-1}$)')\n plt.ylabel(r'AC Mobility ($\\rm cm^2 \\, V^{-1} \\, s^{-1}$)')\n plt.ylim([-0.05*np.max(mu_3),np.max(mu_3)*1.2])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, 
verticalalignment='top', bbox=props)\n plt.legend(ncol=3,loc='lower center')\n plt.savefig(pp.figureLoc+'ac_mobility.png', bbox_inches='tight',dpi=600)\n\n\n plt.figure(figsize=(2.05, 2.5))\n ax = plt.axes([0.21, 0.19, 0.75, 0.75])\n i = 0\n for ee in fieldVector:\n colorList = [eq_color, med_color, high_color]\n cond = []\n cond_linear = []\n mu_3 = []\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(freqVector, np.array(np.real(cond))/c.e/n*100**2/1000, '-',\n label='{:.0f} '.format(ee/100)+r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n\n plt.xlabel(r'Frequency (GHz)')\n plt.ylabel(r'$\\Re(\\rm AC\\ mobility$) (1000 $\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n plt.ylim([0, 20])\n plt.xlim([freqs[0], freqs[-1]])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n plt.xlim([freqVector[0],freqVector[-1]])\n locmaj = matplotlib.ticker.LogLocator(base=10, numticks=6)\n ax.xaxis.set_major_locator(locmaj)\n locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,\n numticks=100)\n ax.xaxis.set_minor_locator(locmin)\n plt.savefig(pp.figureLoc+'Real_ac_mobility.png',dpi=600)\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.imag(cond)) / c.e / n * 100 ** 2, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n\n i = i + 1\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'$\\Im \\, [\\mu_{\\omega}]$ ($\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Imag_ac_mobility.png', bbox_inches='tight', dpi=600)\n\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + 
\"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.arctan(np.imag(cond)/np.real(cond)))/np.pi, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\\pi$'))\n ax.yaxis.set_major_locator(tck.MultipleLocator(base=1.0))\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'AC Mobility Phase Angle (Radians)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend()\n yloc = plt.MaxNLocator(6)\n ax.yaxis.set_major_locator(yloc)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Phase_ac_mobility.png', bbox_inches='tight', dpi=600)", "def plot_diagnostics(foldspec, flag, mask):\n \n plt.figure(figsize=(15,10))\n \n plt.subplot(231)\n plt.plot(foldspec.mean(0).mean(0), color='k')\n plt.xlabel('phase (bins)')\n plt.ylabel('I (arb.)')\n plt.title('Pulse Profile')\n plt.xlim(0, foldspec.shape[-1])\n \n plt.subplot(232)\n plt.title('RFI flagging parameter (log10)')\n plt.xlabel('time (bins)')\n plt.ylabel('freq (bins)')\n plt.imshow(np.log10(flag).T, aspect='auto')\n\n plt.subplot(233)\n plt.title('Manual off-gate scaling')\n plt.imshow(mask.T, aspect='auto', cmap='Greys')\n plt.xlabel('time (bins)')\n \n plt.subplot(234)\n plt.imshow(foldspec.mean(0), aspect='auto')\n plt.xlabel('phase')\n plt.ylabel('freq')\n\n plt.subplot(235)\n plt.imshow(foldspec.mean(1), aspect='auto')\n plt.xlabel('phase')\n plt.ylabel('time')\n\n plt.subplot(236)\n plt.imshow(foldspec.mean(2).T, aspect='auto')\n plt.xlabel('time')\n plt.ylabel('freq')", "def save_figs(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving plots...\")\n # 1. 
Generate the required PNG plots\n # 1.1 Truncation plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,2,figsize=(8,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n cycd=0.5*(cyct[1:]+cyct[:-1])\n ax[0].plot(cyct,self.samplesdata[:,i],'k.-',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r-',linewidth=1,label=\"Truncated\")\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n plt.legend(loc='upper left',frameon=False)\n # First derivative\n ax[1].plot(cycd,self.samplesdatadiff[:,i],'k.-',linewidth=0.5)\n ax[1].axvline(self._cutoffidx[i],color='r')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([self.samplesdatadiff.min()*1.1,self.samplesdatadiff.max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"dF/dCycle (a.u.)\")\n ax[1].set_title(\"Fluorescence rate\")\n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"01truncation\",fn)\n self.info['samples'][s]['Data truncation for fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close() \n # 1.2 Fitting plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,3,figsize=(12,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n ax[0].plot(cyct,self.samplesdata[:,i],'k:',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r.-',linewidth=0.5,label=\"Truncated\")\n #ax[0].plot(cycf,self.mak3fpre[s],'y-',linewidth=1,label=\"prefit\")\n ax[0].plot(cycf,self.mak3fluorescence[s],'g-',linewidth=1,label=\"MAK3 fit\")\n ax[0].axvline(self._cutoffidx[i],color='k')\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n ax[0].legend(loc='upper left',frameon=False)\n # DNA levels\n ax[1].plot(cycf,self.mak3concentration[s],'g-',linewidth=1,label=\"MAK3\")\n ax[1].axvline(self._cutoffidx[i],color='k')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([0,self.mak3concentration[s].max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"concentration (a.u.)\")\n ax[1].set_title(\"estimated cDNA levels\")\n # Efficiency\n ax[2].plot(cycf,self.mak3efficiency[s],'b-',linewidth=1,label=\"MAK3\")\n ax[2].axvline(self._cutoffidx[i],color='k')\n ax[2].set_xlim([0,self.nvalues-1])\n ax[2].set_ylim([0,1.1])\n ax[2].set_xlabel(\"Cycle\")\n ax[2].set_ylabel(\"Efficiency\")\n ax[2].set_title(\"Amplification efficiency\") \n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"02mak3\",fn)\n self.info['samples'][s]['MAK3 Fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 2 Initial concentrations\n figwdth=np.maximum(5,0.4*self.nsamples+1)\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.initialConcentration.values())\n k=list(self.initialConcentration.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00initialConcentration\")\n self.info['figname_initialConcentration']=figname\n 
plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 3 Fitting Error\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.fitting_error.values())\n k=list(self.fitting_error.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,1e-2])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fittingError\")\n self.info['figname_fittingError']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 4 kinetic constant\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.k.values())\n k=list(self.k.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00kineticConstant\")\n self.info['figname_k']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 5 background fluorescence\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.Fb.values())\n k=list(self.Fb.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00bkgFluorescence\")\n self.info['figname_Fb']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 6 slope\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.slope.values())\n k=list(self.slope.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,0.025])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fluorescenceSlope\")\n self.info['figname_slope']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation 
{}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def create_plots(initial_condition, flux):\n \n# Path to the hdf5 file containing results of computations\n computations_database = h5py.File(database_path, \"r\")\n \n# Path to the data group containing the computational data\n group_path = initial_condition + \"/\" + flux\n \n# Path to the highest grid resolution data (k = 16)\n dataset_path = group_path + \"/k = \" + str(16)\n# Adjust the plot window to the maximum and minimum values the computed solution attains \n ymax = asarray(computations_database[dataset_path]).max()\n ymin = asarray(computations_database[dataset_path]).min()\n pyplot.ylim(ymax = ymax*1.2)\n pyplot.ylim(ymin = ymin*1.2)\n \n# Path to the initial condition dataset \n dataset_initial_path = initial_condition + \"/k = \" + str(16) + \" initial_data\"\n \n# The title of the plot, containing relevant information about the computation \n title = \"{group_path}: CFL = {CFL}, a = {a}\".format(\n group_path = group_path.replace(\"/\", \", \"), \n CFL = computations_database[group_path].attrs[\"CFL\"], \n a = computations_database[group_path].attrs[\"a\"]\n )\n \n# Define the title and axes' labels; enable grid in the plot's background\n pyplot.title(title)\n pyplot.xlabel(\"x\")\n pyplot.ylabel(\"u(x, t = 9)\")\n pyplot.grid(True)\n\n# Define the grid against which data should be ploted, i.e. uniform grid with stepsize 2^(-16)\n x = numpy.linspace(0, 1, 2**16)\n# Plot the initial data \n pyplot.plot(x, computations_database[dataset_initial_path])\n\n# Include the initial data legend in the array of all legends which will be displayed in the plot\n legend_labels = [\"Initial Data\"]\n# Plot the legend for the initial data \n pyplot.legend(legend_labels)\n\n# Plot solutions corresponding to grid stepsizes 2^(-10), 2^(-12), 2^(-14), and 2^(-16),\n for k in range(10, 17, 2):\n x = numpy.linspace(0, 1, 2**k) \n \n dataset_path = group_path + \"/k = \" + str(k)\n \n pyplot.plot(x, computations_database[dataset_path])\n \n# Append the legend text corresponding to the current computation to the array of legends\n legend_labels.append(\"k = \" + str(k))\n# Plot the newly appended legend; ncol - number of columns, fancybox - rounded corners of the box enclosing the legend text\n pyplot.legend(legend_labels, ncol = 3, fancybox = True, fontsize = \"small\")\n\n# Output plots in two formats to be used in the website \n pyplot.savefig(html_path + \"images/\" + group_path.replace(\"/\",\"_\") + \".png\")\n pyplot.savefig(html_path + \"pdf/plots/\" + group_path.replace(\"/\",\"_\") + \".pdf\")\n pyplot.close()\n \n computations_database.close()", "def residual_plots(test_data, mods, station=None, squared=False):\r\n import config\r\n learning_var = config.learning_var\r\n data = test_data.get_miniOD()\r\n mods.models[0].mod.load()\r\n pred = mods.models[0].mod.predict(test_data)\r\n test_data.miniOD = None\r\n if squared:\r\n data[test_data.get_stations_col(2015)] = (data[test_data.get_stations_col(2015)] - pred) ** 2 # /pred\r\n else:\r\n data[test_data.get_stations_col(2015)] = data[test_data.get_stations_col(2015)] - pred\r\n ind = data[data['Annee'] == 
0].index\r\n data.drop(ind, inplace=True)\r\n print(data.columns.values)\r\n i = 0\r\n if station is None:\r\n ch_an = test_data.get_stations_col(2015)\r\n else:\r\n ch_an = 'End date ' + str(station)\r\n for ch in learning_var:\r\n if not (ch[0] == 'h') and not (ch in ['LV', 'MMJ', 'SD', 'poudrerie', 'verglas']):\r\n # data.boxplot(ch_an, by=ch)\r\n # if ch != 'Heure':\r\n plt.figure(i // 9)\r\n # plt.title('squared error / expectation')\r\n fig = plt.subplot(3, 3, (i % 9) + 1)\r\n i += 1\r\n # fig = plt.figure().add_subplot(111)\r\n fig.set_xlabel(ch)\r\n if squared:\r\n fig.set_ylabel('error²')\r\n else:\r\n fig.set_ylabel('error')\r\n l = []\r\n xaxis = np.unique(data[ch])\r\n print(ch, xaxis.shape)\r\n if xaxis.shape[0] < 20 or ch == 'Heure':\r\n for u in xaxis:\r\n l.append(data[ch_an][data[ch] == u])\r\n else:\r\n m = np.min(data[ch])\r\n M = np.max(data[ch])\r\n step = (M - m) / 20\r\n xaxis = np.arange(m, M, step)\r\n for u in xaxis:\r\n l.append(data[ch_an][(data[ch] >= u) * (data[ch] < u + step)])\r\n xaxis = xaxis.astype(int)\r\n # fig = plt.boxplot(ch_an, by=ch)\r\n # g = data.groupby(ch).mean()[ch_an]\r\n # v = data.groupby(ch).std()[ch_an]\r\n plt.boxplot(l, labels=xaxis)\r\n if squared:\r\n plt.ylim((0, 12))\r\n else:\r\n plt.ylim((-5, 5))\r\n # plt.plot(g, '-r')\r\n # plt.plot(g + v, ':r')\r\n # plt.plot(g - v, ':r')\r\n plt.show()", "def plot_variables(self, n, show=False, diagnostics=False):\n\n if diagnostics:\n fig, ax = plt.subplots(5, 1, sharex = True, figsize = (10, 10))\n else:\n fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10))\n\n plt.subplots_adjust(hspace = 0)\n end = len(n.history[\"det F\"])\n epochs = np.arange(end)\n a, = ax[0].plot(epochs, n.history[\"det F\"], label = 'Training data')\n b, = ax[0].plot(epochs, n.history[\"det test F\"], label = 'Test data')\n # ax[0].axhline(y=5,ls='--',color='k')\n ax[0].legend(frameon = False)\n ax[0].set_ylabel(r'$|{\\bf F}_{\\alpha\\beta}|$')\n ax[0].set_title('Final Fisher info on test data: %.3f'%n.history[\"det test F\"][-1])\n ax[1].plot(epochs, n.history[\"loss\"])\n ax[1].plot(epochs, n.history[\"test loss\"])\n # ax[1].set_xlabel('Number of epochs')\n ax[1].set_ylabel(r'$\\Lambda$')\n ax[1].set_xlim([0, len(epochs)]);\n \n if diagnostics:\n ax[2].plot(epochs, n.history[\"det C\"])\n ax[2].plot(epochs, n.history[\"det test C\"])\n # ax[2].set_xlabel('Number of epochs')\n ax[2].set_ylabel(r'$|{\\bf C}|$')\n ax[2].set_xlim([0, len(epochs)]);\n \n # Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,0]\n , color = 'C0', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,1]\n , color = 'C0', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n \"\"\"\n\n # Test Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,0]\n , color = 'C1', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Test Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,1]\n , color = 'C1', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n ax[3].legend(frameon=False)\n \"\"\"\n\n ax[3].set_ylabel(r'$\\partial\\mu/\\partial\\theta$')\n # ax[3].set_xlabel('Number of epochs')\n ax[3].set_xlim([0, len(epochs)])\n\n # Mean of network output summary 1\n ax[4].plot(epochs, 
np.array(n.history[\"μ\"])[:,0],alpha=0.5)\n # Mean of test output network summary 1\n ax[4].plot(epochs, np.array(n.history[\"test μ\"])[:,0],alpha=0.5)\n ax[4].set_ylabel('μ')\n ax[4].set_xlabel('Number of epochs')\n ax[4].set_xlim([0, len(epochs)])\n \n\n print ('Maximum Fisher info on train data:',np.max(n.history[\"det F\"]))\n print ('Final Fisher info on train data:',(n.history[\"det F\"][-1]))\n \n print ('Maximum Fisher info on test data:',np.max(n.history[\"det test F\"]))\n print ('Final Fisher info on test data:',(n.history[\"det test F\"][-1]))\n\n if np.max(n.history[\"det test F\"]) == n.history[\"det test F\"][-1]:\n print ('Promising network found, possibly more epochs needed')\n\n plt.tight_layout()\n plt.savefig(f'{self.figuredir}variables_vs_epochs_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def all_conditions_visualized(self):\n # group dataframe based on standard deviation and effect size\n grouped_df = self.df.groupby([\"es\", \"sd1\", \"samp2\"])\n results = self.helpers.get_mean_df(grouped_df, False)\n print(results.sample())\n\n # seperate groups based on typeI or typeII error\n dfs = {\"typeI_df\": results[:8*7],\n \"ef_0_2\": results[8*7:8*7*2],\n \"ef_0_5\": results[8*7*2:8*7*3],\n \"ef_0_8\": results[8*7*3:]}\n\n for i in dfs:\n sdI = self.get_sd_list(dfs[i], False)\n dictI = {\"xlabel\": 'Group sizes', \"ylabel\": \"TypeI error\",\n \"title\": \"Mean typeI error per group size\",\n \"xtickslabels\": sdI}\n self.multiple_bars(dfs[i], 2, 2, dictI)" ]
[ "0.6873683", "0.6821231", "0.6672336", "0.66437536", "0.66437536", "0.66437536", "0.6629172", "0.6596364", "0.6589779", "0.6557499", "0.6521361", "0.6518009", "0.65085554", "0.64940095", "0.64903545", "0.64805514", "0.6445851", "0.6403377", "0.6347727", "0.63423556", "0.6334893", "0.6332524", "0.6318154", "0.63163507", "0.6250694", "0.62495553", "0.6179467", "0.6152781", "0.6146424", "0.6146058" ]
0.7091261
0
takes a discrete point in time and puts the position, velocity, and acceleration into a ROS JointTrajectoryPoint() to be added to a RobotTrajectory.
def trajectory_point(self, t, jointspace):
    """
    Takes a discrete point in time and puts the position, velocity, and
    acceleration into a ROS JointTrajectoryPoint() to be added to a RobotTrajectory.
    """
    point = JointTrajectoryPoint()
    delta_t = .01
    if jointspace:
        # Solve IK at three consecutive time samples so that velocity and
        # acceleration can be approximated by finite differences.
        theta_t_2 = self.get_ik(self.target_position(t - 2*delta_t))
        theta_t_1 = self.get_ik(self.target_position(t - delta_t))
        theta_t = self.get_ik(self.target_position(t))
        # we said you shouldn't simply take a finite difference when creating
        # the path; it is used here only as a quick numerical approximation
        point.positions = theta_t
        point.velocities = (theta_t - theta_t_1) / delta_t
        point.accelerations = (theta_t - 2*theta_t_1 + theta_t_2) / (2*delta_t)
    else:
        # Workspace trajectory: the analytic target functions already provide
        # position, velocity, and acceleration directly.
        point.positions = self.target_position(t)
        point.velocities = self.target_velocity(t)
        point.accelerations = self.target_acceleration(t)
    point.time_from_start = rospy.Duration.from_sec(t)
    return point
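A minimal sketch of how points produced by trajectory_point() might be collected into a RobotTrajectory, assuming only the standard trajectory_msgs and moveit_msgs message types; the build_robot_trajectory helper, the path object, and the joint_names, total_time, and num_points parameters are illustrative assumptions rather than part of the original class.

import numpy as np
from trajectory_msgs.msg import JointTrajectory
from moveit_msgs.msg import RobotTrajectory

def build_robot_trajectory(path, joint_names, total_time, num_points=50):
    # Sample the path at evenly spaced times and collect the resulting
    # JointTrajectoryPoints into a JointTrajectory message.
    traj = JointTrajectory()
    traj.joint_names = joint_names
    for t in np.linspace(0.0, total_time, num_points):
        traj.points.append(path.trajectory_point(t, jointspace=True))
    # Wrap the joint-space trajectory in a RobotTrajectory for planners/executors.
    robot_traj = RobotTrajectory()
    robot_traj.joint_trajectory = traj
    return robot_traj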
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trajectory_point(self, t, jointspace):\n point = JointTrajectoryPoint()\n delta_t = .01\n if jointspace:\n x_t, x_t_1, x_t_2 = None, None, None\n ik_attempts = 0\n theta_t = theta_t_1 = theta_t_2 = None\n while theta_t_2 is None:\n theta_t_2 = self.get_ik(self.target_position(t-2*delta_t))\n while theta_t_1 is None:\n theta_t_1 = self.get_ik(self.target_position(t-delta_t))\n while theta_t is None:\n theta_t = self.get_ik(self.target_position(t))\n \n # we said you shouldn't simply take a finite difference when creating\n # the path, why do you think we're doing that here?\n point.positions = theta_t\n # print 'theta_t: {0}, {1}'.format(theta_t.shape, theta_t)\n # print 'theta_t_1: {0}, {1}'.format(theta_t_1.shape, theta_t_1)\n\n vel_t_prev = (theta_t_1 - theta_t_2) / delta_t\n vel_t_curr = (theta_t - theta_t_1) / delta_t\n vel_t_avg = (vel_t_prev + vel_t_curr) / 2.\n\n point.velocities = vel_t_avg #(theta_t - theta_t_1) / delta_t\n point.accelerations = (theta_t - 2*theta_t_1 + theta_t_2) / (2*delta_t)\n # if t >= 3.:\n # import pdb; pdb.set_trace()\n else:\n point.positions = self.target_position(t)\n point.velocities = self.target_velocity(t)\n point.accelerations = self.target_acceleration(t)\n point.time_from_start = rospy.Duration.from_sec(t)\n return point", "def look_at(self, point, connector):\n\n\n try:\n point_camera = self.tfBuffer.transform(point, 'head')\n except (tf2.LookupException, tf2.ConnectivityException, tf2.ExtrapolationException) as e:\n rospy.loginfo(\"Waiting for transform... ({})\".format(e))\n return\n\n # Calculate the head joint angles and clip them to the right range\n angle_pan, angle_tilt = connector.head.get_motor_goals_from_point(point_camera.point)\n angle_pan = np.clip(np.rad2deg(angle_pan), connector.head.min_pan, connector.head.max_pan)\n angle_tilt = np.clip(np.rad2deg(angle_tilt), connector.head.min_tilt, connector.head.max_tilt)\n\n current_pan_pos, current_tilt_pos = connector.head.get_current_head_pos()\n if (abs(current_pan_pos - angle_pan) < connector.head.delta and\n abs(current_tilt_pos - angle_tilt) < connector.head.delta):\n # We reached the position\n if rospy.get_time() - self.position_reached_time > connector.head.wait_time:\n # We waited long enough, go back\n return self.pop()\n else:\n # Represent remaining wait time\n self.publish_debug_data(\"remaining_wait_time\",connector.head.wait_time - (rospy.get_time() - self.position_reached_time))\n\n else:\n # We haven't reached it\n # Update when we should reach it\n self.position_reached_time = rospy.get_time()\n connector.head.send_motor_goals(angle_pan, 30.0, angle_tilt, 30.0)\n\n # Represent remaining tilt\n self.publish_debug_data(\"remaining_tilt\",abs(current_pan_pos - angle_pan))\n self.publish_debug_data(\"remaining_pan\",abs(current_tilt_pos - angle_tilt))", "def getPointTrajectory(self, localPt: Vector3) -> Trajectory:\n return Trajectory(self.times,[so3.apply(m,localPt) for m in self.milestones])", "def generate_trajectory(t, v, waypoints, coeff_x, coeff_y, coeff_z):\n global yaw\n global current_heading\n yawdot = 0.0\n pos = np.zeros(3)\n acc = np.zeros(3)\n vel = np.zeros(3)\n jerk = np.zeros(3)\n snap = np.zeros(3)\n yawddot = 0.0\n\n # distance vector array, represents each segment's distance\n distance = waypoints[0:-1] - waypoints[1:]\n # T is now each segment's travel time\n T = (1.0 / v) * np.sqrt(distance[:,0]**2 + distance[:,1]**2 + distance[:,2]**2)\n # accumulated time\n S = np.zeros(len(T) + 1)\n S[1:] = np.cumsum(T)\n\n # find which segment current t 
belongs to\n t_index = np.where(t >= S)[0][-1]\n\n # prepare the next desired state\n if t == 0:\n pos = waypoints[0]\n t0 = get_poly_cc(8, 1, 0)\n\n # get X-Y plane project of velocity vector ( this vector is tangent to curve )\n v_proj = np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)])\n if(LA.norm(v_proj) == 0.0):\n # if velocity vector is of zero magnitude there should be no change in heading!\n pass\n else:\n current_heading = v_proj/LA.norm(v_proj) * (1.0 / T[0])\n \n\n # stay hover at the last waypoint position\n elif t > S[-1]:\n pos = waypoints[-1]\n else:\n # scaled time\n scale = (t - S[t_index]) / T[t_index]\n start = 8 * t_index\n end = 8 * (t_index + 1)\n\n t0 = get_poly_cc(8, 0, scale)\n pos = np.array([coeff_x[start:end].dot(t0), coeff_y[start:end].dot(t0), coeff_z[start:end].dot(t0)])\n\n t1 = get_poly_cc(8, 1, scale)\n # chain rule applied\n vel = np.array([coeff_x[start:end].dot(t1), coeff_y[start:end].dot(t1), coeff_z[start:end].dot(t1)]) * (1.0 / T[t_index])\n\n t2 = get_poly_cc(8, 2, scale)\n # chain rule applied\n acc = np.array([coeff_x[start:end].dot(t2), coeff_y[start:end].dot(t2), coeff_z[start:end].dot(t2)]) * (1.0 / T[t_index]**2)\n\n t3 = get_poly_cc(8, 3, scale)\n # apply chain rule\n jerk = np.array([coeff_x[start:end].dot(t3), coeff_y[start:end].dot(t3), coeff_z[start:end].dot(t3)]) * (1.0 / T[t_index]**3)\n\n t4 = get_poly_cc(8, 4, scale)\n # apply chain rule\n snap = np.array([coeff_x[start:end].dot(t4), coeff_y[start:end].dot(t4), coeff_z[start:end].dot(t4)]) * (1.0 / T[t_index]**4)\n\n # calculate desired yaw and yaw rate\n\n v_proj = np.array([vel[0], vel[1]])\n\n if( LA.norm(v_proj) == 0.0):\n # if velocity vector is zero, again there should be no change in heading\n next_heading = current_heading\n else:\n next_heading = v_proj/LA.norm(v_proj)\n\n \"\"\"\n try :\n #current_heading = v_proj/LA.norm(v_proj) #* (1.0 / T[0]) #np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)]) * (1.0 / T[0])\n next_heading = v_proj/LA.norm(v_proj)\n except ZeroDivisionError:\n # velocity vector magnitude was zero so there should be no change in heading!\n next_heading = current_heading\n \"\"\" \n\n # angle between current vector with the next heading vector\n # from a * b = |a|*|b|cos(angle)\n delta_psi = np.arccos(np.dot(current_heading, next_heading) / (LA.norm(current_heading)*LA.norm(next_heading)))\n # cross product allow us to determine rotating direction\n norm_v = np.cross(current_heading,next_heading)\n\n if norm_v > 0:\n yaw += delta_psi\n elif norm_v < 0:\n yaw -= delta_psi\n else:\n # normv = 0! 
if there is no change in yaw, do not modify it!\n pass\n\n # dirty hack, quadcopter's yaw range represented by quaternion is [-pi, pi]\n while yaw > np.pi:\n yaw = yaw - 2*np.pi\n\n # print next_heading, current_heading, \"yaw\", yaw*180/np.pi, 'pos', pos\n current_heading = next_heading\n #print(current_heading)\n yawdot = delta_psi / 0.005 # dt is control period\n max_yawdot = 5.0 #rad/s\n if(abs(yawdot) > max_yawdot):\n yawdot = (yawdot/abs(yawdot))*max_yawdot # make it 5rad/s with appropriate direction\n \n yaw = np.sin(2*t)*0.0\n yawdot = 2*np.cos(2*t)*0.0\n yawddot = -4*np.sin(2*t)*0.0\n return DesiredState(pos, vel, acc, jerk, snap, yaw, yawdot, yawddot)", "def trajectory_control(self,position_trajectory,\n yaw_trajectory,\n time_trajectory,\n current_time):\n ind_min = np.argmin(np.abs(np.array(time_trajectory)-current_time))\n time_ref = time_trajectory[ind_min]\n \n \n if current_time < time_ref:\n position0 = position_trajectory[ind_min-1]\n position1 = position_trajectory[ind_min]\n \n time0 = time_trajectory[ind_min-1]\n time1 = time_trajectory[ind_min]\n yaw_cmd = yaw_trajectory[ind_min-1]\n \n else:\n yaw_cmd = yaw_trajectory[ind_min]\n if ind_min >= len(position_trajectory)-1:\n position0 = position_trajectory[ind_min]\n position1 = position_trajectory[ind_min]\n \n time0 = 0.0\n time1 = 1.0\n else:\n\n position0 = position_trajectory[ind_min]\n position1 = position_trajectory[ind_min+1]\n time0 = time_trajectory[ind_min]\n time1 = time_trajectory[ind_min+1]\n \n position_cmd = (position1-position0)* \\\n (current_time-time0)/(time1-time0)+position0\n velocity_cmd = (position1-position0)/(time1-time0)\n \n \n return (position_cmd,velocity_cmd,yaw_cmd)", "def position(x,v,t,a):\n return x + v*t + 0.5*a*t**2 # pos = initial position + velocity*time + .5(acceleration)(time squared)", "def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_segments+1\n x_wp = np.linspace(self.CurrentPosition[0], NextwpPosition[0], self.no_of_segments+1)\n y_wp = np.linspace(self.CurrentPosition[1], NextwpPosition[1], self.no_of_segments+1)\n z_wp = np.linspace(self.CurrentPosition[2], NextwpPosition[2], self.no_of_segments+1)\n \n # add intial and final condiions vel, acc, jerk\n x_ic = np.array([0, 0, 0])\n x_fc = np.array([0, 0, 0])\n x0 = np.array([x_wp[0], x_ic[0], x_ic[1], x_ic[2]])\n xT = np.array([x_wp[-1], x_fc[0], x_fc[1], x_fc[2]])\n\n y_ic = np.array([0, 0, 0])\n y_fc = np.array([0, 0, 0])\n y0 = np.array([y_wp[0], y_ic[0], y_ic[1], y_ic[2]])\n yT = np.array([y_wp[-1], y_fc[0], y_fc[1], y_fc[2]])\n \n z_ic = np.array([0, 0, 0])\n z_fc = np.array([0, 0, 0])\n z0 = np.array([z_wp[0], z_ic[0], z_ic[1], z_ic[2]])\n zT = np.array([z_wp[-1], z_fc[0], z_fc[1], z_fc[2]])\n\n path = [np.sqrt((x_wp[i]-x_wp[i-1])**2 + (y_wp[i]-y_wp[i-1])**2 + (z_wp[i]-z_wp[i-1])**2) for i in range(1, self.no_of_segments+1, 1)]\n\n \n T = []; T.insert(0, 0)\n T.insert(1, T[-1] + path[0]/self.reduced_speed)\n for i in range(1, len(path)-1, 1):\n T.append(T[-1] + path[i]/self.average_speed)\n T.insert(len(T)+1, T[-1]+path[-1]/self.reduced_speed) \n\n\n\n\n #T = []; T.insert(0, 0) # insert 0 at 0 position\n #for i in range(self.no_of_segments): \n # T.append(T[-1]+path[i]/self.average_speed)\n\n r = self.r\n N = 1 + self.N # because number of terms in a polynomial = degree+1\n\n QQ = []; AA_inv 
= []\n\n for i in range(self.no_of_segments): \n q = self.construct_Q(N, r, T[i], T[i+1])\n a = self.construct_A(N, r, T[i], T[i+1])\n a_inv = scipy.linalg.pinv(a)\n QQ = block_diag(QQ, q)\n AA_inv = block_diag(AA_inv, a_inv)\n \n order = 2*r*self.no_of_segments\n R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))\n \n bx = self.construct_b(x0, xT)\n by = self.construct_b(y0, yT)\n bz = self.construct_b(z0, zT)\n\n m = Model(\"qp\")\n order = 2*r*self.no_of_segments\n dx = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dx\")\n dy = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dy\") \n dz = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dz\") \n\n # making objective using quicksum, takes a lot of time \n #obj1 = quicksum(dx[i] * quicksum(R[i][j] * dx[j] for j in range(order)) for i in range(order))\n #obj2 = quicksum(dy[i] * quicksum(R[i][j] * dy[j] for j in range(order)) for i in range(order))\n #obj3 = quicksum(dz[i] * quicksum(R[i][j] * dz[j] for j in range(order)) for i in range(order))\n \n # using LinExpr for the second expression is significantly faster \n obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for i in range(order))\n obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))\n obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))\n obj = obj1 + obj2 + obj3\n j = 0\n for i in range(order): \n if i < r: \n m.addConstr(dx[i] == bx[i])\n m.addConstr(dy[i] == by[i])\n m.addConstr(dz[i] == bz[i])\n elif i >= order-r: \n m.addConstr(dx[i] == bx[r+j])\n m.addConstr(dy[i] == by[r+j])\n m.addConstr(dz[i] == bz[r+j])\n j += 1\n \n c = 1 # counter\n for i in range(r, order-2*r, 2*r): \n #m.addConstr(dx[i] == self.x_wp[c])\n #m.addConstr(dy[i] == self.y_wp[c])\n #m.addConstr(dz[i] == self.z_wp[c])\n m.addConstr(dx[i] <= x_wp[c] + 0.2)\n m.addConstr(dx[i] >= x_wp[c] - 0.2)\n m.addConstr(dy[i] <= y_wp[c] + 0.2)\n m.addConstr(dy[i] >= y_wp[c] - 0.2)\n m.addConstr(dz[i] <= z_wp[c] + 0.2)\n m.addConstr(dz[i] >= z_wp[c] - 0.2)\n c = c+1\n for j in range(r): \n m.addConstr(dx[i+j] == dx[i+j+r])\n m.addConstr(dy[i+j] == dy[i+j+r])\n m.addConstr(dz[i+j] == dz[i+j+r])\n #if j ==2: \n # m.addConstr(dx[i+j] == 2.0)\n\n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0)\n m.setParam('PSDtol', 1e-1)\n m.optimize()\n\n\n runtime = m.Runtime\n\n\n x_coeff = [dx[i].X for i in range(order)]\n y_coeff = [dy[i].X for i in range(order)]\n z_coeff = [dz[i].X for i in range(order)]\n\n Dx = np.asarray(x_coeff)[np.newaxis].T\n Dy = np.asarray(y_coeff)[np.newaxis].T \n Dz = np.asarray(z_coeff)[np.newaxis].T \n pcx = np.dot(AA_inv, Dx); pcy = np.dot(AA_inv, Dy); pcz = np.dot(AA_inv, Dz)\n\n\n poly_coeff_x = pcx.T.ravel().tolist()\n poly_coeff_y = pcy.T.ravel().tolist()\n poly_coeff_z = pcz.T.ravel().tolist()\n\n return poly_coeff_x, poly_coeff_y, poly_coeff_z, T, time.time()\n #self.publish(poly_coeff_x, poly_coeff_y, poly_coeff_z)", "def point_head(self, point: Tuple[int, int], image_timestamp: Time):\n self.running = False\n\n point = Looker.correct_point(point)\n\n self.looker.target.header.frame_id = '/xtion_rgb_optical_frame'\n self.looker.pointing_frame = '/xtion_rgb_optical_frame'\n\n self.looker.pointing_axis.x = 0.0\n self.looker.pointing_axis.y = 0.0\n self.looker.pointing_axis.z = 1.0\n self.looker.max_velocity = 0.2\n\n look_point = PointStamped()\n 
look_point.header.frame_id = '/xtion_rgb_optical_frame'\n look_point.point.x = (point[0] - self.camera_intrinsics[0, 2]) / (self.camera_intrinsics[0, 0])\n look_point.point.y = (point[1] - self.camera_intrinsics[1, 2]) / (self.camera_intrinsics[1, 1])\n look_point.point.z = 1.0\n look_point.header.stamp = image_timestamp\n\n self.looker.target = look_point\n self.point_head_goal.send_goal(self.looker)", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def transform_point(p,R,t):\r\n x = R[0][0]*p[0]+R[0][1]*p[1]+t[0]\r\n y = R[1][0]*p[0]+R[1][1]*p[1]+t[1]\r\n return [x,y]", "def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)", "def create_joint_trajectory(start_position, end_position,\n duration_of_trajectory, frequency_of_trajectory):\n\n frequency_of_ros_messages = frequency_of_trajectory # in Hz.\n number_of_way_points = duration_of_trajectory * frequency_of_ros_messages\n number_of_joints = start_position.__len__()\n trajectory = np.zeros((number_of_joints, number_of_way_points))\n\n for i in xrange(number_of_joints):\n trajectory[i] = np.linspace(start_position[i], end_position[i],\n number_of_way_points)\n trajectory = trajectory.T.copy()\n vel_lims = np.diff(trajectory, axis=0)\n #Because this is discrete differentiation,\n # the last value is missing: len(vel_lims) = len(trajectory) - 1\n # so we just repeat the last calculated velocity.\n vel_lims = np.append(vel_lims, [[x for x in vel_lims[-1,:]]], axis = 0)\n vel_lims = vel_lims * frequency_of_trajectory\n vel_lims = np.absolute(vel_lims)\n\n if vel_lims.all() > 1.0:\n raise ValueError(\"One or more of the values in the specified velocities\"\n \"Exceed 1 rad / second. 
The robot won't like this.\"\n \"Adjust the trajectory so that each point can be \"\n \"reached without exceeding this limit.\")\n return trajectory, vel_lims", "def move_base(self, x, y, z):\n # fill ROS message\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n traj = trajectory_msgs.msg.JointTrajectory()\n traj.joint_names = [\"odom_x\", \"odom_y\", \"odom_t\"]\n p = trajectory_msgs.msg.JointTrajectoryPoint()\n p.positions = [x, y, z]\n p.velocities = [0, 0, 0]\n p.time_from_start = rospy.Time(15)\n traj.points = [p]\n goal.trajectory = traj\n\n # send message to the action server\n self.cli.send_goal(goal)\n\n # wait for the action server to complete the order\n self.cli.wait_for_result()\n return self._running", "def line_param(point_a, point_b, t):\n new_point = point_a - point_b\n return point_b + t*new_point", "def interp_cubic(p0, p1, t_abs):\n\tT = (p1.time_from_start - p0.time_from_start).to_sec()\n\tt = t_abs - p0.time_from_start.to_sec()\n\tq = [0] * 6\n\tqdot = [0] * 6\n\tqddot = [0] * 6\n\tfor i in range(len(p0.positions)):\n\t\ta = p0.positions[i]\n\t\tb = p0.velocities[i]\n\t\tc = (-3 * p0.positions[i] + 3 * p1.positions[i] - 2 * T * p0.velocities[i] - T * p1.velocities[i]) / T**2\n\t\td = (2 * p0.positions[i] - 2 * p1.positions[i] + T * p0.velocities[i] + T * p1.velocities[i]) / T**3\n\n\t\tq[i] = a + b * t + c * t**2 + d * t**3\n\t\tqdot[i] = b + 2 * c * t + 3 * d * t**2\n\t\tqddot[i] = 2 * c + 6 * d * t\n\treturn JointTrajectoryPoint(positions=q, velocities=qdot, accelerations=qddot, time_from_start=rospy.Duration(t_abs))", "def _trajectory_to_multi_dof_joint_trajectory(self, p, v, a, j, s, c):\n\n msg = tm.MultiDOFJointTrajectory()\n point = tm.MultiDOFJointTrajectoryPoint()\n msg.points.append(point)\n\n #print(p)\n\n transform = gm.Transform()\n transform.translation.x = p[0]\n transform.translation.y = p[1]\n transform.translation.z = p[2]\n quaternion = tft.quaternion_from_euler(0.0, 0.0, p[3])\n transform.rotation.x = quaternion[0]\n transform.rotation.y = quaternion[1]\n transform.rotation.z = quaternion[2]\n transform.rotation.w = quaternion[3]\n point.transforms.append(transform)\n\n velocity = gm.Twist()\n velocity.linear.x = v[0]\n velocity.linear.y = v[1]\n velocity.linear.z = v[2]\n velocity.angular.z = v[3]\n point.velocities.append(velocity)\n\n acceleration = gm.Twist()\n acceleration.linear.x = a[0]\n acceleration.linear.y = a[1]\n acceleration.linear.z = a[2]\n point.accelerations.append(acceleration)\n\n return msg", "def getTrajectoryMessage(action, jointOrder, velocity):\n # Set up a trajectory message to publish.\n actionMsg = JointTrajectory()\n actionMsg.joint_names = jointOrder\n # Create a point to tell the robot to move to.\n target = JointTrajectoryPoint()\n actionFloat = [float(i) for i in action]\n target.positions = actionFloat\n target.velocities = [velocity]*action.size\n\n target.time_from_start.nanosec = 1000000\n\n actionMsg.points = [target]\n return actionMsg", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # 
plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. 
Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. 
Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, 
nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)", "def goto_point(self,targetx,targety):\n #if point is 0,0, make 0.01,0.01 to avoid divide by 0\n if targetx == 0 and targety == 0:\n targetx = 0.01\n targety = 0.01\n self.targetdistance = math.sqrt((self.currentx-targetx)**2 + (self.currenty-targety)**2)\n self.targetangle = math.atan2(targety-self.currenty,targetx-self.currentx)\n self.angledifference = self.angle_diff(self.targetangle,self.orientation)\n if abs(self.angledifference) < .10:\n self.turnspeed = 0\n else:\n self.turnspeed = math.tanh(self.kturn*self.angledifference)\n self.speed = math.tanh(self.targetdistance*self.kspeed/self.angledifference)\n if self.speed < 0:\n self.speed = 0\n self.linearVector = Vector3(x=self.speed, y=0.0, z=0.0)\n self.angularVector = Vector3(x = 0.0, y = 0.0, z = self.turnspeed)\n # print \"currentx = \" + str(self.currentx)\n # print \"currenty = \" + str(self.currenty)\n # print \"orientation = \" + str(self.orientation)\n # print \"targetangle = \" + str(self.targetangle)\n # print \"angledifference = \" + str(self.angledifference)\n #print \"turnspeed = \" + str(self.turnspeed)\n #print \"speed = \" + str(self.speed)", "def execute_tp(self):\n self.status_message = \"State: Execute TP- Executing Motion Plan with trajectory planner\"\n self.current_state = \"execute_tp\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"tp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n self.tp.set_initial_wp()\n self.tp.set_final_wp(full_wp)\n\n if self.next_state == \"estop\":\n break\n # TODO: Set the positions and break if estop is needed\n self.tp.go()\n # self.rexarm.set_positions(wp)\n # time.sleep(1.5)", "def target(self, time, points, dt, num_way):\n start_index = min(int(time / dt), num_way - 1)\n end_index = min(start_index + 1, num_way - 1)\n start_point = points[start_index]\n end_point = points[end_index]\n fraction = float(time % dt) / dt\n return linear_interpolation_two_points(start_point, end_point, fraction).reshape(3)", "def 
compute_trajectory():\n pass", "def __init__(self, P, t):\n n = len(t)\n assert len(P) == n # same number of time and control points\n self.X, self.Y = _unzip(P) # points in X, Y components\n self._n = xrange(n) # time/control point iterator\n self.t = t", "def move_trapezoid(point, uvw_func, dt):\n uvw_1 = uvw_func(point)\n first_step = move(point, uvw_1, dt)\n uvw_2 = uvw_func(first_step)\n uvw = ((uvw_1[0] + uvw_2[0]) / 2,\n (uvw_1[1] + uvw_2[1]) / 2,\n (uvw_1[2] + uvw_2[2]) / 2)\n return move(point, uvw, dt)", "def moveTo(self, pt: Tuple[float, float]) -> None:\n raise NotImplementedError", "def _generate_trajectory(self, current_pos, velocity, goal, dt, prediction_time):\n\n history = list(np.copy(self._reached_goals))\n\n\n\n\n out = []\n out.append(np.copy(current_pos))\n first_goal_idx = np.where(self._goals == goal)[0][0]\n selected_goal = goal\n reached_goal = False\n counter_in_goal = 0\n\n\n\n for _ in range(prediction_time):\n\n # Particle reached selected goal\n # This will continuously chose a next goal, if a particle already reached its predecessor goal\n\n if np.linalg.norm(current_pos - selected_goal) <= 0.1:\n reached_goal = True\n\n\n\n\n\n\n if counter_in_goal > self.curr_stay_duration_goals[self._is_human_in_range_of_goal(selected_goal)[1]] / dt:\n\n selected_goal_idx = np.where(self._goals == selected_goal)[0][0]\n\n if len(history) > 0:\n if not selected_goal_idx == history[-1]:\n history.append(selected_goal_idx)\n else:\n history.append(selected_goal_idx)\n #print \"history:\"\n #print history\n # Select next goal based on the pre-learned goal-change probabilities\n\n\n #print \"selected goal {}\".format(selected_goal_idx)\n probs,available_goals = self.compute_transition_probs_for_goal_from_history(history,selected_goal_idx)\n\n\n for p in probs:\n if p < self._belief_threshold:\n p = 0.0\n\n print \"probs sampling: \"\n print probs / np.sum(np.asarray(probs))\n selected_goal = self._goals[np.random.choice(available_goals, p=probs / np.sum(np.asarray(probs)))]\n\n counter_in_goal = 0.0\n\n #print(\"switching\")\n\n else:\n counter_in_goal += 1\n #print(\"incr counter\")\n\n\n\n\n\n if reached_goal:\n #print self.curr_stay_duration_goals\n #print self.curr_stay_duration_goals[ self._is_human_in_range_of_goal(selected_goal)[1] ]\n\n new_pos = self.transition_human(current_pos, velocity, selected_goal, dt)\n\n out.append(new_pos)\n current_pos = new_pos\n\n\n\n else:\n new_pos = self.transition_human(current_pos, velocity, selected_goal, dt)\n out.append(new_pos)\n current_pos = new_pos\n\n return np.asarray(out)", "def verlet_next_pos(pos_t,vel_t,accel_t,dt):\n pos_t_plus_dt = pos_t.copy()\n pos_t_plus_dt += vel_t * dt + 0.5 * accel_t * (dt**2) \n\n return pos_t_plus_dt", "def send_joint_trajectory(trajectory, velocities, frequency=250):\n pub = rospy.Publisher(\"/wam/jnt_pos_cmd\", RTJointPos, queue_size=10)\n #If wam_node is running, it will be connected to this publisher.\n #Mostly this loop is here because you want to make sure the publisher\n #gets set up before it starts sending information.\n while pub.get_num_connections() < 1:\n print \"Waiting on the publisher to go up.\"\n rospy.sleep(0.5)\n\n trajectory_length = trajectory.__len__()\n finished = False\n traj_row = 0\n message_for_service = RTJointPos()\n\n r = rospy.Rate(frequency)\n\n while not rospy.is_shutdown() and not finished:\n message_for_service.joints = trajectory[traj_row]\n message_for_service.rate_limits = velocities[traj_row]\n traj_row += 1\n pub.publish(message_for_service)\n if traj_row 
== trajectory_length - 1:\n finished = True\n r.sleep()", "def _make_point(self, active_point):\n # --------------------------------\n # Create Random Parameters\n # --------------------------------\n for _ in range(self._k):\n # Defines radial distance from active_point.\n rho = np.random.uniform(self._radius, 2 * self._radius)\n # Defines angle from active_point. Requires multiple angles for higher dimensional planes.\n theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)]\n\n # --------------------------------\n # Create New Point\n # --------------------------------\n\n # Create a 2D point using first theta angle.\n new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])]\n # Generate more components of the coordinate for higher dimensional planes.\n new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])])\n new_point = np.array(new_point)\n\n # Confirm point is valid\n if self._valid_point(new_point):\n return new_point\n return None" ]
[ "0.7366635", "0.6119101", "0.6077254", "0.59451604", "0.5930126", "0.5914397", "0.5907721", "0.58628654", "0.58250636", "0.58205104", "0.57535905", "0.5750186", "0.5665363", "0.566258", "0.56621027", "0.5604078", "0.560013", "0.5575069", "0.5521112", "0.55184346", "0.5504239", "0.54938686", "0.5472923", "0.5468082", "0.5448964", "0.5443295", "0.5438601", "0.54358786", "0.54295266", "0.5414734" ]
0.7287572
1
Remember to call the constructor of MotionPath
def __init__(self, total_time, kin, limb, ar_tag_pos):
    # raise NotImplementedError
    self.r = .1
    MotionPath.__init__(self, limb, kin, total_time)
    self.ar_tag_pos = np.array([ar_tag_pos[0],ar_tag_pos[1],ar_tag_pos[2]])
    self.ar_tag_pos[2] = 0.282
    self.start_pos = [ar_tag_pos[0],ar_tag_pos[1]+self.r,ar_tag_pos[2]]
    self.w = 2*math.pi/self.total_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, limb, kin, total_time, goal_pos, num_way, start_pos=None):\n MotionPath.__init__(self, limb, kin, total_time)\n self.start_pos = start_pos\n self.goal_pos = goal_pos\n self.num_way = num_way\n self.base_frame = 'base'\n self.tool_frame = 'left_hand_camera' # 'left_gripper'\n self.plan_path()\n # self.plot_path()", "def __init__(self, limb, kin, total_time, center_pos, num_way, radius, z_height=0.1):\n MotionPath.__init__(self, limb, kin, total_time)\n self.center_pos = center_pos\n self.num_way = num_way\n self.radius = radius\n self.z_height = z_height\n self.base_frame = 'base'\n self.tool_frame = 'left_gripper'\n self.plan_path()\n self.plot_path()", "def __init__(self, total_time, kin, limb, start_pos, ar_tag_pos):\n #raise NotImplementedError\n MotionPath.__init__(self, limb, kin, total_time)\n start_pos = np.array([abs(start_pos[0]), abs(start_pos[1]), abs(start_pos[2])])\n\n\n #The ar tracking function was not working properly so we added the block below \n #to correct it.\n if start_pos[1]>start_pos[0]:\n a = start_pos[0]\n b = start_pos[1]\n start_pos[0] = b\n start_pos[1] = a\n print(\"### Modified start_pos ###\")\n\n\n ar_tag_pos = np.array([abs(ar_tag_pos[0]), abs(ar_tag_pos[1]), abs(ar_tag_pos[2])])\n if ar_tag_pos[1]>ar_tag_pos[0]:\n a = ar_tag_pos[0]\n b = ar_tag_pos[1]\n ar_tag_pos[0] = b\n ar_tag_pos[1] = a\n self.start_pos = start_pos\n\n print(\"!!!!!!!!!!!!!!!!start_pos!!!!!!!!!!!!!!!!\")\n print(start_pos)\n print(\"!!!!!!!!!ar_tag_pos in LinearPath!!!!!!!!!\")\n print(ar_tag_pos)\n ar_tag_pos[2] = start_pos[2]\n self.ar_tag_pos = ar_tag_pos\n self.points_generated = []", "def __init__(self, start_x, start_y):\n self.x = start_x\n self.y = start_y\n\n self._dead_end_direction = [[]]\n self._moved = False\n self._junction_index = [1]\n\n self.path = [[start_x, start_y]]", "def __init__(self,path=None, init_pos=None, wind_map_ang=None, \n wind_map_mag=None, inlet_ang=None, inlet_mag=None,\n ground_speed=10, ghost_delta=2.0, traj_res=0.1,\n sim_dt=0.1, mass=3.71, max_tilt_angle=35, \n air_speed=10, command_airspeed = False):\n self.ground_speed = ground_speed\n self.air_speed = air_speed\n self.command_airspeed = command_airspeed\n self.traj_res = traj_res\n self.sim_dt = sim_dt\n self.mass = mass\n self.max_tilt_angle = max_tilt_angle\n self.path = path\n self.payload = 0\n self.air_density = 1.1718938453052181\n if init_pos is None:\n init_x = 0\n init_y = 0\n init_z = 20\n else:\n init_x = init_pos[0]\n init_y = init_pos[1]\n init_z = init_pos[2]\n self.init_state = QuadState(pos=np.array([init_x,init_y,init_z]))\n self.states_hist = []\n self.append_states_hist(0, self.init_state)\n self.ghost_delta = ghost_delta #secs\n self.actual_path = [self.init_state]\n self.xy_path = [(init_x,init_y)]\n dryden_upsample = 10\n if command_airspeed:\n # print(\"commanding airspeed\")\n self.dryden = DrydenSim(self.sim_dt/dryden_upsample, self.air_speed)\n else:\n self.dryden = DrydenSim(self.sim_dt/dryden_upsample, self.ground_speed)\n\n if inlet_ang is not None and inlet_mag is not None:\n self.windField = WindField(wind_map_ang, wind_map_mag, \n inlet_ang, inlet_mag)\n else:\n self.windField = WindField(wind_map_ang, wind_map_mag)", "def __init__(self, coords, goal_coords, current_path_length) :\r\n\r\n self.coords = coords #The coordinates of the node\r\n\r\n #Calculating the g(n) value of node\r\n self.calculate_gn_value(current_path_length)\r\n\r\n #Calculating the h(n) value of node\r\n self.calculate_hn_value(goal_coords)\r\n\r\n #Calculating f(n) 
value of node\r\n self.calculate_fn_value()", "def __init__(self):\n self.positionx = 400\n self.positiony = 600\n # direction goes from [0,360)\n self.direction = (45)", "def __init__(self):\n self.center = Point()\n self.velocity = Velocity()", "def __init__(self, x, y):\n\n self.x = x\n self.y = y\n self.path_x = []\n self.path_y = []\n self.parent = None\n self.cost = 0.0", "def __init__(self, mass, x, y,px=0.0,py=0.0):\n self.mass = mass\n self.position = np.array([x,y])\n self.momentum = np.array([px,py])", "def __init__(self, mass, radius, position, velocity):\n self.mass = mass\n self.radius = radius\n self.position = position\n self.velocity = velocity\n print(self.velocity)\n self.vafter = np.copy(velocity) # temp storage for velocity of next step\n self.delete = False", "def __init__(self):\n super().__init__()\n self.waypoint_vector = [-1, 10]", "def __init__(self):\r\n self.position=(0,0)\r\n self.direction = 0.\r\n self.speed = 0\r\n self.state = 0\r\n pass", "def __init__(self,mass,time,**kwargs):\n self.setTime(time)\n ParticlePhaseCoordinates.__init__(self,mass,**kwargs)", "def __init__(self, inPs, outPs):\n self.angle = 0.0\n self.speed = 16.5\n\n super(MovementControl,self).__init__(inPs, outPs)", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All motion starts at (0,0).\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, x_coor, x_speed, y_coor, y_speed, direction):\n self.__x_coor = x_coor\n self.__x_speed = x_speed\n self.__y_coor = y_coor\n self.__y_speed = y_speed\n self.__direction = direction\n self.__radius = self.TORPEDO_RADIUS", "def __init__(self):\n self.position = Vector2()\n self.velocity = Vector2()\n self.update_parameters()\n self.mass = 0.18 # Mass of Sphero robot in kilograms", "def __init__(self):\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.total_distance_covered = 0.0", "def __init__(self, path):\r\n self.path = path\r\n # 0.75 deg per grid box latitudinally\r\n self.lat_unit = 360", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def __init__(self, x_axis_p, y_axis_p, x_speed=0, y_speed=0, direction=0):\r\n self.__x_axis = x_axis_p\r\n self.__y_axis = y_axis_p\r\n self.__X_speed = x_speed\r\n self.__y_speed = y_speed\r\n self.__direction = direction\r\n self.__lives = 3", "def __init__(self, coords, direction):\n self.y, self.x = coords\n self.direction = direction", "def __init__(self,x,y,width,height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.velocity_x = 0.0", "def __init__(self, Position, M):\n self.pos = Position # Sets x position\n self.m = M # Sets mass\n # Initial velocity and acceleration set to be zero\n self.vel = np.zeros((2,))\n self.acc = np.zeros((2,))", "def __init__(self, position, momentum, mass):\n self.position = position\n self.momentum = momentum\n self.mass = mass", "def __init__(self, pos):\r\n self.pos = pos\r\n self.vel = [0, 0]\r\n self.acc = [0, 0]\r\n self.heading = math.pi\r\n self.screen = [0, 0]", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h", "def __init__(self, direction=0, x=0, y=0):\n self.direction = direction\n self.x = x\n self.y = y", "def __init__(self, path_fun, disp_fun):\n \n if path_fun.nx != 1 or disp_fun.nx != 1:\n raise ValueError(\"path_fun and disp_fun must have 1 input variable\")\n \n # The number of output coordinates is defined by the\n # number of output functions of `path_fun`\n nQ = 
path_fun.nf\n \n # The number of input coordinates equals the number\n # of path-displacements plus 1.\n if disp_fun.nf % nQ != 0:\n raise ValueError('dispfun.nf must be a multiple of nQ')\n n = disp_fun.nf // nQ # The number of displacement coordinates \n \n nQp = n + 1 # The total number of input coordinates \n \n Qpstr = ['s'] + [f\"d{i:d}\" for i in range(n)]\n Qstr = [f\"Q{i:d}\" for i in range(nQ)]\n \n # The maxderiv is the minimum of either path_fun \n # or disp_fun \n maxderiv = dfun._merged_maxderiv(path_fun.maxderiv, disp_fun.maxderiv)\n \n # If the displace vectors have a finite zlevel, then the \n # zlevel for that term is just 1 more (from the linear dependence on \n # Q'). The total zlevel is then the merged zlevel of the path function\n # and the displacement term \n #\n if disp_fun.zlevel is None:\n disp_zlevel = None \n else: \n disp_zlevel = disp_fun.zlevel + 1 \n zlevel = dfun._merged_zlevel(path_fun.zlevel, disp_zlevel)\n \n super().__init__(self._pathtrans_fun, nQp = nQp, nQ = nQ,\n name = 'Path', Qpstr = Qpstr, Qstr = Qstr,\n maxderiv = maxderiv, zlevel = zlevel)\n \n self.path_fun = path_fun \n self.disp_fun = disp_fun" ]
[ "0.7820282", "0.7787644", "0.77386826", "0.6963891", "0.6757834", "0.65536493", "0.65116596", "0.64888525", "0.6445253", "0.63971484", "0.6376502", "0.636337", "0.63444155", "0.6312085", "0.6276762", "0.6248574", "0.62414914", "0.6194726", "0.6188483", "0.6171967", "0.61705893", "0.6142406", "0.6133022", "0.6122119", "0.60554755", "0.60498303", "0.6037605", "0.6024924", "0.5999505", "0.59954375" ]
0.78976136
0
Returns the arm's desired velocity in workspace coordinates at time t. You should NOT simply take a finite difference of self.target_position()
def target_velocity(self, time):
    x_v = self.w*self.r*cos(self.w*time)
    y_v = -self.w*self.r*sin(self.w*time)
    z_v = 0
    # raise NotImplementedError
    return np.array([x_v,y_v,z_v])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def target_velocity(self, time):\n return self.target(time, self.velocities, self.dt, self.num_way)", "def target_velocity(self, time):\n return self.target(time, self.velocities, self.dt, self.num_way)", "def target_velocity(self, time):\n path, path_time = self.get_current_path(time)\n return path.target_velocity(path_time)", "def robot_arm_vel(self):\n return self.sim.data.qvel[self.arm_index]", "def vel(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a1 + 2.0 * self.a2 * t + 3.0 * self.a3 * pow(t, 2) + 4.0 * self.a4 * pow(t, 3) + 5.0 * self.a5 * pow(t, 4)", "def get_velocity(self):\n return self.momentum/self.mass", "def target_velocity(self, time):\n pass", "def target_velocity(self, time):\n pass", "def velocity(self,l=None, t=None, squeeze=True):\n # convert the l or t input to a t input:\n if t is None and l is None:\n raise IOError('you must input l or t.')\n if t is None:\n l = np.array(l)\n if len(l.shape) == 0:\n l = np.array([l])\n t = self.time(l=l, squeeze=squeeze)\n else:\n t = np.array(t)\n if len(t.shape) == 0:\n t = np.array([t])\n \n #formatting to make sure I can perform the array operations\n g = np.expand_dims(\n np.expand_dims(self.params.GRAVITY,0),0)\n t = np.expand_dims(t - self.t,2)\n x = np.expand_dims(self.x,0)\n v = np.expand_dims(self.v,0)\n \n vel = v + g * t\n \n if squeeze:\n vel = np.squeeze(vel, axis=0)\n return vel", "def orbitalVelocity( self ): # returns [m/s]\n velocity = self.orbitalAngularVelocity * self.r # current orbital velocity [m/s]\n return velocity", "def target_velocity(self, time):\n\n avg_vel = (self.ar_tag_pos - self.start_pos)/self.total_time\n\n return avg_vel", "def velocity_trajectory(self):\n return self._read(MX_VELOCITY_TRAJECTORY)", "def getVelocity(self):\n return self.v", "def get_velocity(self):\n return (self._I85_msg_from_device(self.node.sdo[0x606c].phys)) / 10 # rad/s", "def getVelocity(self):\n\n return self.vel", "def velocity(self):\n return self._velocity", "def velocity(self):\n return self._velocity", "def velocity(self):\n return self.base_platform.velocity", "def getVelocity(self):\n\t\tif len(self.prevPositions) < 2:\n\t\t\tself.velocity = 0\n\t\telse:\n\t\t\ttime = self.position[2] - self.prevPositions[len(self.prevPositions)-1][2]\n\t\t\txdist = self.position[0][0] - self.prevPositions[len(self.prevPositions)-1][0][0]\n\t\t\tydist = self.position[0][1] - self.prevPositions[len(self.prevPositions)-1][0][1]\n\t\t\tself.velocity = (xdist,ydist,time.total_seconds())\n\t\treturn self.velocity\n\t\t\t#speed = math.pow(math.pow(1.0*xdist,2) + math.pow(1.0*ydist,2),0.5) / (1.0*time.total_seconds())", "def speed(v, t):\n rf = v.orbit.body.non_rotating_reference_frame\n vec = v3minus(v.velocity(rf), t.velocity(rf))\n a = vec[0] * vec[0]\n b = vec[1] * vec[1]\n c = vec[2] * vec[2]\n return math.sqrt(a + b + c)", "def target_velocity(self, s):\n # YOUR CODE HERE\n if s > self.total_length:\n s = self.total_length\n angular = np.array([0, 0 ,0])\n else:\n angular = np.array([0,0,self.targ_angular])\n\n theta = s/self.total_length*self.angle\n v_x = np.cos(theta)*self.targ_speed\n if self.left_turn:\n v_y = np.sin(theta)*self.targ_speed\n else:\n v_y = -np.sin(theta)*self.targ_speed\n \n linear = np.array([v_x, v_y, 0])\n # linear = np.append(np.dot(self.g, np.array([v_x, v_y, 0]))[:2], [0])\n \n toRet = [linear, angular]\n \n return toRet", "def relative_velocity(self):\n return self.base_platform.relative_velocity", "def 
target_velocity(self, s):\n if s > self.total_length:\n linear = np.array([0,0,0])\n else:\n linear = np.array([self.sign*self.targ_speed, 0, 0])\n \n angular = np.array([0,0,0])\n toRet = [linear, angular]\n\n return toRet", "def velocity(self) -> np.ndarray:\n return self._state[3:5]", "def get_angular_velocity(r, T):\n # http://www.hep.fsu.edu/~berg/teach/phy2048/0918.pdf\n # velocity = 2(pi)r/T\n return (2*math.pi*r)/T", "def obtain_vel_timestep(self, tstep):\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][\"VelocityX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][\"VelocityY\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][\"VelocityZ\"][\" data\"][:, :, :])]), 0, 1)", "def get_start_velocity(self):\n # uniform circular motion have a start velocity of omega\n # TODO generate from start position and rotation direction\n return np.array([0, self.wz, 0])", "def _compute_solar_torque(self):\n pass", "def target_velocity(self, s):\n raise NotImplementedError()", "def initial_velocity(self) -> float:\n return self._initial_velocity" ]
[ "0.7068331", "0.7068331", "0.70444334", "0.7034289", "0.6955907", "0.68580544", "0.6856776", "0.6856776", "0.67172503", "0.6685114", "0.66773754", "0.6674364", "0.66353154", "0.66283923", "0.6606402", "0.65924555", "0.65924555", "0.6582541", "0.6543292", "0.6520295", "0.651792", "0.6505511", "0.6492536", "0.6471303", "0.64436543", "0.64351404", "0.64217573", "0.6357475", "0.63086164", "0.63078463" ]
0.72390187
0
Returns the arm's desired x,y,z acceleration in workspace coordinates at time t. You should NOT simply take a finite difference of self.target_velocity()
def target_acceleration(self, time):
    x_a = -self.w**2*self.r*sin(self.w*time)
    y_a = -self.w**2*self.r*cos(self.w*time)
    z_a = 0
    # raise NotImplementedError
    return np.array([x_a,y_a,z_a])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def target_velocity(self, time):\n\n x_v = self.w*self.r*cos(self.w*time)\n y_v = -self.w*self.r*sin(self.w*time)\n z_v = 0\n # raise NotImplementedError\n return np.array([x_v,y_v,z_v])", "def acceleration(v,u,t):\n return ((v-u)/t)", "def acceleration(self):\n ux,uy = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n vx,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n \n ax = self._obj['u']*ux + self._obj['v']*uy\n ay = self._obj['u']*vx + self._obj['v']*vy\n\n self._obj['w'] = xr.DataArray(np.sqrt(ax**2+ay**2), dims=['x', 'y','t'])\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'{vel_units}^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'{vel_units}^2')\n\n\n return self._obj", "def target_acceleration(self, time):\n return self.target(time, self.accelerations, self.dt, self.num_way)", "def target_acceleration(self, time):\n return self.target(time, self.accelerations, self.dt, self.num_way)", "def get_acceleration(self,v,el,T_s,T_i):\n\n rad = radiation.Radiation()\n T_atm = rad.getTemp(el)\n p_atm = rad.getPressure(el)\n rho_atm = rad.getDensity(el)\n g = rad.getGravity(el)\n\n\n rho_int = p_atm/(self.Rsp_air*T_i) # Internal air density\n\n Cd = .5 # Drag Coefficient\n F_b = (rho_atm - rho_int)*self.vol*g # Force due to buyoancy\n F_d = Cd*(0.5*rho_atm*math.fabs(v)*v)*self.cs_area# Force due to Drag\n\n if F_d > 0:\n F_d = F_d * self.Upsilon\n vm = (self.massEnv + self.mp) + rho_atm*self.vol + self.vm_coeff*rho_atm*self.vol #Virtual Mass\n accel = ((F_b - F_d - (self.massEnv + self.mp)*g)/vm)\n\n return accel", "def f(r,t):\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n velocity = np.sqrt(vx**2+vy**2+vz**2)\r\n #if np.abs(z)>eps:\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n # equations for a cricket ball in motion\r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)", "def eval_accel(self,t,endBehavior='halt') -> Vector:\n res = Trajectory.deriv_state(self,t,endBehavior)\n return res[len(res)//2:]", "def get_analytical_accelerations(self):\n # create empty numpy array for accelerations\n accelerations = np.zeros((3, len(self.times)))\n # radial accelerations is equal to angular velocity^2 / radius but radius is unitary is this trajectory\n radial_acceleration = self.wz ** 2\n # decompose radial accelerations in x and y components\n accelerations[0, :] = radial_acceleration * -cos(self.th[:, 2])\n accelerations[1, :] = radial_acceleration * -sin(self.th[:, 2])\n # accelerations along x axis is constant\n accelerations[2, :] = self.ax\n return accelerations", "def position(x,v,t,a):\n return x + v*t + 0.5*a*t**2 # pos = initial position + velocity*time + .5(acceleration)(time squared)", "def target_acceleration(self, time):\n #return np.array([0, 0, 0])\n if time <= self.total_time/4:\n return self.path1.target_acceleration(time)\n elif time <= self.total_time/2:\n return self.path2.target_acceleration(time)\n elif time <= self.total_time/4*3:\n return self.path3.target_acceleration(time)\n else:\n return self.path4.target_acceleration(time)", "def target_acceleration(self, time):\n return np.array([0, 0, 0])", "def target_acceleration(self, time):\n path, path_time = self.get_current_path(time)\n return 
path.target_acceleration(path_time)", "def position(self, t):\n return vector_add(self.origin, self.direction.scale(t))", "def acceleration(self):\n return self.__accel", "def do_time_step(t, dt, x, y, z, vx, vy, vz, m, B2, g, S0, omega):\n t = t + dt\n x = x + vx*dt\n y = y + vy*dt\n z = z + vz*dt\n\n vz = vz - S0*vx*omega/m*dt # update vz first to use current value of vx\n v = math.sqrt(vx*vx + vy*vy + vz*vz)\n vx = vx - B2/m*v*vx*dt\n vy = vy - g*dt - B2/m*v*vy*dt\n\n return t, x, y, z, vx, vy, vz", "def target_position(self, time):\n\n x_pos = self.r*sin(self.w*time)+self.ar_tag_pos[0]\n y_pos = self.r*cos(self.w*time)+self.ar_tag_pos[1]\n z_pos = self.ar_tag_pos[2]\n # print(x_pos,y_pos)\n # raise NotImplementedError\n return np.array([x_pos,y_pos,z_pos])", "def speed(v, t):\n rf = v.orbit.body.non_rotating_reference_frame\n vec = v3minus(v.velocity(rf), t.velocity(rf))\n a = vec[0] * vec[0]\n b = vec[1] * vec[1]\n c = vec[2] * vec[2]\n return math.sqrt(a + b + c)", "def t2x(self, t ):\n \n # Find time index\n i = (np.abs(self.t_sol - t)).argmin()\n \n # Find associated control input\n x = self.x_sol[i,:]\n \n return x", "def get_analytical_velocities(self):\n # create empty numpy array for accelerations\n velocities = np.zeros((3, len(self.times)))\n # tangential velocity is angular velocity multiplied by radius but radius is one\n vt = self.wz\n # decompose tangential velocity in x and y components\n velocities[0, :] = vt * -sin(self.th[:, 2])\n velocities[1, :] = vt * cos(self.th[:, 2])\n # linear velocity along z axis\n velocities[2, :] = self.v0x + self.ax * self.times\n return velocities", "def _compute_solar_torque(self):\n pass", "def c( self , y , r , t ):\n \n u = self.trajectory.t2u( t )\n \n return u", "def OrbitPos(self, rv, t, m):\n \n params = np.array(rv)\n params = params.flatten()\n \n def GravityODE(rv,t):\n G = 6.67e-11\n m = 5.972e24\n x = rv[0]\n y = rv[1]\n vx = rv[2]\n vy = rv[3]\n \n dvydt = -((G*m*y)/((x**2+y**2)**(3/2)))\n dvxdt = -((G*m*x)/((x**2+y**2)**(3/2)))\n dxdt = vx\n dydt = vy\n\n pos_derivs = np.array([dxdt,dydt])\n v_deriv = np.array([dvxdt,dvydt])\n derivs = np.hstack((pos_derivs,v_deriv))\n \n return derivs \n \n satellite_orbit = integrate.odeint(GravityODE,params,t)\n \n return satellite_orbit[:,0],satellite_orbit[:,1]", "def to_cart(self, t):\n # mean motion:\n n = np.sqrt(self.GM / self.a / self.a / self.a) * 86400.0 # [rad/day]\n # mean anomaly at t:\n M = n * (t - self.t0) + self.M0\n # print(np.fmod(M, 2*np.pi))\n # solve Kepler equation, get eccentric anomaly:\n E = self.kepler(self.e, M)\n cosE = np.cos(E)\n sinE = np.sin(E)\n # get true anomaly and distance from focus:\n sinv = np.sqrt(1.0 - self.e ** 2) * sinE / (1.0 - self.e * cosE)\n cosv = (cosE - self.e) / (1.0 - self.e * cosE)\n r = self.a * (1.0 - self.e ** 2) / (1.0 + self.e * cosv)\n # r = self.a*(1 - self.e*cosE)\n #\n sinw = np.sin(self.w)\n cosw = np.cos(self.w)\n sinu = sinw * cosv + cosw * sinv\n cosu = cosw * cosv - sinw * sinv\n # position\n cosNode = np.cos(self.Node)\n sinNode = np.sin(self.Node)\n cosi = np.cos(self.i)\n sini = np.sin(self.i)\n x = r * (cosu * cosNode - sinu * sinNode * cosi)\n y = r * (cosu * sinNode + sinu * cosNode * cosi)\n z = r * sinu * sini\n # velocity\n p = self.a * (1.0 - self.e ** 2)\n V_1 = np.sqrt(self.GM / p) * self.e * sinv\n V_2 = np.sqrt(self.GM / p) * (1.0 + self.e * cosv)\n vx = x * V_1 / r + (-sinu * cosNode - cosu * sinNode * cosi) * V_2\n vy = y * V_1 / r + (-sinu * sinNode + cosu * cosNode * cosi) * V_2\n vz = z * V_1 / r + 
cosu * sini * V_2\n\n state = np.array([x, y, z, vx, vy, vz])\n state = np.reshape(np.asarray(state), (3, 2), 'F')\n\n return state", "def get_rotationalAngularPosition(self, t): # returns [rad]\n angle = self.theta0 + self.rotationalAngularVelocity * t # angular position [rad]\n return angle", "def advanceVelocity(self,time,acceleration):\n if not isinstance(acceleration,Cartesian3DVector):\n raise CoordinateException(\"Advancing particle momentum with the incorrect acceleration type.\")\n return self.v + time*acceleration", "def acceleration(self):\n # speed is by formula: x axis speed: by cos of the heading and y\n # axis by sine of the heading\n self.x_speed += math.cos(math.radians(self.degrees))\n self.y_speed += math.sin(math.radians(self.degrees))", "def get_start_velocity(self):\n # uniform circular motion have a start velocity of omega\n # TODO generate from start position and rotation direction\n return np.array([0, self.wz, 0])", "def compute_acceleration(self, x, y):\n a_x = G * self.m / (x*x + y*y) * -x/(np.sqrt(x*x + y*y))\n a_y = G * self.m / (x*x + y*y) * -y/(np.sqrt(x*x + y*y))\n return np.array([a_x, a_y])", "def path(self, t=10, u=None, x0=None):\n if x0 is None:\n x0 = np.zeros(3)\n\n def dynamics(t, x, vehicle, u):\n # u = vehicle.control(demand, x)\n \n return vehicle.deriv(x, u)\n\n if base.isscalar(t):\n t_span = (0, t)\n t_eval = np.linspace(0, t, 100)\n elif isinstance(t, np.ndarray):\n t_span = (t[0], t[-1])\n t_eval = t\n else:\n raise ValueError('bad time argument')\n sol = integrate.solve_ivp(dynamics, t_span, x0, t_eval=t_eval, method=\"RK45\", args=(self, u))\n\n return (sol.t, sol.y.T)" ]
[ "0.6654176", "0.65230024", "0.64852095", "0.64031357", "0.64031357", "0.63639945", "0.6288034", "0.62409705", "0.61979336", "0.6173782", "0.61587775", "0.6153133", "0.6142052", "0.6091978", "0.6033012", "0.60062426", "0.599504", "0.5980418", "0.58893937", "0.58403546", "0.58055806", "0.5783974", "0.5782522", "0.5757616", "0.5739489", "0.56724524", "0.56581587", "0.56543696", "0.5645363", "0.5641847" ]
0.70134026
0
Reduce this Dataset's data by applying ``count`` along some dimension(s).
def count(
    self,
    dim: Dims = None,
    *,
    keep_attrs: bool | None = None,
    **kwargs: Any,
) -> Dataset:
    return self.reduce(
        duck_array_ops.count,
        dim=dim,
        numeric_only=False,
        keep_attrs=keep_attrs,
        **kwargs,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def count(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"count\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.count,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def count(self, axis=None):\n return self.data.count(axis=axis)", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def count_dims(da):\n return len(da.dims)", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def size(self, index):\n return self.d1.size(index)\n # FILTER BASED ON D1", "def density_to_counts(\n x: Union[_cpp.DataArray, _cpp.Dataset], dim: str\n) -> Union[_cpp.DataArray, _cpp.Dataset]:\n return _call_cpp_func(_cpp.density_to_counts, x, dim)", "def dimension_count(self):\n return self._dimensionCount", "def __len__(self):\n return self.flatten_dim(self.shape[0])", "def getDataCount(self, filter: t.Mapping[t.Text, t.Any] = {}\n ) -> DatasetCount:\n aggregate_data = self.getAggregateData(\n pipeline={\"count\": {\"$sum\": 1}},\n filter=filter,\n )\n count = first(aggregate_data.data)[\"count\"]\n return DatasetCount(count=count)", "def counts_to_density(\n x: Union[_cpp.DataArray, _cpp.Dataset], dim: str\n) -> Union[_cpp.DataArray, _cpp.Dataset]:\n return _call_cpp_func(_cpp.counts_to_density, x, dim)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = 
d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def __len__(self):\n return self.dataset.shape[0]", "def __len__(self):\n return sum(f.count for f in self.filters)", "def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)", "def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)", "def dtype_element_count( dtype, name = None ):\n def count( descr ):\n if len( descr ) > 2:\n shape = descr[ 2 ]\n # multiply the shape\n return reduce( lambda x, y: x * y, shape )\n else:\n return 1\n\n if name:\n shape = dtype[ name ].shape\n else:\n shape = dtype.shape\n\n if len(shape) > 0:\n return reduce( lambda x, y: x * y, shape )\n else:\n descr = dtype.descr\n size = 0\n for type in descr:\n size += count( type )\n return size", "def dim_reduction(data_set, components):\n transformed = []\n index = -1\n transformed = data_set @ components\n return transformed", "def count(self):\r\n return self.data_array.size", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def reduce_dimension(self, n_components=2):\n\n reducer = PCA(n_components=n_components)\n\n X = self.data.values.astype(np.float32)\n\n norm = Normalizer()\n Xnorm = norm.fit_transform(X)\n\n return reducer.fit_transform(Xnorm)", "def dim(self):\n raise NotImplementedError" ]
[ "0.730545", "0.69733924", "0.69733924", "0.69529074", "0.69529074", "0.6121086", "0.60577554", "0.5984994", "0.5982864", "0.59388554", "0.5934392", "0.5903536", "0.5890546", "0.5856696", "0.5820845", "0.5794014", "0.5787277", "0.5708209", "0.56682146", "0.5646369", "0.5555573", "0.55479443", "0.55466497", "0.55341995", "0.5512039", "0.54768074", "0.5466465", "0.54419017", "0.5440367", "0.54394823" ]
0.7285971
1
Reduce this Dataset's data by applying ``prod`` along some dimension(s).
def prod(
    self,
    dim: Dims = None,
    *,
    skipna: bool | None = None,
    min_count: int | None = None,
    keep_attrs: bool | None = None,
    **kwargs: Any,
) -> Dataset:
    return self.reduce(
        duck_array_ops.prod,
        dim=dim,
        skipna=skipna,
        min_count=min_count,
        numeric_only=True,
        keep_attrs=keep_attrs,
        **kwargs,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"prod\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out", "def prod(tensor, axis=None):\n raise NotImplementedError", "def prod(self):\n return self._summarize(lambda c: c.prod)", "def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not 
in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)", "def prod(self, values):\n return self.aggregate(values, \"prod\")", "def convert_prod(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceProd',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceProd',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]", "def prod(self):\n # skipna == True\n # only_numerical == True\n return self._lift(\"prod\")", "def prod(self, x, y):\n return self.reduce(x + y)", "def prod(self):\n r = 0\n for i in range(len(self)):\n r *= self[i]\n\n return r", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)", "def prod(iterable):\n \n return reduce(operator.mul, iterable, 1)", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(self, args):\n assert len(args) > 0, \"Cannot compute an empty product in a semigroup\"\n return prod(args[1:], args[0])", "def prod(x):\n return functools.reduce(lambda a, b: a * b, x, 1)", "def frob_prod(self, B):\n m, n = self.shape\n k, r = B.shape\n assert (m == k\n and n == r), (\"Distinct shapes ({}, {}) and ({}, {})\".format(\n m, n, k, r))\n sum_ = 0\n for i in range(m):\n for j in range(n):\n sum_ += self[(i, j)] * B[(i, j)]\n return sum_", "def prod(self, 
values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"prod\", skipna)\n return k, cast(pdarray, v)" ]
[ "0.7481061", "0.735098", "0.714532", "0.714532", "0.700226", "0.700226", "0.6688766", "0.65455145", "0.65137213", "0.64294696", "0.6274849", "0.6176995", "0.6066634", "0.59346235", "0.5929033", "0.591033", "0.591033", "0.591033", "0.58753616", "0.5752229", "0.5747925", "0.5747925", "0.5747925", "0.57227224", "0.57227224", "0.57227224", "0.5711836", "0.5691028", "0.5679095", "0.5631104" ]
0.74074686
1