| Column | Type | Values |
| --- | --- | --- |
| query | stringlengths | 9 to 9.05k |
| document | stringlengths | 10 to 222k |
| metadata | dict | |
| negatives | listlengths | 30 to 30 |
| negative_scores | listlengths | 30 to 30 |
| document_score | stringlengths | 4 to 10 |
| document_rank | stringclasses | 2 values |
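Read as a table, the fields describe a code-retrieval dataset: each row pairs a natural-language query with a positive code document, a list of 30 negative snippets, what appear to be per-negative scores, and the positive's own score and rank. A minimal loading sketch with the Hugging Face `datasets` library follows; the repository path is a placeholder, since this preview does not name the dataset.

```python
from datasets import load_dataset

# Hypothetical repository path -- the actual dataset name is not given in this preview.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language description of the target function
print(row["document"][:120])        # positive code snippet (truncated for display)
print(len(row["negatives"]))        # 30 negative code snippets per row
print(float(row["document_score"]), row["document_rank"])  # scores are stored as strings
```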
Return bdaqmid corresponding to an index.
def GetBDAQMid(self, index): return self._bdaqmids[index]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qindex2index(index):\n r = index.row()\n c = index.column()\n if c > 0x10:\n return (0x10 * r) + c - 0x11\n else:\n return (0x10 * r) + c", "def get_id(self, index):\n return self.__keys[index]", "def get_index(self, index):\n return self.get_node_from_index(index).data", "def get_q_id_by_rec_idx(self, rec_idx):\n return self._record_idx_to_record[rec_idx]['q_id']", "def getbarcidxj(self,idx_):\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarcidxj(self.__nativep,idx_,ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n j_ = j_.value\n _j_return_value = j_\n return (_j_return_value)", "def get_indexname(self,index):\n if index in self.indexname2index:\n return index\n else:\n for name,i in self.indexname2index.items():\n if self.index(index)==i:\n return name\n return None", "def cmid(self):\n return self[\"cmid\"]", "def detect_k_from_index(index_dir):\n\n config = load_index_config(index_dir)\n return config['k']", "def index2qindexb(self, index):\n r = index // 0x10\n c = index % 0x10\n return self.index(r, c)", "def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)", "def get_key_at_index(self, index):\n return self.chain_key.subkey(index)", "def get_block_hash(index):\n # TODO: Require implementation\n pass", "def get_dna_value(self, index: int):\n return self.dna[index]", "def get_by_index_key(self, index, key=str):\n return str(self.get(key, self.get_all_childname(key)[index]))", "def getblockhash(self, index):\n return self.proxy.getblockhash(index)", "def index_to_ijk(self, index):\n return self.indices_to_ijk_array([index])[0]", "def value_for_index(self, index):\r\n return self[self.keyOrder[index]]", "def get_index(self, qubit_name):\n if isinstance(qubit_name, int):\n return qubit_name\n try:\n return self.qubitDict[qubit_name]\n except KeyError:\n return self.readoutDict[qubit_name]", "def get_device_id(self, device_index):\n return self.drt_manager.get_id_from_index(device_index)", "def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)", "def get_index(self, key):\r\n\t\tindex = self._hash_function(key) % self.capacity\r\n\t\treturn index", "def get_prefix(self, index):\n return bytes(self.bytes[:index])", "def _get_index(self, key):\n return self._hash_function(key) % self.capacity", "def getbarcidxinfo(self,idx_):\n num_ = ctypes.c_int64()\n res = __library__.MSK_XX_getbarcidxinfo(self.__nativep,idx_,ctypes.byref(num_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n num_ = num_.value\n _num_return_value = num_\n return (_num_return_value)", "def __get_column(self, index: int) -> int:\n return index % self.columns", "def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)", "def mid(self):\n return self._mid", "def get_key(self, proxy_index):\n return self.treeItem(proxy_index)", "def getB(self, idx):\n if isinstance(idx, int):\n return self.dB[[idx]]\n else:\n return self.dB[idx]", "def _get_cand_index(signature):\n\n # This map translates between the last \"I<n>\" field value and the\n # actual CAND cell index.\n INDEX_MAP = {\n 10: 0,\n 9: 1,\n 8: 2,\n 7: 3,\n 6: 4,\n 
}\n\n # Split the signature\n parts = signature.split(\".\")\n\n # Get the last \"I<n>\" field\n for i, word in enumerate(parts):\n if word in [\"I_hilojoint\", \"I_enjoint\"]:\n part = parts[i-1]\n break\n else:\n assert False, signature\n\n # Decode the index\n idx = int(part[1:])\n\n # Remap the index\n assert idx in INDEX_MAP, (signature, idx)\n return INDEX_MAP[idx]" ]
[ "0.6528486", "0.58731806", "0.5741325", "0.5674524", "0.5642983", "0.5593458", "0.55704445", "0.55371284", "0.55042464", "0.5444057", "0.5420806", "0.5414107", "0.5401721", "0.5376091", "0.53653544", "0.5362031", "0.5356424", "0.5294231", "0.5288574", "0.52797323", "0.52567625", "0.52326137", "0.5198776", "0.5197813", "0.519339", "0.51822996", "0.51760024", "0.51680493", "0.51659906", "0.5165523" ]
0.7945044
0
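Each row's metadata declares a single triplet objective over (query, document, negatives), so the natural unit of consumption is one anchor paired with its positive and each of the 30 negatives in turn. The sketch below is one plain-Python way to expand a row into such triplets; it assumes rows behave like the dicts in the loading sketch above and is not tied to any particular training library.

```python
def row_to_triplets(row: dict):
    """Yield (anchor, positive, negative) triplets as declared in row["metadata"]."""
    anchor = row["query"]
    positive = row["document"]
    for negative in row["negatives"]:
        yield anchor, positive, negative

# Example with the first row shown above (negatives abbreviated here):
row = {
    "query": "Return bdaqmid corresponding to an index.",
    "document": "def GetBDAQMid(self, index): return self._bdaqmids[index]",
    "negatives": ["def get_id(self, index):\n    return self.__keys[index]"],
}
triplets = list(row_to_triplets(row))
assert len(triplets) == len(row["negatives"])
```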
Remove existing strategy from the model.
def RemoveStrategy(self):
    self.strategy = None
    self.postracker = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unload(self):\r\n self.gox.signal_strategy_unload(self, None)\r\n self.strategy_object_list = []", "def remove(self):\n self.model_or_sim.remove_package(self)", "def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None", "def remove_model(self, model):\n assert isinstance(model, Model)\n\n self.model_list.remove(model)\n del self.model_dict[model.model_id]\n model.structure = None\n\n ## if the default model is being removed, choose a new default model\n ## if possible\n if model == self.default_model:\n if len(self.model_list) > 0:\n self.default_model = self.model_list[0]\n else:\n self.default_model = None", "def delete (self):\n self.binary_features_model._remove_feature(self)\n del Feature._cache[(self.binary_features_model, self.name)]\n self.binary_features_model = None", "def remove(self, name_or_klass):\n _logger().log(5, 'removing mode %r', name_or_klass)\n mode = self.get(name_or_klass)\n mode.on_uninstall()\n self._modes.pop(mode.name)\n return mode", "def __del__(self) -> None:\n if hasattr(self, \"model\") and hasattr(self, \"_destruct\"):\n self._destruct(self.model)", "def delete(self, using=None):\n self.model.remove_field(self)", "def remove_from_model(self, destructive=False):\n self._model.remove_metabolites(self, destructive)", "def unregister_model(self, storagemodel:object, delete_table=False):\n \n # get modeldefinition\n modeldefinition = self.getmodeldefinition(storagemodel, True)\n\n # remove from modeldefinitions\n for i in range(len(self._modeldefinitions)):\n if self._modeldefinitions[i]['modelname'] == modeldefinition['modelname']:\n del self._modeldefinitions[i]\n break\n \n # delete table from storage if delete_table == True \n if delete_table:\n self.__deletetable__(modeldefinition)\n pass", "def remove_model(model):\n rospy.wait_for_service('/gazebo/delete_model')\n try:\n rospy.logwarn(\"Call the method for removing the model: \" + model)\n remove_model_proxy = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)\n remove_model_proxy(model)\n except rospy.ServiceException, ex:\n print \"Service call delete_model failed: %e\" % ex", "def unregister_resource_for_model(model):\n del _model_to_resources[model]", "def delete_model_package(ModelPackageName=None):\n pass", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n traci.vehicle.remove(self.id)", "def remove(self):\n traci.vehicle.remove(self.id)", "def delete(self):\n self.dbm().model_delete(self)" ]
[ "0.6563255", "0.6157167", "0.6131671", "0.6086076", "0.60653824", "0.6010454", "0.59444267", "0.58840036", "0.58366513", "0.5824092", "0.5808003", "0.57579666", "0.573553", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57235825", "0.57022876", "0.57022876", "0.566604" ]
0.788478
0
Update the strategy frequency.
def UpdateFrequency(self, newfreq):
    if self.strategy:
        setattr(self.strategy, managers.UTICK, newfreq)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_frequencies():\n pass", "def _update(self, count=True, forced=False):", "def change_frequency(self):\n if not self.ftext.text():\n return\n frequency = float(self.ftext.text())\n if frequency > 6.0:\n frequency = 6.0\n self.qbpm.change_frequency(frequency)\n self.ftext.setText(str(self.qbpm.frequency))", "def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()", "def _number_frequency_changed(self, *a):\r\n self.api.set_frequency(self.number_frequency.get_value())", "def setFreq(self,newfreq):\n\t\tself.freq = newfreq;", "def frequency(self, frequency: int):\n self._freq = freq", "def set_frequency(self):\n def f():\n freq = float(self.freq_edit.get())\n duty = float(self.duty_edit.get())\n if duty == 0:\n duty = 1\n if duty > 1:\n duty = duty / 100\n self.parent.update_frequency(freq, duty, self.model.upper())\n return f", "def update_freq_dist(filename):\r\n pass", "def update_count(self):\n pass", "def set_frequency(self, newval):\n rest_val = str(int(round(newval * 65536.0, 1)))\n return self._setAttr(\"frequency\", rest_val)", "def frequency(self, frequency: int):\n\n self._frequency = frequency", "def frequency(self, frequency):\n\n self._frequency = frequency", "def change_frequency(self, frequency):\n self.frequency = frequency\n self.change_backlog(self.backlog)", "def set_Freq(self,freq):\n super(self.__class__, self).setFreq(self, freq)", "def set_frequency(self, f=1e9):\r\n self.f = f", "def update_count(self):\n pass # Do nothing", "def GetFrequency(self):\n ...", "def set_frequency(self, f=1e9):\r\n return self._api.set_frequency(f)", "def freq(self, frequency: Optional[int]):", "def update(self, word, freq):\n if word in self.dict:\n self.dict[word] = freq", "def updateItem(self, value):\n self.value = value\n self.age = 0\n self.freq += 1", "def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]", "def update_during_training(self, trainer, update_frequency):\n self.ready()\n trainer.every(update_frequency, lambda _1, _2: self.update())", "def update_cluster_freq(self, state, cpu_id):\n # For IKS devices cluster changes only possible when\n # freq changes, for other it is determine by cpu_id.\n if self.device.scheduler != 'iks':\n self.current_cluster = self.get_cluster(cpu_id, state)\n if self.get_state_name(state) == \"freqstate\":\n self.current_cluster = self.get_cluster(cpu_id, state)\n self.current_frequency_of_clusters[self.current_cluster] = state", "def update_counts(self, new_alpha, new_beta, decay):\n\n self._alpha = self._alpha / decay + new_alpha\n self._beta = self._beta / decay + new_beta\n self._n_updates += 1", "def update(self, strategy):\n if strategy.experience.current_experience <= self._current_experience:\n TopkAccuracyPluginMetric.update(self, strategy)", "def frequency(self, freq):\n self.set_frequency(f'{freq}' if self._is_min_max(freq) else f'{freq}HZ')", "def freq(self, freq=None):\n raise NotImplementedError()", "def update_period(self):\n return 0.1" ]
[ "0.7685292", "0.69893366", "0.6860977", "0.67751503", "0.6760968", "0.6751761", "0.6685722", "0.66378486", "0.6615687", "0.6576171", "0.6569539", "0.6564842", "0.6536715", "0.65170115", "0.65103734", "0.6373934", "0.63418764", "0.63390464", "0.6316844", "0.6234857", "0.6225441", "0.6188111", "0.61593825", "0.6147146", "0.6139528", "0.6124189", "0.6104622", "0.60946125", "0.60805947", "0.60637224" ]
0.8178904
0
Attach the disk to the instance at mountpoint using info.
def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attach_volume(self, instance_name, device_path, mountpoint):\n return True", "def _AttachDisk(self, idx, params, _):\n uuid = params.get(\"uuid\", None)\n name = params.get(constants.IDISK_NAME, None)\n\n disk = self.GenericGetDiskInfo(uuid, name)\n\n # Rename disk before attaching (if disk is filebased)\n if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:\n # Add disk size/mode, else GenerateDiskTemplate will not work.\n params[constants.IDISK_SIZE] = disk.size\n params[constants.IDISK_MODE] = str(disk.mode)\n dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)\n new_logical_id = dummy_disk.logical_id\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(disk, new_logical_id)])\n result.Raise(\"Failed before attach\")\n self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)\n disk.logical_id = new_logical_id\n\n # Attach disk to instance\n self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n changes = [\n (\"disk/%d\" % idx,\n \"attach:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n\n disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,\n disks=[disk])\n if not disks_ok:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n return disk, changes\n\n if self.op.hotplug:\n _, link_name, uri = payloads[0]\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n data = connection_info['data']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n luns = [i.lun for i in data_disks]\n new_lun = 1\n # azure allow upto 16 extra datadisk, 1 os disk + 1 ephemeral disk\n # ephemeral disk will always be sdb for linux.\n for i in range(1, 16):\n if i not in luns:\n new_lun = i\n break\n else:\n msg = 'Can not attach volume, exist volume amount upto 16.'\n LOG.error(msg)\n raise nova_ex.NovaException(msg)\n disk = self.disks.get(CONF.azure.resource_group, data['disk_name'])\n managed_disk = dict(id=disk.id)\n data_disk = dict(lun=new_lun,\n name=data['disk_name'],\n managed_disk=managed_disk,\n create_option='attach')\n data_disks.append(data_disk)\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Attach Volume to Instance in Azure finish\"),\n instance=instance)", "def attach_volume(self,\n context,\n connection_info,\n instance,\n mountpoint,\n disk_bus=None,\n device_type=None,\n encryption=None):\n\n def _check_available_lun(data_disks):\n # We can attach upto 16 data disks to an instance\n luns = [i.lun for i in data_disks]\n for i in range(1, 16):\n if i not in luns:\n return i\n raise Exception(\"Could not attach volume\")\n\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n lun = _check_available_lun(data_disks)\n name = volume_data['name']\n id = volume_data['id']\n data_disk = {\n 'name': name,\n 'create_option': 'attach',\n 'lun': lun,\n 'managed_disk': {\n 'id': id\n }\n }\n data_disks.append(data_disk)\n utils.create_or_update_instance(self.compute_client,\n 
drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Attached volume %s to instance %s\" % (name, instance.uuid))", "def attach_volume(self, connection_info, instance, mountpoint):\n instance_name = instance['name']\n vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)\n if vm_ref is None:\n raise exception.InstanceNotFound(instance_id=instance_name)\n # Attach Volume to VM\n LOG.debug(_(\"Attach_volume: %(connection_info)s, %(instance_name)s, \"\n \"%(mountpoint)s\") % locals())\n driver_type = connection_info['driver_volume_type']\n if driver_type not in ['iscsi']:\n raise exception.VolumeDriverNotFound(driver_type=driver_type)\n data = connection_info['data']\n mount_unit = volume_util.mountpoint_to_number(mountpoint)\n\n # Discover iSCSI Target\n device_name, uuid = self.discover_st(data)\n if device_name is None:\n raise volume_util.StorageError(_(\"Unable to find iSCSI Target\"))\n\n # Get the vmdk file name that the VM is pointing to\n hardware_devices = self._session._call_method(vim_util,\n \"get_dynamic_property\", vm_ref,\n \"VirtualMachine\", \"config.hardware.device\")\n vmdk_file_path, controller_key, adapter_type, disk_type, unit_number \\\n = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)\n # Figure out the correct unit number\n if unit_number < mount_unit:\n unit_number = mount_unit\n else:\n unit_number = unit_number + 1\n self.attach_disk_to_vm(vm_ref, instance_name,\n adapter_type, disk_type=\"rdmp\",\n controller_key=controller_key,\n unit_number=unit_number,\n device_name=device_name)\n LOG.info(_(\"Mountpoint %(mountpoint)s attached to \"\n \"instance %(instance_name)s\") % locals())", "def connect_disk(self, instance, disk_info, stg_ftsk=None):\n raise NotImplementedError()", "def mount(fstype, export, vol_name, mountpoint, instance, options=None):\n with __manager__.get_state() as mount_state:\n mount_state.mount(fstype, export, vol_name, mountpoint, instance,\n options)", "def attach_disk(self, instance, disk, zone):\n return self.call_api(\n '/zones/%s/instances/%s/attachDisk' % (zone, instance),\n method='POST',\n payload={\n 'autoDelete': True,\n 'deviceName': disk,\n 'source': 'projects/%s/zones/%s/disks/%s' % (\n self.project_id, zone, disk),\n },\n )", "def attachDisk(\n positive, alias, vm_name, active=True, read_only=False, disk_id=None,\n interface='virtio', bootable=None,\n):\n if disk_id:\n name = disk_id\n attribute = 'id'\n else:\n name = alias\n attribute = 'name'\n disk_object = get_disk_obj(name, attribute)\n # This is only needed because for legacy reason we also want to modify\n # the read_only property when we attach a disk\n # Also for attaching a disk the active parameter is pass inside the disk\n # object\n updated_disk = _prepareDiskObject(\n id=disk_object.get_id(), read_only=read_only\n )\n vm_disks = getObjDisks(vm_name)\n logger.info(\"Attaching disk %s to vm %s\", alias, vm_name)\n disk_attachment = prepare_disk_attachment_object(\n updated_disk.get_id(), interface=interface, bootable=bootable,\n disk=updated_disk, active=active\n )\n return DISK_ATTACHMENTS_API.create(\n disk_attachment, positive, collection=vm_disks\n )[1]", "def AttachDisk(self, disk: 'AZComputeDisk') -> None:\n vm = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name)\n data_disks = vm.storage_profile.data_disks\n # ID to assign to the data disk to attach\n lun = 0 if len(data_disks) == 0 else len(data_disks) + 1\n\n update_data = {\n 'lun': lun,\n 'name': disk.name,\n 'create_option': 
models.DiskCreateOption.attach,\n 'managed_disk': {'id': disk.resource_id}\n }\n\n data_disks.append(update_data)\n\n try:\n request = self.compute_client.virtual_machines.begin_update(\n self.resource_group_name, self.name, vm)\n while not request.done():\n sleep(5) # Wait 5 seconds before checking vm status again\n except azure_exceptions.CloudError as exception:\n raise RuntimeError(\n 'Could not attach disk {0:s} to instance {1:s}: {2:s}'.format(\n disk.name, self.name, str(exception))) from exception", "def attachDiskToMinipad(self , disk):\n return", "def update_volume_after_attached_to_vm(self, info, vms):\n path = info[0]['path']\n path_list = path.split(sep='/')\n machine_path_list = [\"~\", \"Home\"]\n machine_path_list.extend(path_list[3:])\n info[0]['machine_path'] = \"/\".join(machine_path_list)\n info[0]['AttachedToVm'] = vms\n info[0]['State'] = 'in-use'\n info[0]['time'] = datetime.datetime.now()\n return info", "def mount(self, fstype, export, vol_name, mountpoint, instance, options):\n\n # NOTE(mdbooth): mount() may currently be called multiple times for a\n # single attachment. Any operation which calls\n # LibvirtDriver._hard_reboot will re-attach volumes which are probably\n # already attached, resulting in multiple mount calls.\n\n LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '\n 'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '\n 'options=%(options)s) generation %(gen)s',\n {'fstype': fstype, 'export': export, 'vol_name': vol_name,\n 'mountpoint': mountpoint, 'options': options,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n if os.path.ismount(mountpoint):\n LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '\n 'mountpoint already mounted'),\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n else:\n LOG.debug('Mounting %(mountpoint)s generation %(gen)s',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n\n fileutils.ensure_tree(mountpoint)\n\n try:\n nova.privsep.fs.mount(fstype, export, mountpoint, options)\n except processutils.ProcessExecutionError:\n # Check to see if mountpoint is mounted despite the error\n # eg it was already mounted\n if os.path.ismount(mountpoint):\n # We're not going to raise the exception because we're\n # in the desired state anyway. However, this is still\n # unusual so we'll log it.\n LOG.exception(\n 'Error mounting %(fstypes export %(export)s on '\n '%(mountpoint)s. Continuing because mountpount is '\n 'mounted despite this.',\n {'fstype': fstype, 'export': export,\n 'mountpoint': mountpoint}, instance=instance)\n else:\n # If the mount failed there's no reason for us to keep\n # a record of it. 
It will be created again if the\n # caller retries.\n\n # Delete while holding lock\n del self.mountpoints[mountpoint]\n\n raise\n\n mount.add_attachment(vol_name, instance.uuid)\n\n LOG.debug('_HostMountState.mount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)", "def attach_volume(self, context, volume_id, instance_uuid, host_name,\n mountpoint, mode):\n @utils.synchronized(volume_id, external=True)\n def do_attach():\n # check the volume status before attaching\n volume = self.db.volume_get(context, volume_id)\n volume_metadata = self.db.volume_admin_metadata_get(\n context.elevated(), volume_id)\n if volume['status'] == 'attaching':\n if (volume['instance_uuid'] and volume['instance_uuid'] !=\n instance_uuid):\n msg = _(\"being attached by another instance\")\n raise exception.InvalidVolume(reason=msg)\n if (volume['attached_host'] and volume['attached_host'] !=\n host_name):\n msg = _(\"being attached by another host\")\n raise exception.InvalidVolume(reason=msg)\n if (volume_metadata.get('attached_mode') and\n volume_metadata.get('attached_mode') != mode):\n msg = _(\"being attached by different mode\")\n raise exception.InvalidVolume(reason=msg)\n elif volume['status'] != \"available\":\n msg = _(\"status must be available\")\n raise exception.InvalidVolume(reason=msg)\n # TODO(jdg): attach_time column is currently varchar\n # we should update this to a date-time object\n # also consider adding detach_time?\n self.db.volume_update(context, volume_id,\n {\"instance_uuid\": instance_uuid,\n \"mountpoint\": mountpoint,\n \"attached_host\": host_name\n })\n\n self.db.volume_admin_metadata_update(context.elevated(),\n volume_id,\n {\"attached_mode\": mode},\n False)\n return do_attach()", "def attach(self, node, device=None):\r\n\r\n return self.driver.attach_volume(node=node, volume=self, device=device)", "def attach(self, instance_id, device):\r\n return self.connection.attach_volume(self.id, instance_id, device)", "def disk(self, disk):\n self._context[\"disk\"] = disk", "def attach_volume(self):\n\n # Choose volume\n volume_id = self._choose_among_available_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Choose instance\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Attach the volume\n print '# Attaching volume \"%s\"!' 
% volume_id\n if self.compute.attach_volume(volume_id, instance_id):\n print 'The volume has been attached!'\n else:\n print 'The volume could not been attached'", "def attach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables from global.ini for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch mount options from global.ini\n try:\n mount_options = connectionData[\"mountoptions\"]\n except:\n mount_options = \"\"\n\n # fetch fencing options from global.ini\n try:\n fencing = connectionData[\"fencing\"]\n except:\n fencing = \"\"\n\n # fetch the host which currently owns the disk & the file path\n pdhost = self.get_pd_host(conn, pd, zone)\n path = storage.get(\"path\")\n\n # check if the require disk is already attached somewhere. If it is, detach it and fence the old host\n if pdhost == HOSTNAME:\n self.tracer.info(\"disk %s is already attached to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n continue\n elif pdhost != \"\":\n self.tracer.info(\"unable to attach %s to %s(%s) as it is still attached to %s\" % (pd, HOSTNAME, zone, pdhost))\n self.detach_pd(conn, pdhost, pd)\n if fencing.lower() == \"enabled\" or fencing.lower() == \"true\" or fencing.lower() == \"yes\":\n self.fence(conn, pdhost)\n\n # prepare payload for API call\n pdurl = self.zonal_url(zone, \"disks\", pd)\n body = {\n \"deviceName\": pd,\n \"source\": pdurl\n }\n\n # send API call to disconnect disks\n self.tracer.info(\"attempting to attach %s to %s(%s)\" % (pd, HOSTNAME, zone))\n operation = conn.instances().attachDisk(project=PROJECT, zone=zone, instance=HOSTNAME, body=body).execute()\n self.wait_for_operation(conn, operation, zone)\n\n # check if disk is attached and if so, mount the volumes\n if self.get_pd_host(conn, pd, zone) == HOSTNAME:\n self.tracer.info(\"successfully attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n else:\n raise Exception(\"failed to attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n\n # tell HANA is all good and to continue the load process\n return 0", "def attach_volume(\n self,\n volume: Union[dto.Volume, str],\n machine: Union[dto.Machine, str]\n ) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def Mount(self, mount_point, mount_options_by_format=\"\"):\n fstab = self.fstab\n if fstab:\n p = fstab[mount_point]\n mount_dict = {}\n if mount_options_by_format is not None:\n for option in mount_options_by_format.split(\"|\"):\n if \"=\" in option:\n key, value = option.split(\"=\", 1)\n mount_dict[key] = value\n mount_flags = mount_dict.get(p.fs_type, \"\")\n if p.context is not None:\n mount_flags = p.context + (\",\" + mount_flags if mount_flags else \"\")\n self.script.append('mount(\"%s\", \"%s\", %s, \"%s\", \"%s\");' % (\n p.fs_type, common.PARTITION_TYPES[p.fs_type],\n self._GetSlotSuffixDeviceForEntry(p),\n p.mount_point, mount_flags))\n self.mounts.add(p.mount_point)", "def attach_volume(self, host_path: str, container_path: str, 
mode: str = None):\n self.volumes[host_path] = {\n \"bind\": container_path,\n \"mode\": mode or \"Z\"\n }", "def create_filesystem_mounting_point(username, password, domainName, path, instanceId):\n createFileSystemMountingPointParams = {\n \"Username\": username,\n \"Password\": password,\n \"DomainName\": domainName,\n \"Path\": path,\n \"InstanceId\": instanceId\n }\n return createFileSystemMountingPointParams", "def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)", "def mount_device(uuid):\n mount_point = f'/mnt/{uuid}/back-up'\n # Create mountpoint if it doesn't exist\n pathlib.Path(mount_point).mkdir(parents=True, exist_ok=True)\n\n # Mount device\n out, err = run_cmd(['mount', '--uuid', uuid, mount_point])\n\n if not err:\n return mount_point\n else:\n abort(err, cause='mount')", "def do_mount(devpath, mountpoint, fstype):\n try:\n if check_already_mounted(devpath, mountpoint):\n return\n\n mounter = Mounter()\n mounter.mount(devpath, mountpoint, fstype)\n except exceptions.MountException:\n try:\n mounter.make_filesystem(devpath, fstype)\n mounter.mount(devpath, mountpoint, fstype)\n except exceptions.FuxiException as e:\n with excutils.save_and_reraise_exception():\n LOG.error(str(e))", "def _prepareDiskObject(**kwargs):\n storage_domain_name = kwargs.pop('storagedomain', None)\n\n # Tuple (lun_address, lun_target, lun_id, lun_port)\n lun = (kwargs.pop('lun_address', None), kwargs.pop('lun_target', None),\n kwargs.pop('lun_id', None), kwargs.pop('lun_port', 3260))\n # Tuple (username, password)\n lun_creds = (kwargs.pop('lun_username', None),\n kwargs.pop('lun_password', None))\n type_ = kwargs.pop('type_', None)\n\n storage_connection = kwargs.pop('storage_connection', None)\n\n if lun != (None, None, None, 3260) and storage_connection:\n logger.error(\n \"You cannot set storage connection id and LUN params in one call!\")\n return None\n kwargs.pop('active', None)\n\n disk = kwargs.pop('update', None)\n if disk is None:\n disk = data_st.Disk(**kwargs)\n\n if storage_connection is not None:\n storage = data_st.HostStorage()\n storage.id = storage_connection\n disk.set_lun_storage(storage)\n\n if storage_domain_name is not None:\n storage_domain = STORAGE_DOMAIN_API.find(storage_domain_name,\n NAME_ATTR)\n storage_domains = data_st.StorageDomains()\n storage_domains.add_storage_domain(storage_domain)\n disk.storage_domains = storage_domains\n\n # quota\n quota_id = kwargs.pop('quota', None)\n if quota_id == '':\n disk.set_quota(data_st.Quota())\n elif quota_id:\n disk.set_quota(data_st.Quota(id=quota_id))\n\n if lun != (None, None, None, 3260):\n direct_lun = data_st.LogicalUnit(address=lun[0], target=lun[1],\n id=lun[2], port=lun[3])\n if lun_creds != (None, None):\n direct_lun.set_username(lun_creds[0])\n direct_lun.set_password(lun_creds[1])\n\n logical_units = data_st.LogicalUnits(logical_unit=[direct_lun])\n disk.set_lun_storage(\n data_st.HostStorage(logical_units=logical_units, type_=type_)\n )\n\n # id\n disk_id = kwargs.pop('id', None)\n if disk_id:\n disk.set_id(disk_id)\n\n # read_only\n read_only = kwargs.pop('read_only', None)\n if read_only is not None:\n disk.set_read_only(read_only)\n\n # snapshot\n snapshot = kwargs.pop('snapshot', None)\n if snapshot:\n disk.set_snapshot(snapshot)\n\n # description\n description = kwargs.pop('description', None)\n if description is not None:\n 
disk.set_description(description)\n\n # qcow_version\n qcow_version = kwargs.pop('qcow_version', None)\n if qcow_version:\n disk.set_qcow_version(qcow_version)\n\n return disk", "def mount(f, mountpoint=DMG_MOUNT, read_only=False, dry_run=ARGS.dry_run):\n result = None\n cmd = ['/usr/bin/hdiutil', 'attach', '-mountpoint', str(mountpoint), '-plist', f]\n\n # Insert read only option in the correct spot\n if read_only:\n cmd.insert(2, '-readonly')\n\n if not dry_run:\n _p = subprocess.run(cmd, capture_output=True)\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n _entities = plist.read_string(_p.stdout).get('system-entities')\n\n if _entities:\n result = mount_device(_entities)\n LOG.warning('Mounted {dmg} to {mountpoint}'.format(dmg=f, mountpoint=mountpoint))\n else:\n LOG.info(_p.stderr.decode('utf-8').strip())\n else:\n LOG.warning('Mount {dmg} to {mountpoint}'.format(dmg=f, mountpoint=mountpoint))\n\n return result", "def attach_disk_to_vm(self, vm_ref, instance_name,\n adapter_type, disk_type, vmdk_path=None,\n disk_size=None, linked_clone=False,\n controller_key=None, unit_number=None,\n device_name=None):\n client_factory = self._session._get_vim().client.factory\n vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(\n client_factory, adapter_type, disk_type,\n vmdk_path, disk_size, linked_clone,\n controller_key, unit_number, device_name)\n\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_attach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\") % locals())", "def attach_volume(self, volume_id, instance_id, device):\r\n params = {'InstanceId' : instance_id,\r\n 'VolumeId' : volume_id,\r\n 'Device' : device}\r\n return self.get_status('AttachVolume', params, verb='POST')" ]
[ "0.7112061", "0.70221376", "0.7012122", "0.6979955", "0.68031716", "0.6700837", "0.6697146", "0.6559337", "0.6488255", "0.63370275", "0.61489516", "0.6141329", "0.60744673", "0.6069949", "0.6043172", "0.59649885", "0.59368604", "0.57600784", "0.57503486", "0.57471454", "0.57347566", "0.5726881", "0.5693637", "0.5655904", "0.5636782", "0.56328523", "0.55845773", "0.55484194", "0.550424", "0.54764956" ]
0.75799954
0
Detach the disk attached to the instance.
def detach_volume(self, connection_info, instance, mountpoint, encryption=None):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detach(self):\r\n\r\n return self.driver.detach_volume(volume=self)", "def detachDisk(positive, alias, vmName):\n logger.info(\"Detaching disk %s from vm %s\", alias, vmName)\n disk_attachment = get_disk_attachment(vmName, alias, attr='name')\n return DISK_ATTACHMENTS_API.delete(disk_attachment, positive)", "def detach(self, force=False):\r\n instance_id = None\r\n if self.attach_data:\r\n instance_id = self.attach_data.instance_id\r\n device = None\r\n if self.attach_data:\r\n device = self.attach_data.device\r\n return self.connection.detach_volume(self.id, instance_id, device, force)", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):\n vhd_name = connection_info['data']['disk_name']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n not_found = True\n for i in range(len(data_disks)):\n if vhd_name == data_disks[i].name:\n del data_disks[i]\n not_found = False\n break\n if not_found:\n LOG.info(_LI('Volume: %s was not attached to Instance!'),\n vhd_name, instance=instance)\n return\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Detach Volume to Instance in Azure finish\"),\n instance=instance)", "def _DetachDisk(self, idx, root, _):\n hotmsg = \"\"\n if self.op.hotplug:\n hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,\n constants.HOTPLUG_TARGET_DISK,\n root, None, idx)\n\n # Always shutdown the disk before detaching.\n ShutdownInstanceDisks(self, self.instance, [root])\n\n # Rename detached disk.\n #\n # Transform logical_id from:\n # <file_storage_dir>/<instance_name>/<disk_name>\n # to\n # <file_storage_dir>/<disk_name>\n if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):\n file_driver = root.logical_id[0]\n instance_path, disk_name = os.path.split(root.logical_id[1])\n new_path = os.path.join(os.path.dirname(instance_path), disk_name)\n new_logical_id = (file_driver, new_path)\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(root, new_logical_id)])\n result.Raise(\"Failed before detach\")\n # Update logical_id\n self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)\n\n # Remove disk from config\n self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n return hotmsg", "def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None):\n raise NotImplementedError()", "def detachDiskFromMinipad(self , disk):\n return", "def detach_volume(self, host_path: str):\n del self.volumes[host_path]", "def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. 
VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None", "def detach_volume(self,\n connection_info,\n instance,\n mountpoint,\n encryption=None):\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n name = volume_data['name']\n filtered_disks = [disk for disk in data_disks if disk.name != name]\n if len(filtered_disks) == len(data_disks):\n LOG.error(\"Volume %s was not attached to instance %s\" %\n (name, instance.uuid))\n return\n azure_instance.storage_profile.data_disks = filtered_disks\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Detached volume %s from instance %s\" % (name, instance.uuid))", "def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))", "def detach_disk_from_vm(self, vm_ref, instance_name, device):\n client_factory = self._session._get_vim().client.factory\n vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(\n client_factory, device)\n disk_key = device.key\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_detach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())", "def detach_volume(self, instance_name, mountpoint):\n return True", "def detach(self):\n raise io.UnsupportedOperation", "def detach_volume(self, connection_info, instance, mountpoint):\n instance_name = instance['name']\n vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)\n if vm_ref is None:\n raise exception.InstanceNotFound(instance_id=instance_name)\n # Detach Volume from VM\n LOG.debug(_(\"Detach_volume: %(instance_name)s, %(mountpoint)s\")\n % locals())\n driver_type = connection_info['driver_volume_type']\n if driver_type not in ['iscsi']:\n raise exception.VolumeDriverNotFound(driver_type=driver_type)\n data = connection_info['data']\n\n # Discover iSCSI Target\n device_name, uuid = volume_util.find_st(self._session, data,\n self._cluster)\n if 
device_name is None:\n raise volume_util.StorageError(_(\"Unable to find iSCSI Target\"))\n\n # Get the vmdk file name that the VM is pointing to\n hardware_devices = self._session._call_method(vim_util,\n \"get_dynamic_property\", vm_ref,\n \"VirtualMachine\", \"config.hardware.device\")\n device = vm_util.get_rdm_disk(hardware_devices, uuid)\n if device is None:\n raise volume_util.StorageError(_(\"Unable to find volume\"))\n self.detach_disk_from_vm(vm_ref, instance_name, device)\n LOG.info(_(\"Mountpoint %(mountpoint)s detached from \"\n \"instance %(instance_name)s\") % locals())", "def detach_volume(self):\n\n # Choose the volume\n volume_id = self._choose_among_used_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Detach the volume\n print '# Detaching volume \"%s\"!' % volume_id\n if self.compute.detach_volume(volume_id):\n print 'The volume has been detached!'\n else:\n print 'The volume could not been detached'", "def detach_volume(self, context, volume_id):\n # TODO(vish): refactor this into a more general \"unreserve\"\n # TODO(sleepsonthefloor): Is this 'elevated' appropriate?\n # self.db.volume_detached(context.elevated(), volume_id)\n self.db.volume_admin_metadata_delete(context.elevated(), volume_id,\n 'attached_mode')", "def detach(args, **_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.source.instance)\n instance_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.target.instance)\n\n if _detach_external_volume_or_instance():\n return\n\n ctx.logger.debug('Detaching EBS volume {0}'.format(volume_id))\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not volume_object:\n raise NonRecoverableError(\n 'EBS volume {0} not found in account.'.format(volume_id))\n\n try:\n detached = volume_object.detach(**args)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n if not detached:\n raise NonRecoverableError(\n 'Failed to detach volume {0} from instance {1}'\n .format(volume_id, instance_id))\n\n utils.unassign_runtime_property_from_resource(\n 'instance_id', ctx.source.instance)\n ctx.logger.info(\n 'Detached volume {0} from instance {1}.'\n .format(volume_id, instance_id))", "def detach(self, name):\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if len(vms) == 0:\n Console.error(f\"{name} is not attached to any vm\")\n else:\n removed = []\n for vm in vms:\n result = self.unmount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" not in mounts.keys():\n removed.append(vm)\n for vm in removed:\n vms.remove(vm)\n result = self.update_volume_after_detach(volume_info, vms)\n return result[0]\n else:\n Console.error(\"volume does not exist or volume had been deleted\")", "def detach_volume(self, volume_id, instance_id=None,\r\n device=None, force=False):\r\n params = {'VolumeId' : volume_id}\r\n if instance_id:\r\n params['InstanceId'] = instance_id\r\n if device:\r\n params['Device'] = device\r\n if force:\r\n params['Force'] = 'true'\r\n return self.get_status('DetachVolume', params, verb='POST')", "def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()", "async def eject(self) -> None:\n await self.dbus.Drive.call_eject(UDISKS2_DEFAULT_OPTIONS)", "def _wipe(self):\n log_method_call(self, 
self.name, status=self.status)\n\n start = self.partedPartition.geometry.start\n part_len = self.partedPartition.geometry.end - start\n bs = self.partedPartition.geometry.device.sectorSize\n device = self.partedPartition.geometry.device.path\n\n # Erase 1MiB or to end of partition\n count = int(Size(\"1 MiB\") / bs)\n count = min(count, part_len)\n\n cmd = [\"dd\", \"if=/dev/zero\", \"of=%s\" % device, \"bs=%s\" % bs,\n \"seek=%s\" % start, \"count=%s\" % count]\n try:\n util.run_program(cmd)\n except OSError as e:\n log.error(str(e))\n finally:\n # If a udev device is created with the watch option, then\n # a change uevent is synthesized and we need to wait for\n # things to settle.\n udev.settle()", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def detach_volume(self, volume_id, instance_id = \"\"):\n response = volume.detach_volume(self.url, self.verb, self.headers,\n self.version, volume_id, instance_id)\n if response is not None :\n res = DetachVolumeResponse.DetachVolumeResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def detach(self):\n if (self.status == volume_status.ATTACHED or self.status == volume_status.IN_USE) \\\n and self.volume:\n try:\n self.volume.detach()\n except EC2ResponseError, e:\n log.error(\"Detaching volume '%s' from instance '%s' failed. Exception: %s\"\n % (self.volume_id, self.app.cloud_interface.get_instance_id(), e))\n return False\n self.wait_for_status(volume_status.AVAILABLE, 240)\n if self.volume and self.status != volume_status.AVAILABLE:\n log.debug('Attempting to detach again.')\n try:\n self.volume.detach()\n except EC2ResponseError, e:\n log.error(\"Detaching volume '%s' from instance '%s' failed. Exception: %s\" % (\n self.volume_id, self.app.cloud_interface.get_instance_id(), e))\n return False\n if not self.wait_for_status(volume_status.AVAILABLE, 60):\n log.warning('Volume {0} did not detach properly. 
Left in state {1}'\n .format(self.volume_id, self.status))\n return False\n else:\n log.debug(\"Volume '%s' already not attached ('%s')\"\n % (self.volume_id, self.status))\n return False\n return True", "def detach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # init variables & arrays\n all_pds = []\n all_vgs = []\n unmount_err = 0\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch the host which currently owns the disk & the file path\n path = storage.get(\"path\")\n\n # try to unmount the file system twice\n self._forcedUnmount(dev, path, 2)\n\n # if it's still mounted, try killing blocking processes and umount again\n if os.path.ismount(path):\n self._lsof_and_kill(path)\n self._forcedUnmount(dev, path, 2)\n\n # if still mounted, raise exception. The taking over node will stonith this host\n if os.path.ismount(path):\n self.tracer.warning(\"A PID belonging to someone other than SIDADM is blocking the unmount. This node will be fenced\")\n self._umount(path, lazy=True)\n mount_err = 1\n\n # add to list of devices.\n all_pds.append(pd)\n\n # check to see if the device is a VG. If so, add it to the list of VG's\n all_vgs.append(self.get_vg(dev))\n\n # Stop each unique VG\n all_vgs = list(set(all_vgs))\n for vg in all_vgs:\n Helper._runOsCommand(\"sudo /sbin/vgchange -an %s\" % vg, self.tracer)\n self.tracer.info(\"stopping volume group %s\" % (vg))\n\n # for each unique disk detected, detach it using Google API's\n all_pds = list(set(all_pds))\n for pd_member in all_pds:\n self.detach_pd(conn, HOSTNAME, pd_member)\n\n # if there was an error unmounting, self fence\n if unmount_err == 1:\n self.fence(conn, pdhost)\n\n # tell HANA we successfully detached\n return 0", "def umount(self, vol_name, mountpoint, instance):\n LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '\n 'mountpoint=%(mountpoint)s) generation %(gen)s',\n {'vol_name': vol_name, 'mountpoint': mountpoint,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n try:\n mount.remove_attachment(vol_name, instance.uuid)\n except KeyError:\n LOG.warning(\"Request to remove attachment (%(vol_name)s from \"\n \"%(mountpoint)s, but we don't think it's in use.\",\n {'vol_name': vol_name, 'mountpoint': mountpoint},\n instance=instance)\n\n if not mount.in_use():\n mounted = os.path.ismount(mountpoint)\n\n if mounted:\n mounted = self._real_umount(mountpoint)\n\n # Delete our record entirely if it's unmounted\n if not mounted:\n del self.mountpoints[mountpoint]\n\n LOG.debug('_HostMountState.umount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)", "def delete_disk(self, disk, delete_vmdk=True):\n backend_disk = self.get_backend_disk(disk)\n\n try:\n self.client.delete_disk(disk.vm.backend_id, disk.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n if delete_vmdk:\n vdm = self.soap_client.content.virtualDiskManager\n task = vdm.DeleteVirtualDisk(\n 
name=backend_disk.backing.fileName,\n datacenter=self.get_disk_datacenter(backend_disk),\n )\n try:\n pyVim.task.WaitForTask(task)\n except Exception:\n logger.exception('Unable to delete VMware disk. Disk ID: %s.', disk.id)\n raise VMwareBackendError('Unknown error.')\n signals.vm_updated.send(self.__class__, vm=disk.vm)" ]
[ "0.74193597", "0.7338237", "0.721738", "0.71526825", "0.714438", "0.7000114", "0.6941674", "0.69158816", "0.68212986", "0.67936045", "0.6760271", "0.67468965", "0.6729655", "0.66621906", "0.6634049", "0.657201", "0.6467935", "0.63898975", "0.63229585", "0.62806314", "0.62591475", "0.6244758", "0.6198081", "0.61515975", "0.61515975", "0.61251324", "0.6081085", "0.60716814", "0.6053163", "0.6009992" ]
0.7359824
1
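The schema does not say what document_rank encodes, but every row shown here is consistent with it counting the negatives whose score exceeds the positive's document_score: the first four rows have no negative above the positive and rank 0, while this row's top negative (0.74193597) beats its document_score (0.7359824), giving rank 1. A small sketch of that assumed relationship:

```python
def infer_document_rank(document_score: float, negative_scores: list[float]) -> int:
    # Assumed semantics: number of negatives scoring strictly above the positive.
    return sum(score > document_score for score in negative_scores)

# Checked against the row above: its first two negative scores straddle the positive.
assert infer_document_rank(0.7359824, [0.74193597, 0.7338237]) == 1
# And against the first row, where the best negative (0.6528486) is well below 0.7945044.
assert infer_document_rank(0.7945044, [0.6528486, 0.58731806]) == 0
```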
Return usage info for volumes attached to vms on a given host.
def get_all_volume_usage(self, context, compute_host_bdms):
    volusage = []
    return volusage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_volume_info(host, disk_object, dc_obj):\n host_resource = get_host_resource_by_name(host)\n\n vol_id = disk_object.get_image_id()\n sd_id = disk_object.get_storage_domains().get_storage_domain()[0].get_id()\n image_id = disk_object.get_id()\n sp_id = dc_obj.get_id()\n\n args = {\n \"storagepoolID\": sp_id,\n \"storagedomainID\": sd_id,\n \"imageID\": image_id,\n \"volumeID\": vol_id,\n }\n\n return host_resource.vds_client(cmd=\"Volume.getInfo\", args=args)", "def host_info(vm_hostname):\n with _get_vm(vm_hostname) as vm:\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n info = vm.info()\n\n # Disconnect fabric now to avoid messages after the table\n disconnect_all()\n\n categories = (\n ('General', (\n 'hypervisor',\n 'status',\n )),\n ('Network', (\n 'intern_ip',\n 'mac_address',\n )),\n ('Resources', (\n 'num_cpu',\n 'max_cpus',\n 'memory',\n 'memory_free',\n 'max_mem',\n 'disk',\n 'disk_size_gib',\n 'disk_free_gib',\n )),\n # Anything else will appear in this section\n ('Other', None),\n )\n\n def _progress_bar(free_key, capacity_key, result_key, unit):\n \"\"\"Helper to show nice progress bars.\"\"\"\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )\n\n _progress_bar('memory_free', 'memory', 'memory', 'MiB')\n _progress_bar('disk_free_gib', 'disk_size_gib', 'disk', 'GiB')\n\n max_key_len = max(len(k) for k in info.keys())\n for category, keys in categories:\n # Handle 'Other' section by defaulting to all keys\n keys = list(keys or info.keys())\n\n # Any info available for the category?\n if not any(k in info for k in keys):\n continue\n\n print('')\n print(white(category, bold=True))\n for k in keys:\n if k not in info:\n continue\n\n # Properly re-indent multiline values\n value = str(info.pop(k))\n value = ('\\n' + ' ' * (max_key_len + 3)).join(\n value.splitlines()\n )\n print('{} : {}'.format(k.ljust(max_key_len), value))", "def get_disk_usage():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><disk-space></disk-space></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def _get_vms_on_host(self, host_ref):\n vm_data = []\n vm_ret = self._session._call_method(vutil,\n \"get_object_property\",\n host_ref,\n \"vm\")\n # if there are no VMs on the host, we don't need to look further\n if not vm_ret:\n return vm_data\n\n vm_mors = vm_ret.ManagedObjectReference\n result = self._session._call_method(vutil,\n \"get_properties_for_a_collection_of_objects\",\n \"VirtualMachine\", vm_mors,\n [\"config.instanceUuid\", \"runtime.powerState\",\n 
\"config.hardware.memoryMB\", \"config.managedBy\"])\n with vutil.WithRetrieval(self._session.vim, result) as objects:\n for obj in objects:\n vm_props = propset_dict(obj.propSet)\n # sometimes, the vCenter finds a file it thinks is a VM and it\n # doesn't even have a config attribute ... instead of crashing\n # with a KeyError, we assume this VM is not running and totally\n # doesn't matter as nova also will not be able to handle it\n if 'config.instanceUuid' not in vm_props:\n continue\n\n vm_data.append((\n vm_props['config.instanceUuid'],\n vm_props['config.hardware.memoryMB'],\n vm_props['runtime.powerState'],\n vm_props.get('config.managedBy'),\n vutil.get_moref_value(obj.obj)))\n return vm_data", "def _get_host_utilization(context, host, ram_mb, disk_gb):\n instances = instance_get_all_by_host(context, host)\n vms = len(instances)\n free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb\n free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb * 1024)\n\n work = 0\n for instance in instances:\n free_ram_mb -= instance.memory_mb\n free_disk_gb -= instance.root_gb\n free_disk_gb -= instance.ephemeral_gb\n if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING,\n vm_states.MIGRATING, vm_states.RESIZING]:\n work += 1\n return dict(free_ram_mb=free_ram_mb,\n free_disk_gb=free_disk_gb,\n current_workload=work,\n running_vms=vms)", "def _get_mount_status(self, vm=None):\n result = Shell.run(f\"multipass info {vm} --format=json\")\n\n if f'instance \"{vm}\" does not exist' in result:\n dict_result = {\n 'name': vm,\n 'status': \"instance does not exist\"\n }\n else:\n result = json.loads(result)\n dict_result = {\n 'name': vm,\n 'status': result[\"info\"][vm]['state'],\n 'mounts': result[\"info\"][vm]['mounts']\n }\n return dict_result", "def disk():\n run(env.disk_usage_command % env)", "def getvg(host, disk):\r\n sshCommand = \"lspv | grep '^%s ' | awk '{print $3}'\" % disk\r\n vgName = sub.Popen([\"ssh\", \"-q\", host, sshCommand],\r\n shell=False, stdout=sub.PIPE, stderr=sub.PIPE\r\n ).communicate()[0].strip()\r\n return vgName", "def get_disk_usage():\n\n disk_usage = {}\n diskinfo = subprocess.Popen(['df','-P'], shell=False, stdout=subprocess.PIPE)\n diskinfo.stdout.readline()\n for line in diskinfo.stdout:\n disk_usage[line.split()[5]] = { 'filesystem' : line.split()[0], 'size' : int(line.split()[1]), \\\n'used' : int(line.split()[2]), 'avail' : int(line.split()[3]), 'capacity' : line.split()[4] }\n diskinfo = subprocess.Popen(['df','-i','-P'], shell=False, stdout=subprocess.PIPE)\n diskinfo.stdout.readline()\n for line in diskinfo.stdout:\n disk_usage[line.split()[5]].update( { 'iused' : int(line.split()[2]), 'ifree' : int(line.split()[3]), 'icapacity' : line.split()[4] } )\n return disk_usage", "def get_lun_storage_info(lun_id):\n host = ll_hosts.get_spm_host(config.HOSTS)\n host_ip = ll_hosts.get_host_ip(host)\n executor = rhevm_helpers.get_host_executor(\n host_ip, config.VDC_ROOT_PASSWORD\n )\n # Execute 'pvscan' to display the latest volume info\n storage_resources.pvscan(host)\n logger.info(\"Executing command 'pvs | grep %s'\", lun_id)\n status, output, err = executor.run_cmd(\n shlex.split(PVS_SHOW_LUN_INFO % lun_id)\n )\n if status:\n logger.info(\n \"Status was False executing 'pvs | grep %s'. 
Err: %s\",\n lun_id, err\n )\n return 0, 0\n\n # Format the output into the 6 expected display parameters (PV, VG,\n # Format, LV Attributes, Physical size and Physical free size)\n formatted_output = shlex.split(output)\n logger.info(\n \"The output received when running pvs on LUN id %s is: %s\"\n % (lun_id, formatted_output)\n )\n # The 2nd last displayed data output is needed - Physical size\n lun_size = formatted_output[-2]\n lun_size = lun_size.replace(\"g\", \"\")\n lun_free_space = formatted_output[-1]\n lun_free_space = lun_free_space.replace(\"g\", \"\")\n lun_size_bytes = float(lun_size) * config.GB\n logger.info(\"The LUN size in bytes is '%s'\", str(lun_size_bytes))\n lun_free_bytes = float(lun_free_space) * config.GB\n logger.info(\"The LUN free space in bytes is '%s'\", str(lun_free_bytes))\n\n return int(lun_size_bytes), int(lun_free_bytes)", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def disk_usage(self):\n self.monitoring_object['disk_usage'] =\\\n psutil.disk_usage('/')", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def _get_vm_stats(self, vm_name):\n host = VDS(hosts.get_host_vm_run_on(vm_name), config.VDC_ROOT_PASSWORD)\n return host.vds_client(\"VM.getStats\", {\"vmID\": self.vm_id})[0]", "def get_storage_devices(vm_name, filter='vd[a-z]'):\n vm_executor = get_vm_executor(vm_name)\n\n command = 'ls /sys/block | egrep \\\"%s\\\"' % filter\n rc, output, error = vm_executor.run_cmd(cmd=shlex.split(command))\n if rc:\n logger.error(\n \"Error while retrieving storage devices from VM '%s, output is \"\n \"'%s', error is '%s'\", output, error\n )\n return False\n return output.split()", "def update_volume_after_attached_to_vm(self, info, vms):\n path = info[0]['path']\n path_list = path.split(sep='/')\n machine_path_list = [\"~\", \"Home\"]\n machine_path_list.extend(path_list[3:])\n info[0]['machine_path'] = \"/\".join(machine_path_list)\n info[0]['AttachedToVm'] = vms\n info[0]['State'] = 'in-use'\n info[0]['time'] = datetime.datetime.now()\n return info", "def collect():\n\n command = \"cat /proc/meminfo |grep MemTotal|awk -F' ' '{print $2}'\"\n memTotal_f = round(float(os.popen(command).read())/1024/1000,0)\n memTotal = int(memTotal_f)\n cmd = 'df -h |grep \"/dev/s\"'\n metric_disk = os.popen(cmd).readlines()\n hardNum=[]\n for i in metric_disk:\n hard_space = float((i.strip().split()[1])[:-1])\n hardNum.append(hard_space)\n\n disk_info = sum(hardNum)\n disk_use = {}\n metric_disks=os.popen('df -x tmpfs -x devtmpfs | grep -Eo \" /\\S*$\" ').readlines()\n for disk in metric_disks:\n cmd = 'df|grep -E \"%s$\"' % disk.strip()\n disks = os.popen(cmd).readlines()[0]\n disk_list = disks.split()\n disk_use[disk_list[5]]=disk_list[4]\n hard = {\n \"disk_used\" : disk_use,\n \"disk_total\":disk_info,\n \"mem_total\":memTotal\n }\n\n return hard", "def get_basic_volume_info_all():\n vl = None\n try:\n d, err = xml_parse.run_gluster_command(\n '/usr/sbin/gluster volume info all --xml')\n if err:\n raise Exception(err)\n\n root = d[\"root\"]\n\n # 
Get the admin vol name so it can be excluded from the list\n admin_vol_name, err = config.get_admin_vol_name()\n if err:\n raise Exception(err)\n\n # Now get the all the volume info for user created volumes\n vl, err = xml_parse.get_volume_info(root, admin_vol_name)\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error getting basic volume information for all volumes : %s' % str(e)\n else:\n return vl, None", "def test_update_volume_stats_cached(self):\n self._fail_host_storage = True\n actual = self.driver.get_volume_stats(False)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual(90, actual['total_capacity_gb'])\n self.assertEqual(87, actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])", "def get_amount_of_file_type_volumes(host_ip, sp_id, sd_id, image_id):\n # Build the path to the Disk's location on the file system\n volume_path = FILE_SD_VOLUME_PATH_IN_FS % (sp_id, sd_id, image_id)\n command = GET_FILE_SD_NUM_DISK_VOLUMES % volume_path\n executor = rhevm_helpers.get_host_executor(\n ip=host_ip, password=config.VDC_ROOT_PASSWORD\n )\n rc, output, err = executor.run_cmd(shlex.split(command))\n\n assert not rc, errors.CommandExecutionError(\"Output: %s\" % output)\n # There are a total of 3 files/volume, the volume metadata (.meta),\n # the volume lease (.lease) and the volume content itself (no\n # extension)\n num_volumes = int(output)/3\n logger.debug(\n \"The number of file type volumes found is '%s'\",num_volumes\n )\n return num_volumes", "def main():\n results = []\n results.extend(check_mounts())\n results.extend(diskusage())\n return results", "def usage(self, host):", "def _get_system_volume(vm_):\n\n # Override system volume size if 'disk_size' is defined in cloud profile\n disk_size = get_size(vm_)[\"disk\"]\n if \"disk_size\" in vm_:\n disk_size = vm_[\"disk_size\"]\n\n # Construct the system volume\n volume = Volume(\n name=\"{} Storage\".format(vm_[\"name\"]),\n size=disk_size,\n disk_type=get_disk_type(vm_),\n )\n\n if \"image_password\" in vm_:\n image_password = vm_[\"image_password\"]\n volume.image_password = image_password\n\n # Retrieve list of SSH public keys\n ssh_keys = get_public_keys(vm_)\n volume.ssh_keys = ssh_keys\n\n if \"image_alias\" in vm_.keys():\n volume.image_alias = vm_[\"image_alias\"]\n else:\n volume.image = get_image(vm_)[\"id\"]\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in vm_:\n volume.availability_zone = vm_[\"disk_availability_zone\"]\n\n return volume", "def ls(cls):\n for vm in cls._vm_agents_for_host():\n with vm:\n running = vm.qemu.process_exists()\n\n if running:\n vm_mem = vm.qemu.proc().memory_full_info()\n\n expected_size = (\n vm.cfg[\"memory\"] * 1024 * 1024\n + vm.qemu.vm_expected_overhead * 1024 * 1024\n )\n\n log.info(\n \"online\",\n machine=vm.name,\n cores=vm.cfg[\"cores\"],\n memory_booked=\"{:,.0f}\".format(vm.cfg[\"memory\"]),\n memory_pss=\"{:,.0f}\".format(vm_mem.pss / MiB),\n memory_swap=\"{:,.0f}\".format(vm_mem.swap / MiB),\n )\n else:\n log.info(\"offline\", machine=vm.name)", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n 
return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def get_info(volpath):\n dhandle = vol_open_path(volpath, VMDK_OPEN_DISKCHAIN_NOIO)\n\n if not disk_is_valid(dhandle):\n logging.warning(\"Failed to open disk - %s\", volpath)\n return None\n\n sinfo = disk_info()\n res = lib.DiskLib_GetSize(dhandle, 0, VMDK_MAX_SNAPS, byref(sinfo))\n\n lib.DiskLib_Close(dhandle)\n if res != 0:\n logging.warning(\"Failed to get size of disk %s - %x\", volpath, res)\n return None\n\n return {VOL_SIZE: convert(sinfo.size), VOL_ALLOC: convert(sinfo.allocated)}", "def info(self, name=None):\n data = self.cloudman.list_servers(filters={'name': name})\n\n \"\"\"\n vms = self.list()\n print (\"VMS\", vms)\n data = None\n for entry in vms:\n print (\"FFF\", entry['name'])\n if entry['name'] == name:\n data = entry\n break\n \"\"\"\n\n if data is None:\n raise ValueError(f\"vm not found {name}\")\n\n r = self.update_dict(data, kind=\"vm\")\n return r", "def get_hdd():\n return {\n 'HDD': string_chopped_to_float(psutil.disk_usage('/'), 'percent=', ')'),\n }", "def update_volume_after_detach(self, info, vms):\n info[0]['AttachedToVm'] = vms\n if len(vms) == 0:\n info[0]['machine_path'] = None\n info[0]['State'] = 'available'\n info[0]['time'] = datetime.datetime.now()\n return info", "def show(self, req, id):\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n try:\n LOG.info(\"List the info on nova-compute '%s'\" % id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n instances = dbapi.show_instances_on_host(ctxt, id)\n instances = [{'uuid': c.uuid,\n 'name': c.display_description,\n 'status': 
c.vm_state} for c in instances]\n compute_node = dbapi.compute_node_get_by_host(ctxt, id)\n total_ram = float(compute_node.memory_mb)\n used_ram = float(compute_node.memory_mb_used)\n percent = int(round((used_ram / total_ram) * 100))\n return {'host': {'name': id,\n 'percentUsed': percent,\n 'totalRAM': int(total_ram),\n 'usedRAM': int(used_ram),\n 'instances': instances}}\n except exception.ComputeHostNotFound:\n raise webob.exc.HTTPNotFound()" ]
[ "0.6463681", "0.62752587", "0.6237475", "0.6067815", "0.5918124", "0.586192", "0.58459896", "0.57930577", "0.57628095", "0.57522666", "0.5672278", "0.5626577", "0.56147677", "0.56082857", "0.55949056", "0.55703175", "0.5569759", "0.5569127", "0.5548287", "0.5530609", "0.552922", "0.5528931", "0.55260247", "0.55188954", "0.54773027", "0.5442593", "0.5434247", "0.54285264", "0.5426922", "0.5422958" ]
0.7156149
0
Removes the named VM, as if it crashed. For testing.
def test_remove_vm(self, instance_name): self.instances.pop(instance_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove():\n vbox = Vbox(env.vm_name)\n vbox.remove()", "def destroy_vm(self, name_of_vm):\n self.power_off(name_of_vm)\n # import pdb;pdb.name_of_vm()\n vm = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n task = vm.Destroy_Task()\n WaitForTask(task)\n states = [vim.TaskInfo.State.success, vim.TaskInfo.State.error]\n while task.info.state not in states:\n time.sleep(1)\n status = task.info.state\n if status == \"success\":\n return status\n if status == \"error\":\n log.error(task.info.error.msg)\n log.info(task.info.error)\n return status", "def destroy_iperf_vm(name):\n\n cmd = \"virsh list\"\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n raise RuntimeError(\n \"Couldn't execute the command {} : {}\".format(cmd, stderr)\n )\n\n if re.findall(name, stdout):\n cmd = \"virsh destroy {}\".format(name)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n raise RuntimeError(\n \"Couldn't execute the command {} : {}\".format(cmd, stderr)\n )", "def delete(vmname, deldisk=True):\n\n dom = _conn.lookupByName(vmname)\n if dom.isActive():\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.undefine()\n infokeeper.delete_vm(vmname)\n if deldisk:\n os.remove(os.path.join(base_disk_path, dom.name() + '.img'))\n return 'VM %s deleted' % vmname", "def delete_virtual_machine(self, vm):\n try:\n self.client.delete_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def destroy(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh destroy ' + str(vm) + ' ' + str(env) )", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)", "def undeploy_vm(context, vm):\n monitor = context.getMonitoringService().getVirtualMachineMonitor()\n print \"Uneploying virtual machine %s... 
This may take some time.\" \\\n % vm.getInternalName()\n vm.undeploy()\n monitor.awaitCompletionUndeploy(vm)\n return refresh_vm(context, vm)", "def delete_vm(client, resource_group_name, vm_name):\n return client.delete(resource_group_name, vm_name)", "def destroy(self):\n\n # destroys virtual machines\n for vm in self.vms:\n try:\n vm.name = \"%s_%s\" % (self.resource_pool, vm.name)\n vm.destroy_with_files(manager=self.manager, host_address=self.host_address,\n host_user=self.host_user,\n host_password=self.host_password)\n except Manager.ExistenceException:\n self.logger.info(\"Couldn't find '%s', probably it already removed\" % vm.name)\n except:\n self.logger.error(\"Error with destroying VM '%s'\" % vm.name)\n\n sw_name = None\n\n # destroys isolated networks with vSwitches\n for net in self.networks:\n try:\n if net.isolated:\n sw_name = \"%s_%s_%s\" % (self.config.SWITCH_PREFIX, self.resource_pool, net.name)\n switch = Switch(sw_name)\n switch.destroy(self.manager, self.host_name)\n except Manager.ExistenceException:\n pass\n except:\n self.logger.error(\"Error with destroying switch '%s'\" % sw_name)\n\n # destroys common vSwitch if exist\n try:\n shared_sw_name = '%s_%s' % (self.config.SWITCH_PREFIX, self.resource_pool)\n switch = Switch(shared_sw_name)\n switch.destroy(self.manager, self.host_name)\n except Manager.ExistenceException:\n pass\n\n # destroys resource pool\n try:\n ResourcePool(self.resource_pool).destroy(self.manager, with_vms=True)\n except Manager.ExistenceException:\n pass\n except Exception as e:\n self.logger.error(e.message)\n raise e", "def delete_vm(self, account, vm_id):\n node = Node()\n node.id = vm_id\n self.driver(account).destroy_node(node)", "def DeleteVM(self):\n status = self.VMStatus()\n\n if status != 'POWERED OFF':\n LOGGER.warning('Virtual machine must be stopped before deleting!')\n status = self.VMStop()\n\n if status != 'POWERED OFF':\n LOGGER.error('An error occured while stopping virtual machine \"{}\"!'.format(VM_NAME))\n\n else:\n LOGGER.debug('Trying to delete virtual machine \"{}\"...'.format(VM_NAME))\n try:\n oK, msg = self.vSphereServerInstance.delete_vm_by_name(VM_NAME, remove_files=True)\n\n if oK:\n LOGGER.info('Virtual machine \"{}\" deleted successful with message: \"{}\".'.format(VM_NAME, msg))\n\n else:\n LOGGER.warning('Virtual machine \"{}\" NOT delete with message: \"{}\".'.format(VM_NAME, msg))\n\n except Exception as e:\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while deleting virtual machine \"{}\"!'.format(VM_NAME))", "def delete(self, psvm):\n self._delete('/os-psvm/%s' % (base.getid(psvm)))", "def remove(self):\n\t\tcall_sdk_function('PrlVmDev_Remove', self.handle)", "def deleteVirtualMachine(self,node,vmid):\n data = self.connect('delete',\"nodes/%s/qemu/%s\" % (node,vmid),None)\n return data", "def AptUninstall(vm):\n _Uninstall(vm)", "def rm_vector(self, name):\n logging.debug(\"Remove vector %s\" % name)\n self.vectors_db.remove(name)\n self.vectors_norm_db.remove(name)", "def remove_machine(self, url):\n\n model = TestMachine.objects.filter(url=url).first()\n if model:\n self.deactivate_model(model)\n print \"Removed test machine: %s\" % url", "def delete_vm(self, userid):\n LOG.info(\"Begin to delete vm %s\", userid)\n self._smtclient.delete_vm(userid)\n LOG.info(\"Complete delete vm %s\", userid)", "def destroy():\n\n # Stop the program if no init has occurred.\n Vagrant.stop_if_not_init()\n\n # Run vagrant destroy from the vagrant folder.\n command = 
[\"vagrant\", \"destroy\"]\n cwd = Settings.devbox_folder\n try:\n result = subprocess.check_call(command, cwd=cwd)\n except subprocess.CalledProcessError:\n Utilities.log(\"Could not run 'vagrant destroy'.\")\n exit(1)", "def delete_runtime(self, runtime_name, memory):\n self.compute_handler.delete_runtime(runtime_name, memory)", "def shutdownVM(self):\n\t\tlog.info(\"\\tStopping the container...\")\n#\t\texecuteCommandSSH(\"lxc-stop\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"lxc-destroy\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"shutdown -h now\")", "def vcac_vm_destroy(self):\n logging.info(\"Inside vcac_vm_destroy\")\n return None", "def vm_delete(vm_hostname, retire=False):\n\n with _get_vm(vm_hostname, unlock=retire, allow_retired=True) as vm:\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm_status_code = vm.aws_describe_instance_status(\n vm.dataset_obj['aws_instance_id'])\n if vm_status_code != AWS_RETURN_CODES['stopped']:\n raise InvalidStateError(\n '\"{}\" is still running.'.format(vm.fqdn))\n else:\n vm.aws_delete()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n # Make sure the VM has a hypervisor and that it is defined on it.\n # Abort if the VM has not been defined.\n _check_defined(vm)\n\n # Make sure the VM is shut down, abort if it is not.\n if (\n vm.hypervisor\n and vm.hypervisor.vm_defined(vm)\n and vm.is_running()\n ):\n raise InvalidStateError('\"{}\" is still running.'.format(\n vm.fqdn)\n )\n\n # Delete the VM from its hypervisor if required.\n if vm.hypervisor and vm.hypervisor.vm_defined(vm):\n vm.hypervisor.undefine_vm(vm)\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n # Delete the machines cert from puppet in case we want to build one with the same name in the future\n clean_cert(vm.dataset_obj)\n\n # Delete the serveradmin object of this VM\n # or update its state to 'retired' if retire is True.\n if retire:\n vm.dataset_obj['state'] = 'retired'\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and set to \"retired\" state.'.format(\n vm.fqdn)\n )\n else:\n vm.dataset_obj.delete()\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and deleted from Serveradmin'.format(\n vm.fqdn)\n )", "def delete(self, request, m_name):\n machine = Machine.objects.get(name=m_name)\n machine.delete()\n return HttpResponse(HTTPStatus.OK)", "def machine_rm(node=\"dev\", driver='virtualbox'):\n machine = Dockerizing(driver)\n\n # Check that the requested node does not already exist\n if node not in machine.list():\n print(colors.warn | \"Failed:\", colors.bold |\n \"Machine '%s' does not exist\" % node)\n return\n\n _logger.info(colors.bold | \"Trying to remove '%s'\" % node)\n print(machine.remove(node))\n _logger.info(colors.green | \"Removed\")", "def remove_virtualsource(self, name):\n self._auraliser.remove_object(name)", "def destroy(name=None, call=None):\n if call == \"function\":\n raise SaltCloudSystemExit(\n \"The destroy action must be called with -d, --destroy, -a or --action.\"\n )\n ret = {}\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"destroying instance\",\n \"salt/cloud/{}/destroying\".format(name),\n args={\"name\": name},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n session = _get_session()\n vm = _get_vm(name)\n if vm:\n # get vm\n record = session.xenapi.VM.get_record(vm)\n log.debug(\"power_state: %s\", record[\"power_state\"])\n # shut down\n if record[\"power_state\"] != 
\"Halted\":\n task = session.xenapi.Async.VM.hard_shutdown(vm)\n _run_async_task(task, session)\n\n # destroy disk (vdi) by reading vdb on vm\n ret[\"vbd\"] = destroy_vm_vdis(name, session)\n # destroy vm\n task = session.xenapi.Async.VM.destroy(vm)\n _run_async_task(task, session)\n ret[\"destroyed\"] = True\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"destroyed instance\",\n \"salt/cloud/{}/destroyed\".format(name),\n args={\"name\": name},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n if __opts__.get(\"update_cachedir\", False) is True:\n __utils__[\"cloud.delete_minion_cachedir\"](\n name, _get_active_provider_name().split(\":\")[0], __opts__\n )\n __utils__[\"cloud.cachedir_index_del\"](name)\n return ret", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def destroyMachine(self, machine):\r\n try:\r\n self._machines.remove(machine)\r\n except KeyError:\r\n raise InternalError('Tried to remove a non existent machine.')\r\n\r\n machine.destroy()" ]
[ "0.7664913", "0.7518741", "0.71721107", "0.70731395", "0.69732076", "0.6950697", "0.66600686", "0.6613145", "0.651427", "0.65076905", "0.6507135", "0.64830834", "0.6366839", "0.6351328", "0.622328", "0.6189141", "0.6183088", "0.61540633", "0.61245495", "0.61129415", "0.6093355", "0.60925573", "0.6086223", "0.6083816", "0.60536504", "0.60449356", "0.6037946", "0.6037354", "0.603735", "0.60249245" ]
0.7901843
0
Calls a method within the module specified with args provided.
def _call_method(self, module, method, *args, **kwargs): return self.invoke_api(module, method, *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def _call_method(self, module, method, *args, **kwargs):\n if not self._is_vim_object(module):\n return self.invoke_api(module, method, self.vim, *args, **kwargs)\n else:\n return self.invoke_api(module, method, *args, **kwargs)", "def call_module(session, module, args, libgmt):\n c_call_module = libgmt.GMT_Call_Module\n c_call_module.argtypes = [ctypes.c_void_p, ctypes.c_char_p,\n ctypes.c_int, ctypes.c_void_p]\n c_call_module.restype = ctypes.c_int\n\n mode = get_constant('GMT_MODULE_CMD', libgmt)\n status = c_call_module(session, module.encode(), mode,\n args.encode())\n check_status_code(status, 'GMT_Call_Module')", "def _call(self, args):\n a = args.split(' ', 1)\n if a:\n getattr(self, a[0])(*a[1:])", "def do_run(self, args):\n logger.debug(\"do_run() was called\")\n\t\n parser = CrispyArgumentParser(description=self.do_run.__doc__, prog=\"run\")\n parser.add_argument(\"module\", metavar=\"<module>\", help=\"module name\")\n parser.add_argument(\"session_id\", metavar=\"<session id>\", help=\"session to run on\")\n parser.add_argument(\"arguments\", nargs=argparse.REMAINDER, metavar=\"<arguments>\", help=\"module arguments\")\n \n try:\n pargs = parser.parse_args(shlex.split(args))\n except MyParserException as e:\n print e\n return\n\n try:\n target = self.srv.get_client(int(pargs.session_id))\n except Exception as e:\n fprint.error(\"Session id should be an integer.\")\n return\n\n if not target:\n fprint.error(\"Improper session id.\")\n return\n\n try:\n mod = self.srv.get_module(pargs.module)(target)\n except Exception as me:\n fprint.error(\"Error loading \\\"{}\\\" module: {}\".format(pargs.module, me))\n return\n\n try:\n margs = mod.check_args(pargs.arguments)\n except MyParserException as e:\n print e\n return\n\n try:\n target.run_module(mod, margs)\n except Exception as e:\n fprint.error(\"Error running module: {}\".format(e))\n return", "def call(self, *args, **kwargs):", "def call(self, source, method, *args):\n\n function = self.callable(method)\n function.source = source\n return function(*args)", "def exec_method(self, module_name, version=None, client_class=None,\n method_name=None, *args, **kwargs):\n client_class = client_class or 'Client'\n client_version = version or 2\n _client = self.create_client(module_name, client_version,\n client_class)\n try:\n # NOTE(kiennt): method_name could be a combination\n # for example 'servers.list'. 
Here is the\n # workaround.\n method = getattr(_client, method_name.split('.')[0])\n for attr in method_name.split('.')[1:]:\n method = getattr(method, attr)\n return method(*args, **kwargs)\n except Exception as err:\n raise err", "def execute_module(self, module, *args, **opts):\n module_file = module.__file__\n if module_file.endswith('.pyc'):\n module_file = module_file[:-1]\n cmd = [self._path]\n if 'python_options' in opts:\n cmd.extend(opts['python_options'])\n del opts['python_options']\n cmd.append(module_file)\n cmd.extend(args)\n return get_cmd_output(*cmd, **opts)", "def run_module_ground_plan(args):\n raise NotImplementedError", "def run_module(self, module_name, args=[], kwargs={}):\n if not module_loader.has_plugin(module_name):\n raise UnsupportedAnsibleModule(\"Unsupported ansible module \\\"{}\\\"\".format(module_name))\n self.module_name = module_name\n\n previous_frame = inspect.currentframe().f_back\n caller_info = inspect.getframeinfo(previous_frame)\n kwargs.update({\"caller_info\": caller_info})\n\n return self._run_ansible_module(*args, **kwargs)", "def call_method(self, action):\n\n\t\tif action[0] in self.methods:\n\t\t\tself.methods[action[0]](action[0:])\n\t\telse:\n\t\t\tself.no_such_method()", "def run_module(self, path):\n\n module = self.import_module(path)\n result = None\n\n if module:\n try:\n result = module.run()\n except AttributeError:\n self.error('Error Running Module: Missing run() method.')\n except Exception:\n e = sys.exc_info()[1]\n traceback = sys.exc_info()[2]\n self.warning('Exeption caught in module: {0} line: {1}'.format(\n e,\n traceback.tb_lineno))\n self.calls.append({path: result})\n state.save_hook_call(path, result)\n return result", "def execute_module(self):\n raise NotImplementedError", "def call(self, method, name, params=None, payload=None, **kwds):", "def call(args) :\n from caller import bam_call\n bam_call(args)", "def Run(self, args):\n pass", "def _call(self, rpc_method_name, *args, **kwargs):\n method = getattr(self, rpc_method_name)\n return method(*args, **kwargs)", "def call(self, method, *args):\n flatcall = flatten(\n m(n=method, t=self.groupName)[[\n squish(x) for x in args if x is not None]])\n self.socket.write(flatcall + '\\0')", "def runAction(plugin_dir, module_name, function_name, function_args, request, client_info):\n\n \n\n # We will load plugins every time this function is called. It is maybe a little\n # inefficient but it means we don't have to restart when new plugins are added.\n plugins = importPlugins(plugin_dir)\n\n # Find the corresponding module object\n if module_name not in plugins:\n print \"Module {} is not in the plugin list\".format(module_name)\n return False\n\n module_obj = plugins[module_name]\n\n # And the function, if it's present (and is a function)\n if function_name not in module_obj.__dict__:\n print \"Function {} not in module function list\".format(function_name)\n return False\n\n function = module_obj.__dict__[function_name]\n\n if type(function) != types.FunctionType:\n print \"Function {} exists but is not a function! 
({})\".format(function_name, type(function))\n return False\n \n # Parse the args into a list, possibly with param substitution happening\n function_args = _parseArgs(function_args, client_info)\n\n response = function(function_args, request=request, client_info=client_info)\n\n return response", "def __call__(self, args):", "def exec_module(self, module):\n pass", "def exec_function(self, args):\n raise NotImplementedError()", "def call(self, method, *args, **kwargs):\n if method in self.handlers:\n handler = self.handlers[method]\n if self.single_server:\n self.send_request(self.single_server, method, handler, *args, **kwargs)\n else:\n if method in [\"completion\", \"completion_item_resolve\", \"diagnostics\", \"code_action\", \"execute_command\"]:\n method_server_names = self.multi_servers_info[method]\n else:\n method_server_names = [self.multi_servers_info[method]]\n\n for method_server_name in method_server_names:\n method_server = self.multi_servers[method_server_name]\n self.send_request(method_server, method, handler, *args, **kwargs)\n elif hasattr(self, method):\n getattr(self, method)(*args, **kwargs)", "def __launch_python_module(path, cmd, args):\n\n mod_class = None\n mod_inst = None\n\n # We should always be in TOP\n if prop.TOP is not None:\n os.chdir(prop.TOP)\n\n # Next, get the path setup.\n if __update_path() != 0:\n log.e(TAG, \"Unable to update library path!\")\n return -7\n\n # If we got here, we try to load as a python module.\n module = imp.load_source(cmd, path)\n\n if module is None:\n log.e(TAG, \"Error launching module '%s'.\" % cmd)\n return -5\n\n try:\n mod_class = getattr(module, cmd)\n mod_inst = mod_class()\n\n except AttributeError:\n log.e(TAG, \"Unable to find class '%s' in module!\" % cmd)\n return -6\n\n return mod_inst.run(args)", "def call(self, port, method, *args, **kwargs):\n method = self.provides[port][method]\n return method(*args, **kwargs)", "def run(self):\n self.method(*self.args)\n self._schedule()", "def _run_ansible_module(self, *args, **kwargs):\n caller_info = kwargs.pop(\"caller_info\", None)\n if not caller_info:\n previous_frame = inspect.currentframe().f_back\n caller_info = inspect.getframeinfo(previous_frame)\n\n module_args = copy.deepcopy(args)\n module_kwargs = copy.deepcopy(kwargs)\n\n verbosity = module_kwargs.pop(\"verbosity\", None)\n if not verbosity:\n verbosity = self.options.get(\"verbosity\", 2)\n module_ignore_errors = module_kwargs.pop(\"module_ignore_errors\", False)\n module_attrs = module_kwargs.pop(\"module_attrs\", {})\n\n module_info = {\n \"module_name\": self.module_name,\n \"args\": module_args,\n \"kwargs\": module_kwargs,\n \"module_attrs\": module_attrs\n }\n self._log_modules(caller_info, module_info, verbosity)\n\n task = self.build_task(**module_info)\n results = self.run_tasks(self.host_pattern, self.loader, self.im, self.vm, self.options, tasks=[task])\n\n self._log_results(caller_info, module_info, results, verbosity)\n self._check_results(caller_info, module_info, results, module_ignore_errors, verbosity)\n\n if isinstance(self, AnsibleHost):\n results = results[self.hostnames[0]]\n\n return results", "def run(self):\n self.fn(*self.args, **self.kwargs)" ]
[ "0.71005887", "0.71005887", "0.67746526", "0.67205906", "0.66875315", "0.6634551", "0.63056356", "0.6267577", "0.6220619", "0.62195957", "0.61969537", "0.61894363", "0.61247283", "0.6002316", "0.599401", "0.59516126", "0.59427494", "0.59212786", "0.59182453", "0.58980024", "0.5860427", "0.58406645", "0.583374", "0.57871085", "0.5765235", "0.57642174", "0.5764107", "0.57358396", "0.5703977", "0.5693731" ]
0.7608473
0
Iterate through the order and get all recurring billing items
def get_recurring_orderitems(self): subscriptions = [] for orderitem in self.order.orderitem_set.all(): product = orderitem.product if product.is_subscription: self.log_extra("Found subscription product: %s", product.slug) if product.subscriptionproduct.recurring: self.log_extra("Subscription is recurring: %s", product.slug) subscriptions.append(orderitem) elif product.subscriptionproduct.trial_set.count() > 0: self.log_extra( "Not recurring, but it has a trial: %s", product.slug ) subscriptions.append(orderitem) else: self.log_extra("Not a recurring product: %s ", product.slug) else: self.log_extra("Not a subscription product: %s", product.slug) return subscriptions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recurring(self):\n return self.filter(total_billing_cycles__isnull=True)", "def get_all_orders():", "def get_all_orders(): \n data = order_obj.get_all_orders()\n return data", "def get_billing_items(self, identifier):\n\n mask = \"\"\"mask[\n id, description, hostName, domainName, oneTimeAfterTaxAmount, recurringAfterTaxAmount, createDate,\n categoryCode,\n category[name],\n location[name],\n children[id, category[name], description, oneTimeAfterTaxAmount, recurringAfterTaxAmount]\n ]\"\"\"\n return self.client.call(\n 'Billing_Invoice',\n 'getInvoiceTopLevelItems',\n id=identifier,\n mask=mask,\n iter=True,\n limit=100\n )", "def _amount_all(self):\n for order in self:\n amount_untaxed = 0.0\n for line in order.order_items_ids:\n amount_untaxed += line.price_subtotal\n order.update({\n 'amount_untaxed': amount_untaxed,\n })", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def get_order(self):\n #store the orders for the current cycle inside the class\n self.orders = self.firebase.get_data(\"orders\")", "def _amount_all(self):\r\n for order in self:\r\n amount_untaxed = amount_tax = amount_discount = timbre = 0.0\r\n for line in order.order_line:\r\n amount_untaxed += line.price_subtotal\r\n if line.product_id.timbre_fiscal:\r\n amount_tax += line.price_tax - 0.60\r\n timbre = 0.60\r\n else :\r\n amount_tax += line.price_tax\r\n amount_discount += (line.product_uom_qty * line.price_unit * line.discount)/100\r\n order.update({\r\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\r\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\r\n 'amount_discount': order.pricelist_id.currency_id.round(amount_discount),\r\n 'price_total_no_discount': amount_untaxed + amount_discount,\r\n 'timbre': timbre,\r\n 'amount_total': amount_untaxed + amount_tax + timbre,\r\n })", "def generate_orders(self, cr, uid, ids, 
context=None):\n voucher_pool = self.pool.get('account.voucher')\n payment_term_obj = self.pool.get('account.payment.term')\n account_budget_confirmation_obj = self.pool.get('account.budget.confirmation')\n period_obj = self.pool.get('account.period')\n if context is None:\n context = {}\n for order in self.browse(cr, uid, ids, context=context):\n #################################to remind\n total_fixed = total_percent = 0\n for line in order.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (order.amount or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Can not create the payments !\\n\\\n The related payment term is probably miss configured as it gives a computed amount greater than the total permanent payment amount. \\\n The latest line of your payment term must be of type 'balance' to avoid rounding issues.\"))\n # create one move line for the total and possibly adjust the other lines amount\n totlines1 = []\n for o in order.line_ids:\n totlines1 += payment_term_obj.compute(cr, uid, order.payment_term.id, o.amount, order.date or False, context=context)\n \n d = {}\n for k, v in totlines1:\n d.setdefault(k, [k]).append(v)\n totlines = map(tuple, d.values())\n\n for t in totlines :\n #to substract date from the interval number \n order_date = t[0]\n entered_date = datetime.datetime.strptime(order_date, '%Y-%m-%d')\n entered_date = entered_date.date()\n account_id = (order.partner_id.property_account_payable and order.partner_id.property_account_payable.id) or \\\n (order.journal_id.default_credit_account_id and order.journal_id.default_credit_account_id.id)\n period_id = period_obj.find(cr, uid, t[0], context=context)[0]\n\n list_confirm = [conf.id for conf in o.confirmation_ids]\n confirmations = account_budget_confirmation_obj.search(cr, uid, [('id','in', list_confirm),('period_id','=', period_id)], context=context) #('date','=',t[0]),\n\n for confirm in confirmations:\n confirm_id = confirm\n\n voucher_lines = [(0, 0, {'name':ol.name, 'account_id':ol.account_id.id, 'type':'dr',\n 'amount':t[count + 1], 'account_analytic_id':ol.account_analytic_id.id, 'budget_confirm_id': confirm_id })\n for count, ol in enumerate(order.line_ids)]\n res = voucher_pool.onchange_price(cr, uid, 0, voucher_lines, [], partner_id=order.partner_id.id, context=context).get(\"value\", {})\n voucher_dict = {\n 'partner_id' : order.partner_id.id,\n 'account_id': account_id,\n 'company_id' : order.company_id.id,\n 'journal_id' : order.journal_id.id,\n 'period_id': order.period_id.id,\n 'type':'purchase',\n 'date' : t[0],\n 'reference': order.name,\n 'payment_permanent_voucher_id': order.id,\n 'line_ids':voucher_lines,\n 'amount':res.get(\"amount\", 0.0)\n }\n voucher_pool.create(cr, uid, voucher_dict, context=context)\n return self.write(cr, uid, ids, {'state':'done'}, context=context)", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(item.from_date, datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2))\n self.assertEqual(item.to_date, datetime(year=2015, month=1, day=1, hour=4, 
minute=59, second=59))", "def get_account_billing_items(self, create=None, category=None, mask=None):\n\n if mask is None:\n mask = \"\"\"mask[\n orderItem[id,order[id,userRecord[id,email,displayName,userStatus]]],\n nextInvoiceTotalRecurringAmount,\n location, hourlyFlag\n ]\"\"\"\n\n object_filter = {\n \"allTopLevelBillingItems\": {\n \"cancellationDate\": {\n \"operation\": \"is null\"\n },\n \"id\": {\n \"operation\": \"orderBy\",\n \"options\": [\n {\n \"name\": \"sort\",\n \"value\": [\"ASC\"]\n }\n ]\n }\n }\n }\n\n if category:\n object_filter = utils.dict_merge(object_filter,\n {\"allTopLevelBillingItems\": {\"categoryCode\": {\"operation\": category}}})\n if create:\n object_filter = utils.dict_merge(object_filter,\n {\"allTopLevelBillingItems\": {\"createDate\": {\"operation\": \"*=\" + create}}})\n\n return self.client.call('Account', 'getAllTopLevelBillingItems',\n mask=mask, filter=object_filter, iter=True, limit=100)", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(\n item.from_date,\n datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2),\n )\n self.assertEqual(\n item.to_date,\n datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59),\n )", "def test_get_order_items(self):\n pass", "def one_day(status, after):\n return woo.fetch_all_orders(status, after)", "def get_items(self):\n return self.order_items", "def iterate_payments(**kwargs):\n return Payment.payments.filter(**kwargs).all()", "def get_all_receipts(driver):\n for rs in driver.find_elements_by_css_selector('#receiptList > .receipt'):\n m = rs.find_element_by_class_name('merchant').text\n a = rs.find_element_by_class_name('amount').text\n tags = get_tags(rs)\n # created = rs.find_element_by_class_name('created').text\n yield {\n 'merchant': m,\n 'amount': a,\n 'tags': tags,\n # 'created': created\n }", "def _amount_all(self):\n for order in self:\n amount_untaxed = amount_tax = 0.0\n order_amount_total = 0.0\n for line in order.order_line:\n amount_untaxed += line.price_subtotal\n amount_tax += line.price_tax\n self_amount_total = amount_untaxed + amount_tax\n if not order.discount_fixed_percent:\n order_amount_total = self_amount_total\n if order.discount_fixed_percent == 'Percent':\n order_amount_total = self_amount_total * (1 - (order.discount or 0.0) / 100.0)\n if order.discount_fixed_percent == 'Fixed':\n order_amount_total = self_amount_total - order.discount_value\n order.update({\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\n 'amount_before_disc': amount_untaxed + amount_tax,\n 'amount_total': order_amount_total,\n })", "def _compute_gasto_subtotal(self):\n for sub in self:\n sub.recurring_total = sum(\n line.gasto for line in sub.animales_ids)", "def get_account_all_billing_orders(self, limit=100, mask=None):\n\n if mask is None:\n mask = \"\"\"\n orderTotalAmount, userRecord,\n initialInvoice[id,amount,invoiceTotalAmount],\n items[description]\n \"\"\"\n return self.client.call('Billing_Order', 'getAllObjects',\n limit=limit, mask=mask)", "def trackOrderRequest(self):\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( 
hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Order.objects.filter(date_of_order__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\t\n\t\treturn lst", "def get_orders_list(\n self\n ) -> list:\n\n response = self.session.get(\"http://automationpractice.com/index.php?controller=history\")\n\n self.HTMLParser.set_html(response.text)\n\n tbody = self.HTMLParser.find_elements_by_xpath(\"//tbody/tr\")\n\n if not len(tbody):\n raise NoOrderError()\n\n orders = list()\n\n for tr in tbody:\n\n tr = self.HTMLParser.convert_node(tr)\n tds = tr.xpath(\"//td\")\n\n orders.append({\n \"reference\": self._find_reference(tds[0]),\n \"date\": tds[1].text_content().strip(),\n \"value\": tds[2].get(\"data-value\"),\n \"payment_method\": tds[3].text_content(),\n \"status\": self._find_status(tds[4]),\n \"invoice_link\": self._find_invoice_link(tds[5]),\n \"id_order\": self._find_id(tds[5])\n })\n\n return orders", "def returnOrderTrades(self, order_number):", "def _query_get(self, cr, uid, obj='l', context=None):\n \n fiscalyear_obj = self.pool.get('account.fiscalyear')\n fiscalperiod_obj = self.pool.get('account.period')\n account_obj = self.pool.get('account.account')\n journal_obj = self.pool.get('account.journal')\n initial_bal = context.get('initial_bal', False)\n fiscalyear_ids = []\n if context is None:\n context = {}\n #Only Valid Move Lines (BALANCE MOVES)\n query = obj+\".state <> 'draft' \"\n #Filter by Company\n if context.get('company_id', False):\n query += \" AND \" +obj+\".company_id = %s\" % context['company_id']\n #Filter by Move State\n if context.get('state', False):\n if type(context['state']) in (list,tuple) :\n query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE state !='reversed') \" \n # query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE state IN (\"+st+\")) \"\n elif context['state'].lower() != 'all':\n query += \" AND \"+obj+\".move_id IN (SELECT id FROM account_move WHERE account_move.state != '\"+context['state']+\"') \"\n #Get Selected FiscalYear\n if not context.get('fiscalyear', False):\n if context.get('all_fiscalyear', False):\n fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n else:\n if context.get('date_from', False):\n #fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n date_from=context.get('date_from', False)\n date_from2 = datetime.strptime( date_from, '%Y-%m-%d')\n f_code=date_from2.year \n fiscalyear_ids = fiscalyear_obj.search(cr,uid, [ ('code', '=', f_code)])\n else:\n fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])\n \n else:\n #make the context['fiscalyear'] in one dimention list or ids\n fiscalyear_ids = type(context['fiscalyear']) is list and context['fiscalyear'] or 
[context['fiscalyear']]\n fiscalyear_clause = (','.join(map(str, fiscalyear_ids)))\n #Duration Filters\n\n if context.get('date_from', False) and context.get('date_to', False):\n \n if initial_bal:\n \n init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])\n date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start\n \n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) ) \" % (fiscalyear_clause,)\n\n date_from=context['date_from']\n if context.get('date_from', False)==date_start:\n date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)\n date_from= date_1+timedelta(days=1)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date <='%s') \" %(context['date_from'],)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date <'%s') \" %(date_from,)\n\n else:\n if context['type']=='statement':\n \n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') \"%(context['date_from'],context['date_to']) \n elif context['type']=='balance':\n init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])\n\n date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start\n date_from=context['date_from']\n if context.get('date_from', False)==date_start:\n date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)\n date_from= date_1+timedelta(days=1)\n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date > '%s' AND date <= '%s') \"%(date_from,context['date_to']) \n query += \" AND \" +obj+\".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') \"%(context['date_from'],context['date_to']) \n if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False) and context.get('type', False)!='statement':\n if initial_bal:\n period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id\n first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id), ('fiscalyear_id', 'in', fiscalyear_ids)], order='date_start')\n context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period[0], first_period[first_period.index(context['period_from'])-1])\n else:\n context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])\n\n if context.get('periods', False) and context.get('type', False)!='statement':\n period_ids = ','.join(map(str, context['periods']))\n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) \" % (fiscalyear_clause, period_ids)\n else:\n sub_query = \"\"\n if not context.get('date_from', False) or context.get('period_from', False):\n special = initial_bal and (not context.get('date_from', False))\n sub_query = \"AND special = %s\"%(special,)\n query += \" AND \"+obj+\".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) %s) \" % (fiscalyear_clause, sub_query)\n\n #Filter by Journal\n #situation_journal = set(journal_obj.search(cr, uid, [('type', '=', 'situation')], context=context))\n #selected_journals = set(context.get('journal_ids', False) or journal_obj.search(cr, uid, [], context=context))\n #TEST: situation journal when opening balance & not\n #journal_ids = 
context.get('selected_journals', False) and selected_journals or \\\n # (initial_bal and list(selected_journals | situation_journal) or list(selected_journals-situation_journal))\n # if journal_ids:\n # query += ' AND '+obj+'.journal_id IN (%s) ' % ','.join(map(str, journal_ids))\n #if not context.get('selected_journals', False) and not initial_bal and situation_journal:\n #query += ' AND '+obj+'.journal_id NOT IN (%s) ' % ','.join(map(str, situation_journal))\n #Filter by chart of Account\n if context.get('chart_account_id', False):\n child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)\n query += ' AND '+obj+'.account_id IN (%s) ' % ','.join(map(str, child_ids))\n #Filter by Move Line Statement\n if 'statement_id' in context:\n if context.get('statement_id', False):\n query += ' AND '+obj+'.statement_id IN (%s) ' % ','.join(map(str, context['statement_id']))\n else:\n query += ' AND '+obj+'.statement_id IS NULL '\n #Filter by Move Line\n if context.get('move_line_ids', False):\n query += ' AND '+obj+'.id IN (%s) ' % ','.join(map(str, context['move_line_ids']))\n #Filter by Analytic Account Type\n if context.get('analytic_display', False):\n query += ' AND '+obj+\".analytic_account_id IN (SELECT id FROM account_analytic_account WHERE analytic_type=%s) \" % (context.get('analytic_display', False).id,)\n\n return query", "def get_entries(order):\n users_entries = {}\n for item in order.items.all():\n entries_per_order = []\n entries = Entries.objects.filter(orderItem=item.id)\n for ent in entries:\n entries_per_order.append(ent.ticket_number)\n n_order = {\n item.id: entries_per_order\n }\n users_entries.update(n_order)\n return users_entries", "def get_item_balances(self, acc: Account) -> list:\n items = []\n entries = self.get_entries(acc)\n for item in entries.filter(source_invoice=self).order_by(\"id\"):\n assert isinstance(item, AccountEntry)\n settlements = sum_queryset(entries.filter(settled_item=item))\n bal = item.amount + settlements if item.amount is not None else settlements\n items.append((item, bal))\n return items", "def parse_orders(self):\n #save the information from the firebase for this cycle\n self.get_order()\n #Loop through all the stores\n for store_name,store_orders in self.orders.items():\n #Loop through all the orders\n for order_id,order_details in store_orders.items():\n #store order\n self.store_order(store_name,store_orders,order_id,order_details)\n pass", "def get_orders(self):\n return self.order_lst", "def getOrderList(self):\r\n\t\treturn self.orders", "def get_all_orders():\n response = requests.get(\n settings.SHOPIFY_ORDERS_URL,\n auth=(settings.SHOPIFY_API_KEY, settings.SHOPIFY_PASSWORD),\n )\n return response.json()[\"orders\"]" ]
[ "0.6549946", "0.6459227", "0.586903", "0.56301016", "0.55795187", "0.5551268", "0.5542085", "0.55151325", "0.54951745", "0.5471996", "0.54617655", "0.54159284", "0.54017246", "0.53745145", "0.5372561", "0.5370826", "0.536859", "0.5344888", "0.52568704", "0.5231659", "0.5229909", "0.52140254", "0.5205426", "0.51909643", "0.5188204", "0.5163211", "0.5161194", "0.51582795", "0.51580906", "0.5153687" ]
0.6720224
0
Send a log message if EXTRA_LOGGING is set in settings.
def log_extra(self, msg, *args): if self.settings.EXTRA_LOGGING.value: self.log.info("(Extra logging) " + msg, *args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(message):\n if LOGPLEASE:\n logging.info(message)", "def debug_log(self, msg, *args, **kwargs):\n if self.debug:\n self.log.debug(msg, *args, **kwargs)", "def logger(self, message):\n if hasattr(self.log, '__call__'):\n self.log(message.strip())", "def logIt(self, msg):\n\n if (self.logger): self.logger.logIt(msg)", "def logging(self, value: LogLevel) -> None:\n self._data[ATTR_LOGGING] = value\n self.modify_log_level()", "def _log(self, msg, mode=\"info\"):\n if mode == \"info\":\n self._logger.info(msg)\n elif mode == \"warning\":\n self._logger.warning(msg)", "def log(self, level, msg, *args, **kwargs):\n\n if self.logger:\n self.logger.log(level, msg, *args, **kwargs)", "def log(self, level, msg, *args, **kwargs):\n pass", "def debug(msg):\n if(CONFIG['debug']):\n logIt(msg)", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def log(self, level, message, *args, **kwargs):\n if not self.isEnabledFor(level):\n return\n\n message = message.strip()\n message, kwargs = self.process(message, kwargs)\n\n app_id = self.extra['app_id']\n version = self.extra['version']\n\n if cloud_logger_bg is not None:\n # The following usage is highly unconventional:\n # There appears to be no way to log a structured log message\n # asynchronously using the cloud logging client properly.\n # However, internally, the library uses structured logging. As long\n # as we queue the right payload into the worker's queue, we're OK.\n # This works well with google-cloud-logging==1.9.0.\n # It looks shady, but the only decent way to use the async logging\n # support for Stackdriver.\n cloud_logger_bg.worker._queue.put_nowait(\n {\n 'info': {\n 'app_id': app_id,\n 'version': version,\n 'message': message,\n 'level': getLevelName(level)\n },\n 'severity': getLevelName(level),\n 'resource': Resource(type='global', labels={})\n }\n )\n\n message_pretty = f'{app_id}::{version} => {message}'\n self.logger.log(level, message_pretty, *args, **kwargs)", "def log_info(self, msg, *args, **kwargs):\n if self.action_logging_enabled and self._log is not None:\n self._log.info(msg, *args, **kwargs)\n return", "def push_log(self, info, level=logging.INFO, *args, **kwargs):\n log.log(level, info, *args, **kwargs)", "def gated_loginfo(quiet, msg):\n\n if(not quiet):\n loginfo(msg)", "def handle_log(self, workunit, level, *msg_elements):\r\n if level <= self.settings.log_level:\r\n self.do_handle_log(workunit, level, *msg_elements)", "def log_once_at_level(logging_level, message, *args, **kwargs):\n global LOG_ONCE_CACHE\n\n if message not in LOG_ONCE_CACHE:\n LOG_ONCE_CACHE.add(message)\n LOGGER.log(logging_level, message, *args, **kwargs)\n else:\n LOGGER.debug(message, *args, **kwargs)", "def log( self, level, msg, *args, **kwargs ):\n if self.isEnabledFor( level ):\n msg, kwargs = self.process( msg, kwargs )\n self.logger._log( level, Message( msg, args ), (), **kwargs )", "def logging(self, connection:MQTTConnection, level:int, message:str) -> bool:\n\t\treturn True", "def log(self, msg: str) -> None:\n if self.args.verbose:\n print(msg)", "def log(self, message):", "def logg(msg):\n if VERBOSE: print msg", "def log(self, msg, level=1):\n if self.verbosity >= level:\n print(msg)", "def logging_sensitive(*args, **kwargs):\n if config.debug_sensitive:\n logging.debug(*args, **kwargs)\n else:\n logging.debug(\"sensitive false: non-templated message %s\", args[0])", "async def step_log(self, context: CallbackContext) -> None:\n try:\n msg = self.telegram_queue.get_nowait()\n await 
context.bot.send_message(chat_id=self.logging_chat_id, text=msg)\n except queue.Empty:\n pass", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def log(self, message):\n if VERBOSE:\n print self, message", "def _log_some_info(self):\n logging.info('info')", "def log(self, message: str):", "def on_message(self, msg):\n self.log.info(msg)", "def _log(self, format, args, level=None):\n if level is None:\n level = self.log_level\n xbmc.log(\n \"metadata.movie.stupid: %s - - [%s] %s\\n\" % (\n self.client_address[0], self.log_date_time_string(),\n format % args),\n level)" ]
[ "0.64407986", "0.6168677", "0.6071726", "0.6048908", "0.5984047", "0.59096014", "0.58847517", "0.58728087", "0.581197", "0.57885957", "0.5749739", "0.57324046", "0.5715226", "0.5708756", "0.5693098", "0.5669395", "0.5666494", "0.56495476", "0.56373507", "0.5629469", "0.5620782", "0.5618377", "0.56145054", "0.56110334", "0.56086695", "0.5589072", "0.5585492", "0.55832535", "0.5580541", "0.5578333" ]
0.8017174
0
Obtain exactly one item from the iterable or raise an exception.
def exactly_one(iterable):
    i = iter(iterable)
    try:
        item = next(i)
    except StopIteration:
        raise ValueError("Too few items. Expected exactly one.")
    try:
        next(i)
    except StopIteration:
        return item
    raise ValueError("Too many items. Expected exactly one.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_only(seq: Iterable[T]) -> T:\n it = iter(seq)\n try:\n first_element = it.__next__()\n # we use the sentinel approach rather than the usual (evil) Python \"attempt can catch the\n # exception\" approach to avoid raising zillions of spurious exceptions on the expected\n # code path, which makes debugging a pain\n sentinel = object()\n second_element = next(it, sentinel)\n if second_element is sentinel:\n return first_element\n else:\n got_msg: str\n if isinstance(seq, Sized):\n got_msg = str_list_limited(seq, limit=10)\n else:\n got_msg = f\"{first_element!r}, {second_element!r}, and possibly more.\"\n raise ValueError(f\"Expected one item in sequence but got {got_msg}\")\n except StopIteration:\n raise ValueError(\"Expected one item in sequence but got none\")", "def one(self):\n try:\n result = self.next()\n except StopIteration:\n raise ValueError('Less than one result from .one()')\n try:\n self.next()\n except StopIteration:\n return result\n raise ValueError('More than one result from .one()')", "def first(items):\n return next(iter(items or []), None)", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def one(self):\n return next(iter(self), None)", "def first(iterable: t.Iterable[T]) -> T:\n return next(iter(iterable))", "def get_one(_set):\r\n assert _set # _set is not empty\r\n return next(iter(_set))", "def first(collection):\n return next(iter(collection))", "def first(collection):\n return next(iter(collection))", "def fetchone(self):\n try:\n return next(self._results)\n except StopIteration:\n return None", "def get(s: Iterable[T]) -> T:\n return next(iter(s))", "def one(self):\n return self._iter().one()", "def first(self):\n try:\n return self.next()\n except StopIteration:\n return None", "def _NextItem(self):\n if self._injected:\n self._injected = False\n return self._injected_value\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._tap.Done()\n raise\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except (AttributeError, KeyError, TypeError):\n pass\n except IndexError:\n self._tap.Done()\n raise StopIteration\n # Object is not iterable -- treat it as the only item.\n if self._iterable is None or self._stop:\n self._tap.Done()\n raise StopIteration\n self._stop = True\n return self._iterable", "def get_only_element_from_collection(one_element_collection):\n if len(one_element_collection) != 1:\n raise AssertionError(u'Expected a collection with exactly one element, but got: {}'\n .format(one_element_collection))\n return funcy.first(one_element_collection)", "def _first(self, \n iterable, \n condition=lambda x: True):\n try:\n return next(x for x in iterable if condition(x))\n except:\n return None", "def _resolver_first(self, item: Any, *_: Any) -> Any:\n try:\n return next(iter(item))\n except StopIteration:\n assert False # not supposed to happen in current tests", "def one(self, except_all=False):\n\n length = len(self)\n if length > 1:\n raise MultipleResultsFound(\"%s results found.\" % 
length)\n\n result = self.first()\n if result is None and except_all:\n raise NoResultFound\n return result", "def first(items):\r\n return items[0]", "def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None", "def only_get_one(seq, key, source):\n\n def uniquify(seq):\n return list(frozenset(seq))\n res = uniquify([s[key] for s in seq])\n\n if len(res) > 1:\n print >>sys.stderr, 'error: %s too many %ss:' % (source, key)\n for r in res:\n print ' ', r\n return None\n elif not res:\n print 'error: %s zero %ss' % (source, key)\n return None\n else:\n return res[0]", "def peek(self):\n if self.count() <= 0:\n raise ValueError('Cannot peek at value that does not exist')\n return self.items[1]", "def fetchone(cursor):\n\t# type: (Cursor, ) -> Any\n\n\trows = cursor.fetchall()\n\tif len(rows) == 0:\n\t\traise NoResult(\"No result found\")\n\telif len(rows) == 1:\n\t\treturn rows[0]\n\telse:\n\t\traise InconsistentState(\"More than one result found\")", "def find_one(cls, *a, **ka):\n try:\n return cls.find(*a, **ka).next()\n except StopIteration:\n raise KeyError", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _assert_non_empty(iterable):\n first_elem = six.next(iterable, None)\n assert first_elem is not None, first_elem\n return itertools.chain([first_elem], iterable)", "def get_next_as_optional(iterator):\n return iterator.get_next_as_optional()", "def first(l):\n return next(iter(l), None)", "def one_or_none(self):\n return self._iter().one_or_none()", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")" ]
[ "0.73700535", "0.71891385", "0.6987197", "0.68558383", "0.683411", "0.683213", "0.6811499", "0.6796202", "0.6796202", "0.668981", "0.66084677", "0.6602133", "0.65939295", "0.6576815", "0.657131", "0.6566624", "0.64851004", "0.64576167", "0.63524705", "0.6350618", "0.634518", "0.6335714", "0.63163644", "0.6301374", "0.62962353", "0.6281642", "0.62811255", "0.62685657", "0.62416995", "0.6234583" ]
0.82768756
0
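A minimal usage sketch for the exactly_one helper in the record above; it assumes the function is in scope exactly as defined there, and the sample inputs are illustrative only.

# Exactly one element: returned as-is.
assert exactly_one([42]) == 42

# Zero or more than one element: ValueError with the helper's message.
for bad_input in ([], [1, 2]):
    try:
        exactly_one(bad_input)
    except ValueError as exc:
        print(exc)  # "Too few items..." or "Too many items..."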
Obtain the last item from an iterable.
def last(iterable):
    d = deque(iterable, maxlen=1)
    try:
        return d.pop()
    except IndexError:
        raise ValueError("Cannot return last item from empty iterable {!r}".format(iterable))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last(iterable):\n it = iter(iterable)\n item = next(it)\n for item in it:\n pass\n return item", "def last(iterator):\n item = None\n for item in iterator:\n pass\n return item", "def return_last(iter):\n for thing in iter:\n pass\n return thing", "def last_item(self):\n return self.container[self.length-1]", "def last(iterable, *default):\n\tassert len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x", "def last(seq):\n try:\n return seq[-1]\n except TypeError:\n old = None\n it = iter(seq)\n while True:\n try:\n old = next(it)\n except StopIteration:\n return old", "def last_el(x):\n if N.isscalar(x): return x\n else: return x[-1]", "def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._element", "def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._prev._element", "def last(self):\n return self.deque[-1]", "def last(self):\n return _(self._[-1])", "def getLast(self):\r\n return self._data[-1]", "def last(self):\n if self.is_empty():\n raise Emtpy(\"List is empty!\")\n return self._trailer._prev._element", "def last(self):\n if self.ordered:\n queryset = self.reverse()\n else:\n self._check_ordering_first_last_queryset_aggregation(method=\"last\")\n queryset = self.order_by(\"-pk\")\n for obj in queryset[:1]:\n return obj", "def get_last(self):\n return self.get_block(len(self.chain)-1)", "def getLast(self):\n\n if self.firstItem == None:\n raise Exception(\"cannot getLast - linked list is empty\")\n\n # 1. Find the last item\n lastItem = self.firstItem\n while lastItem.next != None:\n lastItem = lastItem.next\n\n # 2. Return the value\n return lastItem", "def last(self):\n return self.last and self.last.value or None", "def at_last(self):\n return self._collection.at_last()", "def peek_last(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.tail.data", "def last_item_index(self) -> int:\n return len(self.all_items) - 1", "def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)", "def peek(self):\n item = self._items[-1]\n return item", "def last(self):\n if self.tail:\n self.cursor = self.tail\n return self.cursor\n return None", "def last(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).last()\n\n return self[-1]", "def get_last(self, limit = 1):\n if len(self.data) == 0:\n return None\n self.sort_and_reduce()\n if len(self.data) < limit:\n limit = len(self.data)\n\n return self.data[-limit:][0]", "def last_index(self, item):\n return _(self.size()._ - 1 - self.reverse().index(item)._)", "def getlast(self, key, default=None):\n \n values = self.getlist(key)\n return values[-1] if values else default", "def last(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._trailer._prev._element #real item just before trailer", "def get_last(self, count):", "def pop_last(self):\n self.pop_item(-1)" ]
[ "0.87920225", "0.8336608", "0.8012257", "0.7952512", "0.7778646", "0.7550775", "0.71611106", "0.7121031", "0.7003908", "0.6988808", "0.69499695", "0.6938263", "0.6879468", "0.68462396", "0.6845747", "0.67840874", "0.678374", "0.6782314", "0.6776444", "0.67622405", "0.67414", "0.67351407", "0.67233706", "0.67119306", "0.66757846", "0.6657703", "0.66344637", "0.6585827", "0.6531066", "0.6430554" ]
0.8511971
1
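A short usage sketch for the deque-based last helper in the record above, assuming it is in scope as written; the deque(maxlen=1) trick means it also consumes one-shot generators.

from collections import deque

# Works for sequences and for generators alike.
assert last([1, 2, 3]) == 3
assert last(x * x for x in range(4)) == 9

# An empty iterable raises the ValueError built by the helper.
try:
    last([])
except ValueError as exc:
    print(exc)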
Given a sequence of proof terms, take resolution on them one by one.
def resolution(self, step):
    res_pts = [self.proof[num] for num in step.assms]
    pt_0 = self.proof[step.assms[0]]
    arity1 = self.steps[step.assms[0]-1].arity
    for i in step.assms[1:]:
        arity2 = self.steps[i-1].arity
        assert self.proof[i].prop == self.steps[i-1].concl, i
        pt_1 = pt_0
        pt_0, arity1 = verit_resolution(pt_0, self.proof[i], arity1, arity2)
    if pt_0.prop == step.concl:
        self.proof[step.seq_num] = pt_0
    else:
        concl_disjs = strip_num(step.concl, step.arity)
        pt_disjs = strip_num(pt_0.prop, step.arity)
        assert set(concl_disjs) == set(pt_disjs)
        implies_pt_norm = ProofTerm("imp_disj", term.Implies(pt_0.prop, Or(*concl_disjs)))
        self.proof[step.seq_num] = implies_pt_norm.implies_elim(pt_0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findLinkedTerms(self):\n for key in self.summaryDict.keys(): # v' in the formula\n if self.getCoverFromModalityInDictionnary(self.summaryDict, key) == 0:\n correlation = 0\n else:\n dep = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,key) / self.getCoverFromModalityInDictionnary(self.summaryDict, key) #cover(v',R')/cover(v'R)\n if dep <= 1:\n correlation = 0\n else:\n correlation = 1 - (1 / dep)\n self.correlationDict[key] = correlation", "def do_resolve(self,args):\n try:\n for solution in self.resolve_all(args):\n self.print_solution(solution)\n except:\n traceback.print_exc(file=sys.stdout)", "def prove_N() -> Proof:\n # Optional Task 6.8", "def prove_R() -> Proof:\n # Optional Task 6.7g", "def solve(self):\n solved = self.formula.satisfy_one() or {}\n sol = [self.fid_to_var(str(var)) for var in list(solved.keys()) if solved[var] == 1]\n sol.sort(key = lambda var: var.split('_')[-1])\n count = self.formula.satisfy_count()\n\n return (sol, count)", "def tfae(self, lst):\n s = lst + [lst[0]]\n for i in range(len(lst)):\n p = pr9(self.axioms+[s[i]], [s[i+1]], seconds, self.options)\n if type(p)==list:\n print i,\"->\",i+1,\":\",s[i+1], \"proved\"\n else:\n print i,\"->\",i+1,\":\", p\n return False, 'No conclusions'\n proofs.append(p)\n return True, proofs", "def _try_heuristics(f):\n if f.is_ground:\n return []\n if f.is_monomial:\n return [S.Zero]*f.degree()\n\n if f.length() == 2:\n if f.degree() == 1:\n return list(map(cancel, roots_linear(f)))\n else:\n return roots_binomial(f)\n\n result = []\n\n for i in [-1, 1]:\n if not f.eval(i):\n f = f.quo(Poly(f.gen - i, f.gen))\n result.append(i)\n break\n\n n = f.degree()\n\n if n == 1:\n result += list(map(cancel, roots_linear(f)))\n elif n == 2:\n result += list(map(cancel, roots_quadratic(f)))\n elif f.is_cyclotomic:\n result += roots_cyclotomic(f)\n elif n == 3 and cubics:\n result += roots_cubic(f, trig=trig)\n elif n == 4 and quartics:\n result += roots_quartic(f)\n elif n == 5 and quintics:\n result += roots_quintic(f)\n\n return result", "def test_solve_polynomial_cv_1a():\n assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)\n assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)\n assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)\n assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)\n assert solveset_real(x*(x**(S(1) / 3) - 3), x) == \\\n FiniteSet(S(0), S(27))", "def pfd_solve (r, w) :\n\tglobal targets\n\ta = [0, 0]\n\tpfd_initialize(r, a)\n\ttargets_array = []\n\tpfd_find_first_target()\n\t\n\tresultArr = []\n\n\twhile len(targets) > 0:\n\t\ttarget = heapq.heappop(targets)\n\t\tresultArr.append(target+1)\n\t\tnew_targets = pfd_clear(target)\n\n\t\tfor i in new_targets:\n\t\t\tdependencies_list[i]-=1\n\t\t\tif dependencies_list[i] == 0:\n\t\t\t\theapq.heappush(targets,i)\n\t\t\t\t\n\t#Prints the result\n\tfor i in xrange(len(resultArr)) :\n\t print resultArr[i],", "def resolve_references_as_possible(s):\n refs = []\n resolved = []\n\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # try to resolve all REFs\n for ref in refs:\n if ref.try_to_point():\n resolved.append(ref)\n\n # for REFs that link up,\n for ref in resolved:\n s.resolve_single_ref( ref )", "def compute_prf_on_selection(arts, forms_set):\n tp=0\n fn=0\n fp=0\n for article in arts:\n for entity in article.entity_mentions:\n if entity.mention in forms_set:\n if entity.gold_link==entity.sys_link:\n tp+=1\n else:\n if entity.sys_link!='--NME--':\n 
fp+=1\n if entity.gold_link!='--NME--':\n fn+=1\n print(tp, fp, fn)\n p=tp/(tp+fp)\n r=tp/(tp+fn)\n f1=2*p*r/(p+r)\n print(p,r,f1)\n return f1", "def test_solve_polynomial_cv_1a():\n assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)\n assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)\n assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)\n assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)\n assert solveset_real(x*(x**(S.One / 3) - 3), x) == \\\n FiniteSet(S.Zero, S(27))", "def evaluate_terms(terms):\n expr_terms = [x for x in terms]\n\n while expr_terms.count('^') != 0:\n expr_terms = eval_expon(expr_terms)\n\n while MUL_DIV_RE.search(str(expr_terms)) is not None:\n expr_terms = eval_a_op_b(expr_terms, 'md')\n\n while len(expr_terms) != 1:\n expr_terms = eval_a_op_b(expr_terms, 'pm')\n\n return expr_terms[0]", "def get_prerequirements(requirements: List[Req]) -> Prereqs:\n all_steps = {step for req in requirements for step in req}\n \n # initialize\n prereqs = {step: set() for step in all_steps}\n\n # add actual requirements\n for pre, post in requirements:\n prereqs[post].add(pre)\n \n return prereqs", "def prediction2(sequence, listPaires, rules) :\n prediction=[]\n for e in sequence:\n for r in rules :\n if r[0]==e[0]:\n next_event=predict_event2(r,e)\n prediction.append(next_event)\n return prediction", "def eterms(tol=0.001):\n from math import e\n\n terms = 1\n approx = 1\n fact = 1\n while abs(approx - e) > tol:\n fact = fact * terms # Factorial of the number of terms\n approx = approx + 1/fact\n terms = terms + 1\n return terms", "def solve(original):\n for noun in range(0, 100):\n for verb in range(0, 100):\n memory = init_memory(original, noun, verb)\n run_program(memory)\n if memory[0] == 19690720:\n return 100 * noun + verb\n raise ValueError(\"No (noun, verb) pair returned the expected output.\")", "def resolve():\n while _TO_RESOLVE:\n obj = _TO_RESOLVE.pop()\n annotations(obj)", "def resolve(self,**bindings):\n for solution in self.resolve_all(**bindings):\n return solution", "def evaluate_polynomials(polynomials: List[Poly]):\n fft = MultiDimNonBinaryFFT(field, root_of_unity, width)\n values = fft.multi_fft(polynomials)\n return values", "def generateAssociationRule(freqSet):", "def test_reuse_loadable_terms(self):\n f1 = SomeFactor([SomeDataSet.foo, SomeDataSet.bar])\n f2 = SomeOtherFactor([SomeDataSet.bar, SomeDataSet.buzz])\n\n graph = self.make_execution_plan(to_dict([f1, f2]))\n resolution_order = list(graph.ordered())\n\n # bar should only appear once.\n assert len(resolution_order) == 6\n assert len(set(resolution_order)) == 6\n self.check_dependency_order(resolution_order)", "def interpret_point_requirements(requirements):\n requirements_for_major_set = set()\n for string in sorted(requirements):\n requirement_object = interpret_requirement(string)\n requirements_for_major_set.add(requirement_object)\n return requirements_for_major_set", "def Cprimebound(relatorlist,Lambda=1):\n F,rels=fg.parseinputwords(relatorlist)\n if not all(r==F.cyclic_reduce(r) for r in rels):\n raise ValueError(\"Relators are not cyclically reduced.\")\n biggestratio=Fraction(1,min(len(r) for r in rels))\n if biggestratio>=Fraction(1,Lambda):\n return 1\n rels.sort(key=len) # sort list of relators with shortest first\n irels=[rel for rel in itertools.chain.from_iterable(zip([w() for w in rels],[(w**(-1))() for w in rels]))] # arrange relators and inverses in a list of the form relator1, inverse of relator1, relator2, inverse of relator2,...\n drels=[x+x for x 
in irels] # double the relators to look for pieces that would have wrapped\n for relatorindex in range(len(rels)):\n relator=irels[2*relatorindex]\n foundbiggest=False\n for L in range(len(relator),int(biggestratio*len(relator)),-1):# only check subwords of length L that would give biggest ratio if they were a piece\n for startingindex in range(len(relator)):\n p=(relator+relator)[startingindex:startingindex+L] # the subword of length L starting at index i in relator as a cyclic word\n # now we need to check if p is a piece\n # we do not need to check lower relatorindices, because we already scanned those relators for pieces\n if any(p in x for x in [(relator+relator)[startingindex+1:len(relator)+startingindex+L-1]]+[drels[i] for i in range(2*relatorindex+1,len(drels))]):# look in this (doubled) relator at higher starting indices, and in all later relators, for other copies of p. If found a matching subword, p is a piece.\n biggestratio=Fraction(len(p),len(relator))\n foundbiggest=True # we search pieces by decreasing length, so first one we find is longest\n if biggestratio>=Fraction(1,Lambda):\n return 1\n break\n if foundbiggest: # if we found the biggest piece in this relator we can move on to the next relator. \n break\n return biggestratio", "def solve_part1(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_same_precedence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def process_what_to_run_concepts(pairs_to_test):\n\n pairs_for_sstesting = []\n # prepare pairs for concpet vs random.\n for pair in pairs_to_test:\n for concept in pair[1]:\n pairs_for_sstesting.append([pair[0], [concept]])\n return pairs_for_sstesting", "def fn(x):\n if len(x) == len(s): ans.append(x)\n for k, v in freq.items(): \n if v >= 2: \n freq[k] -= 2\n fn(k + x + k)\n freq[k] += 2", "def distribution(urls):\n distr = []\n for url in urls:\n r = find_philosophy(url, [], 0)\n if r != -1:\n distr.append(r)\n return distr", "def find_matching(graph, round_to=3):\n \n utils.sanity_check(graph)\n\n if round_to < 1:\n raise utils.AlgorithmError(\"The algorithm requires half-integral intermediate solutions, so rounding to <1 d.p. would break it.\")\n\n F = []\n Gamma = [{i:0 for i in range(len(graph.vs))}] * (len(graph.es) + 1) #Gamma is implemented as a list of dicts.\n\n while True:\n x = linprogs.solve_primal(graph, F, round_to)\n if utils.is_integral(x):\n return [int(i) for i in x] #Because it might not be integral, x is a float. Now we know it is, we'll convert it to an int.\n duals = linprogs.solve_duals(graph, F, x, Gamma, round_to)\n\n H1 = set()\n for S in F:\n if max([Pi.get(S, 0) for Pi in duals]) > 0: #Technically we only need know if there exists a Pi such that Pi(S)>0. 
However, using max does the same thing and is easier to implement and read.\n H1.add(S)\n\n H2 = set()\n cycles = utils.get_cycles(graph, x)\n for cycle in cycles:\n C = set(cycle)\n for element in cycle:\n for cut in H1:\n if element in cut:\n C = set.union(C, set(cut))\n H2.add(tuple(C))\n\n F = list(set.union(H1, H2))\n Gamma = deepcopy(duals)", "def resolve(requirements, obtainer=None, interpreter=None, platform=None):\r\n cache = _DistributionCache()\r\n interpreter = interpreter or PythonInterpreter.get()\r\n platform = platform or Platform.current()\r\n obtainer = obtainer or Obtainer.default(platform=platform, interpreter=interpreter)\r\n\r\n requirements = maybe_requirement_list(requirements)\r\n distribution_set = defaultdict(list)\r\n requirement_set = defaultdict(list)\r\n processed_requirements = set()\r\n\r\n def packages(requirement, existing=None):\r\n if existing is None:\r\n existing = obtainer.iter(requirement)\r\n return [package for package in existing\r\n if package.satisfies(requirement)\r\n and package.compatible(interpreter.identity, platform)]\r\n\r\n def requires(package, requirement):\r\n if not cache.has(package):\r\n dist = obtainer.obtain(package)\r\n if dist is None:\r\n raise Untranslateable('Package %s is not translateable.' % package)\r\n if not distribution_compatible(dist, interpreter, platform):\r\n raise Untranslateable('Could not get distribution for %s on appropriate platform.' %\r\n package)\r\n cache.put(package, dist)\r\n dist = cache.get(package)\r\n return dist.requires(extras=requirement.extras)\r\n\r\n while True:\r\n while requirements:\r\n requirement = requirements.pop(0)\r\n requirement_set[requirement.key].append(requirement)\r\n # TODO(wickman) This is trivially parallelizable\r\n distribution_list = distribution_set[requirement.key] = packages(\r\n requirement,\r\n existing=distribution_set.get(requirement.key))\r\n if not distribution_list:\r\n raise Unsatisfiable('Cannot satisfy requirements: %s' % requirement_set[requirement.key])\r\n\r\n # get their dependencies\r\n for requirement_key, requirement_list in requirement_set.items():\r\n new_requirements = OrderedSet()\r\n highest_package = distribution_set[requirement_key][0]\r\n for requirement in requirement_list:\r\n if requirement in processed_requirements:\r\n continue\r\n new_requirements.update(requires(highest_package, requirement))\r\n processed_requirements.add(requirement)\r\n requirements.extend(list(new_requirements))\r\n\r\n if not requirements:\r\n break\r\n\r\n to_activate = set()\r\n for distributions in distribution_set.values():\r\n to_activate.add(cache.get(distributions[0]))\r\n return to_activate" ]
[ "0.5308942", "0.5248787", "0.52151734", "0.5174763", "0.5106836", "0.5090688", "0.50833756", "0.5019919", "0.49778193", "0.4962607", "0.49478337", "0.4927897", "0.49254432", "0.49050832", "0.4892555", "0.48887312", "0.48810247", "0.48710805", "0.48520687", "0.48300308", "0.48213813", "0.48201483", "0.48096198", "0.4805111", "0.48006034", "0.47910058", "0.47854987", "0.4785166", "0.47810757", "0.47796208" ]
0.5916597
0
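The record above chains verit_resolution over a framework-specific ProofTerm object, which cannot be reproduced here; the standalone sketch below only illustrates the underlying idea of resolving clauses one by one, with clauses modelled as frozensets of (name, polarity) literals — every name in it is hypothetical.

def resolve_clauses(c1, c2):
    # One propositional resolution step: remove a complementary pair of
    # literals and merge the remainders; return None if no pair exists.
    for lit in c1:
        complement = (lit[0], not lit[1])
        if complement in c2:
            return (c1 - {lit}) | (c2 - {complement})
    return None

clauses = [frozenset({("p", True), ("q", True)}),   # p or q
           frozenset({("p", False), ("r", True)}),  # not p or r
           frozenset({("q", False)})]               # not q
result = clauses[0]
for clause in clauses[1:]:   # resolve them one by one
    result = resolve_clauses(result, clause)
print(sorted(result))        # [('r', True)]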
Provides a context manager for doing model inference. This puts certain layers into "inference mode", if necessary (e.g. batch normalization and dropout).
def inference_mode_on():
    Layer.inference_mode = True
    with ng.metadata(mode="inference"):
        yield
    Layer.inference_mode = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inference_context(model):\n training_mode = model.training\n model.eval()\n yield\n model.train(training_mode)", "def inference(model, data, diagnostics, seed, extra_fitting_args):\n pass", "def inference(self, dataset, model_dir):\n raise NotImplementedError", "def create_inference_session(self):\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n self.infer_session = onnxruntime.InferenceSession(\n self.augmented_model_path,\n sess_options=sess_options,\n providers=self.execution_providers,\n )", "def set_mode_inference(self):\n self._set_mode('inference')\n return self", "def inference(self):\n raise NotImplementedError", "def inference():\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n\n with tf.variable_scope(\"inference\"):\n vgg_net(weights)", "def inference_session(self, **kwargs) -> InferenceSession:\n\n with InferenceSession(self.sequence_manager, **kwargs) as session, self.use_session(session):\n yield session", "def do_inference(self, output_file = None):\n return", "def infer(self, example, model):\n asp_input = model + '\\n\\n' + example + '\\n\\n' + inference_program_ec\n ctl = clingo.Control()\n ctl.add(\"base\", [], asp_input)\n ctl.ground([(\"base\", [])], context=self)\n ctl.solve(on_model=self.show_model)", "def run_inference(dataset, model, executor_):\n for batch in dataset:\n results = model.inference(batch)\n for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):\n if stats is not None:\n yield stats\n return", "def inference_preprocess(self):\n return", "def run_infer(infer_model, model_dir, infer_sess):\n with infer_model.graph.as_default():\n loaded_infer_model, global_step = model_helper.create_or_load_model(\n model_dir, infer_model.model, infer_sess)\n \n output_tuple = loaded_infer_model.infer(infer_sess)\n return output_tuple", "def inference(self):\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].send(\"inference\")\n \n ## wait for the finalization to be completed\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].recv()", "def infer(self, request, datastore=None):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Inference Task\",\n )\n\n task = self._infers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Inference Task is not Initialized. 
There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n request[\"description\"] = task.description\n\n image_id = request[\"image\"]\n if isinstance(image_id, str):\n datastore = datastore if datastore else self.datastore()\n if os.path.exists(image_id):\n request[\"save_label\"] = False\n else:\n request[\"image\"] = datastore.get_image_uri(request[\"image\"])\n\n if os.path.isdir(request[\"image\"]):\n logger.info(\"Input is a Directory; Consider it as DICOM\")\n\n logger.debug(f\"Image => {request['image']}\")\n else:\n request[\"save_label\"] = False\n\n if self._infers_threadpool:\n\n def run_infer_in_thread(t, r):\n handle_torch_linalg_multithread(r)\n return t(r)\n\n f = self._infers_threadpool.submit(run_infer_in_thread, t=task, r=request)\n result_file_name, result_json = f.result(request.get(\"timeout\", settings.MONAI_LABEL_INFER_TIMEOUT))\n else:\n result_file_name, result_json = task(request)\n\n label_id = None\n if result_file_name and os.path.exists(result_file_name):\n tag = request.get(\"label_tag\", DefaultLabelTag.ORIGINAL)\n save_label = request.get(\"save_label\", False)\n if save_label:\n label_id = datastore.save_label(\n image_id, result_file_name, tag, {\"model\": model, \"params\": result_json}\n )\n else:\n label_id = result_file_name\n\n return {\"label\": label_id, \"tag\": DefaultLabelTag.ORIGINAL, \"file\": result_file_name, \"params\": result_json}", "def __call__(self, *args, **kwargs):\n is_learning = kwargs.get('is_learning', True)\n if is_learning:\n return self.encoder_learning_model(args[0])\n else:\n return self.encoder_inference_model(args[0])", "def __call__(self, *args, **kwargs):\n is_learning = kwargs.get('is_learning', True)\n if is_learning:\n return self.encoder_learning_model(args[0])\n else:\n return self.encoder_inference_model(args[0])", "def inference(images, sess, num_classes, for_training=False, restore_logits=True,\n scope=None):\n # Parameters for BatchNorm.\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': BATCHNORM_MOVING_AVERAGE_DECAY,\n # epsilon to prevent 0s in variance.\n 'epsilon': 0.001,\n }\n # Set weight_decay for weights in Conv and FC layers.\n with scopes.arg_scope([ops.conv2d, ops.fc], weight_decay=0.00004):\n with scopes.arg_scope([ops.conv2d],\n stddev=0.1,\n activation=tf.nn.relu,\n batch_norm_params=batch_norm_params):\n logits, endpoints = inception_v3(\n images,\n sess,\n dropout_keep_prob=0.8,\n num_classes=num_classes,\n is_training=False,\n restore_logits=False,\n scope=scope)", "def inference_step(self, batch: Any, **kwargs) -> Dict[str, Any]:\n return self.model.inference_step(batch, **kwargs)", "def __setup_model(self, **kwargs):\n self.model_architecture = kwargs['model_architecture'].upper()\n self.model = Classifier.IMAGENET_MODELS[self.model_architecture](\n pretrained=True\n )\n\n if 'input_size' in kwargs: # Loading from a checkpoint\n self.input_size = kwargs['input_size']\n self.model.current_epoch = kwargs['current_epoch']\n\n else: # No checkpoint, will be creating a new classifier for the model\n # The number of features coming from the feature detector CNN\n if 'ALEXNET' in self.model_architecture:\n self.input_size = self.model.classifier[1].in_features\n elif 'VGG' in self.model_architecture:\n self.input_size = self.model.classifier[0].in_features\n elif 'DENSENET' in self.model_architecture:\n self.input_size = self.model.classifier.in_features\n\n # Freeze the feature detector parameters to prevent backpropagating\n # through them.\n for 
param in self.model.parameters():\n param.requires_grad = False\n\n self.model.current_epoch = 1\n\n self.output_size = kwargs['output_size']\n self.hidden_layers = kwargs['hidden_layers']\n self.learn_rate = kwargs['learn_rate']\n self.drop_p = kwargs['drop_p']\n\n self.model.class_to_idx = kwargs['class_to_idx']\n self.model.classifier = Network(self.input_size,\n self.output_size,\n self.hidden_layers,\n self.drop_p)\n\n if 'model_state_dict' in kwargs: # load the state from checkpoint\n self.model.load_state_dict(kwargs['model_state_dict'])\n\n self.criterion = nn.NLLLoss()\n self.optimizer = optim.Adam(self.model.classifier.parameters(),\n lr=self.learn_rate)\n\n if 'optimizer_state_dict' in kwargs: # load the state from checkpoint\n self.optimizer.load_state_dict(kwargs['optimizer_state_dict'])", "def fallback_inference(self, onnx_model):\n from polygraphy.comparator import IterationResult\n\n with G_LOGGER.verbosity(G_LOGGER.severity + 10):\n load_model = onnx_backend.ModifyOutputs(onnx_model, outputs=constants.MARK_ALL, copy=True)\n with onnxrt_backend.OnnxrtRunner(\n onnxrt_backend.SessionFromOnnx(onnx_backend.BytesFromOnnx(load_model))\n ) as runner:\n # We want to set input_metadata only - not user_input_metadata, so that user_input_metadata\n # will be populated by the --model-inputs argument.\n data_loader = self.data_loader_args.get_data_loader()\n data_loader.input_metadata = runner.get_input_metadata()\n feed_dict = data_loader[0]\n\n with G_LOGGER.verbosity(G_LOGGER.severity - 10):\n G_LOGGER.info(\n \"Running fallback shape inference using input metadata:\\n{:}\".format(\n TensorMetadata.from_feed_dict(feed_dict)\n )\n )\n\n outputs = runner.infer(feed_dict)\n # We include the inputs here so that we have values for all tensors in the model.\n outputs.update(feed_dict)\n # Use IterationResult here since it can handle very large tensors by saving to disk.\n # Layerwise outputs might otherwise take up too much memory.\n return IterationResult(outputs), TensorMetadata.from_feed_dict(outputs)", "def __call__(self, *args, **kwargs):\n is_learning = kwargs.get('is_learning', True)\n if is_learning:\n return self.encoder_trainable_model(args[0])\n return self.encoder_inference_model(args[0])", "def inference(model, image, batch_size):\n image = Variable(image)\n image = image.cuda()\n return common.time_inference(inference_func=model, \n inference_func_args={'x': image}, \n batch_size=batch_size)", "def infinite_infer_run():\n try:\n # This cat-dog model is implemented as binary classifier, since the number\n # of labels is small, create a dictionary that converts the machine\n # labels to human readable labels.\n model_type = 'classification'\n output_map = {0: 'dog', 1: 'cat'}\n\n # Create an IoT client for sending to messages to the cloud.\n client = greengrasssdk.client('iot-data')\n iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])\n\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n\n # The sample projects come with optimized artifacts, hence only the artifact\n # path is required.\n model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'\n\n # Load the model onto the GPU.\n client.publish(topic=iot_topic, payload='Loading action cat-dog model')\n model = awscam.Model(model_path, {'GPU': 1})\n client.publish(topic=iot_topic, payload='Cat-Dog model loaded')\n\n # Since this is a binary 
classifier only retrieve 2 classes.\n num_top_k = 2\n\n # The height and width of the training set images\n input_height = 224\n input_width = 224\n\n # Do inference until the lambda is killed.\n while True:\n # inference loop to add. See the next step \n ...\n\n\n except Exception as ex:\n client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))\n# snippet-end:[deeplens.python.deeplens_inference_lambda.inference_loop]\n\n# snippet-start:[deeplens.python.deeplens_inference_lambda.inference_step]\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n # Resize frame to the same size as the training set.\n frame_resize = cv2.resize(frame, (input_height, input_width))\n # Run the images through the inference engine and parse the results using\n # the parser API, note it is possible to get the output of doInference\n # and do the parsing manually, but since it is a classification model,\n # a simple API is provided.\n parsed_inference_results = model.parseResult(model_type,\n model.doInference(frame_resize))\n # Get top k results with highest probabilities\n top_k = parsed_inference_results[model_type][0:num_top_k]\n # Add the label of the top result to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness\n cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),\n cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n # Send the top k results to the IoT console via MQTT\n cloud_output = {}\n for obj in top_k:\n cloud_output[output_map[obj['label']]] = obj['prob']\n client.publish(topic=iot_topic, payload=json.dumps(cloud_output))", "def inference(self, x, inference_args, spemb=None, *args, **kwargs):\n # setup batch axis\n ilens = torch.tensor([x.shape[0]], dtype=torch.long, device=x.device)\n xs = x.unsqueeze(0)\n if spemb is not None:\n spembs = spemb.unsqueeze(0)\n else:\n spembs = None\n\n # get option\n alpha = getattr(inference_args, \"fastspeech_alpha\", 1.0)\n\n # inference\n _, outs, _ = self._forward(\n xs,\n ilens,\n spembs=spembs,\n is_inference=True,\n alpha=alpha,\n ) # (1, L, odim)\n\n return outs[0], None, None", "def inference():\n #to feed the network\n _Xs_images = tf.placeholder(tf.float32,shape=[None,IMG_FLAT],name='images')\n _Xs = tf.reshape(_Xs_images, shape=[-1, IMAGE_DIM,IMAGE_DIM,IMAGE_DEPTH])\n _Ys_labels = tf.placeholder(tf.int32,shape=[None],name='labels')\n _Ys = tf.one_hot(_Ys_labels,depth=NUM_CLASSES)\n #input the image and get the softmax output \n fc_layer2 = model(_Xs) \n \n # predicted output and actual output\n _y_pred = tf.cast(tf.argmax(fc_layer2,1),dtype=tf.float32)\n _y = tf.cast(tf.argmax(_Ys,1),dtype=tf.float32)\n #finding the accuracy \n correct_prediction = tf.equal(_y_pred,_y)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n with tf.Session() as session:\n session.run(tf.initialize_all_variables())\n saver = tf.train.import_meta_graph('/home/jay/Deep_Structures/TF/my_test_model.meta')\n saver.restore(session,'/home/jay/Deep_Structures/TF/my_test_model')\n all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n print all_vars\n #session.run(tf.initialize_all_variables())\n #ckpt = 
tf.train.get_checkpoint_state(os.path.dirname('/home/jay/Deep Network Structures/Tensorflow/TrainedModels/'))\n #if ckpt and ckpt.model_checkpoint_path:\n # tf.train.Saver.restore(session, ckpt.model_checkpoint_path)\n _batch_acc = []\n images,labels = get_data(isTraining=False)\n for j in range(images.shape[0] / BATCH_SIZE + 1):\n \n _trainXs = images[j*BATCH_SIZE:(j+1)*BATCH_SIZE,:]\n _trainYs= labels[j*BATCH_SIZE:(j+1)*BATCH_SIZE]\n \n feed_dict={_Xs_images:_trainXs,_Ys_labels:_trainYs}\n _miniAcc = session.run(accuracy,feed_dict)\n _batch_acc.append(_miniAcc) \n msg = \"Accuracy on Test-Set: {0:.1%}\"\n print(msg.format(sum(_batch_acc)/float(len(_batch_acc))))", "def single_inference_process_fn(inference_initializer, inference_mode_config, in_project_meta_json, request_queue,\n result_meta_queue, progress_queue, project):\n single_image_inference = inference_initializer()\n inference_mode = InferenceModeFactory.create(\n inference_mode_config, ProjectMeta.from_json(in_project_meta_json), single_image_inference)\n\n project_meta_sent = False\n req = ''\n while req is not None:\n req = request_queue.get()\n if req is not None:\n # Send the resulting project meta to the parent project to make sure we only write the meta JSON once.\n if not project_meta_sent:\n try:\n result_meta_queue.put(inference_mode.out_meta.to_json(), block=False)\n except queue.Full:\n pass\n project_meta_sent = True\n\n in_ann = Annotation.load_json_file(req.item_paths.ann_path, inference_mode.out_meta)\n ann = inference_mode.infer_annotate_image_file(req.item_paths.img_path, in_ann)\n out_dataset = project.datasets.get(req.ds_name)\n out_dataset.add_item_file(\n req.item_name, req.item_paths.img_path, ann=ann, _validate_item=False, _use_hardlink=True)\n progress_queue.put(1)", "def main(_):\n if not FLAGS.model_output_dir:\n raise ValueError(\n \"Undefined model output directory. Perhaps you forgot to set the --model_output_dir flag?\")\n \n if FLAGS.predict_input_file:\n decode()\n else:\n train()", "def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})", "def infer(self, input):\n input = self._get_encoding_form(input)\n input = self.inference_model(input)\n self.latent.infer(input)" ]
[ "0.7924351", "0.66990066", "0.6458931", "0.6441347", "0.6344806", "0.63110465", "0.6292445", "0.61777896", "0.6047463", "0.5930157", "0.58026665", "0.57661873", "0.57472277", "0.5713225", "0.56964165", "0.5596042", "0.5596042", "0.55773693", "0.5556982", "0.55514663", "0.54896045", "0.5465052", "0.54561114", "0.5424826", "0.54236716", "0.53887695", "0.5386428", "0.5325443", "0.53231937", "0.531275" ]
0.71585065
1
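The generator above toggles a framework-level flag inside an ng.metadata scope; the sketch below shows the same toggle-and-restore pattern in plain Python with contextlib, leaving out the ngraph-specific metadata call. The Layer class here is a stand-in rather than the framework's, and the try/finally restore is an extra safeguard not present in the original.

from contextlib import contextmanager

class Layer:
    inference_mode = False  # class-level flag, as in the record above

@contextmanager
def inference_mode_on():
    Layer.inference_mode = True
    try:
        yield
    finally:
        # Restore the flag even if inference raises.
        Layer.inference_mode = False

with inference_mode_on():
    assert Layer.inference_mode is True
assert Layer.inference_mode is False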
Initialization function for the lut. After using the initialization to fill the whole array, set the part that represents padding to be 0.
def lut_init(self, axes, pad_word_axis, pad_idx):
    init_w = self.init(axes)
    if axes.index(pad_word_axis) is 0:
        init_w[pad_idx] = 0
    else:
        init_w[:, pad_idx] = 0
    return init_w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def padding(old, l):\n new = deepcopy(old)\n for i, j in enumerate(new):\n new[i] += [0] * (l - len(j))\n new[i] = j[:l]\n return new", "def zero_pad(data):\n N = len(data)\n pow_2 = np.ceil(np.log2(N))\n return np.pad(data,(0,int((2**pow_2)-N)),'constant')", "def pad_with_zeros(array, padding_by_axis):\n shape = [\n size + padding_by_axis.get(axis, 0)\n for axis, size in enumerate(array.shape)\n ]\n\n position = tuple(\n slice(padding_by_axis.get(axis, 0) // 2,\n size - (padding_by_axis.get(axis, 0) + 1) // 2, 1)\n for axis, size in enumerate(shape)\n )\n\n padded_array = np.zeros(shape=shape, dtype=array.dtype)\n padded_array[position] = array\n\n return padded_array", "def init_Npad(ROI, compression = 8):\n \n if (ROI[2]-ROI[0])>(ROI[3]-ROI[1]):\n Npad = (ROI[2]-ROI[0])//compression \n else:\n Npad = (ROI[3]-ROI[1])//compression \n \n return Npad", "def pad(x, padding, fill_value=0):\n input_shape = x.shape\n output_shape = []\n indices = []\n\n for dim, pad in enumerate(padding):\n try:\n left_pad, right_pad = pad\n except TypeError:\n left_pad = right_pad = pad\n output_shape.append(left_pad + input_shape[dim] + right_pad)\n indices.append(slice(left_pad, left_pad + input_shape[dim]))\n\n if fill_value:\n out = T.ones(output_shape) * fill_value\n else:\n out = T.zeros(output_shape)\n return T.set_subtensor(out[tuple(indices)], x)", "def pad_from_beginning_fast(vals, maxlen):\r\n length = len(vals)\r\n matrix = np.zeros((length, maxlen))\r\n lens = [len(v) for v in vals] # only iteration\r\n mask = np.arange(maxlen)[::-1] < np.array(lens)[:, None] # key line\r\n matrix[mask] = np.concatenate(vals)\r\n return matrix", "def padding(a, dim):\n\n return np.pad(a, (0, dim-len(a)), 'constant', constant_values=(0))", "def zfill(self, width):\n return asarray(zfill(self, width))", "def padArray(ori_array, pad_size):\n if (pad_size > 0):\n [x_size, y_size] = ori_array.shape\n lg_array = numpy.ones((x_size+2*pad_size,y_size+2*pad_size))\n lg_array[pad_size:(x_size+pad_size),pad_size:(y_size+pad_size)] = ori_array.astype(numpy.float64)\n lg_array[0:pad_size,:] = numpy.flipud(lg_array[pad_size:2*pad_size,:])\n lg_array[(x_size+pad_size):(x_size+2*pad_size),:] = numpy.flipud(lg_array[x_size:(x_size+pad_size),:])\n lg_array[:,0:pad_size] = numpy.fliplr(lg_array[:,pad_size:2*pad_size])\n lg_array[:,(y_size+pad_size):(y_size+2*pad_size)] = numpy.fliplr(lg_array[:,y_size:(y_size+pad_size)])\n return lg_array\n \n else:\n return ori_array", "def _pad_region(region_data: bytes, *,\n tile_size: int,\n offset: int,\n pad_value: int = 0) -> bytes:\n region_size = len(region_data)\n assert len(region_data) % tile_size == 0\n assert tile_size >= offset\n assert pad_value < 2 ** 8\n tile_count = region_size // tile_size\n original_linear = np.frombuffer(region_data, dtype=np.ubyte)\n original_2d = original_linear.reshape(tile_count,\n tile_size)\n padded = np.insert(original_2d, offset, pad_value, axis=1)\n return padded.tobytes()", "def __init__(self, padding, padding_mode, **kwargs):\n self._padding = padding\n self._padding_mode = padding_mode\n super(Pad1D, self).__init__(**kwargs)", "def padding_mask(lens):\n bs, max_len = len(lens), max(lens)\n mask = torch.zeros(bs, 1, max_len)\n for i, l in enumerate(lens):\n mask[i, :, :l] = 1\n mask = mask > 0\n return mask", "def lut(threshold):\n\n return [255 if i<=threshold else 0 for i in range(256)]", "def initializeOffsets(bitPosition: int, value: typing.Any) -> int:\n\n return value.initializeOffsets(bitPosition)", "def initializeOffsets(bitPosition: int, 
_value: float) -> int:\n\n return bitPosition + Float64ArrayTraits.bitSizeOf()", "def __init__(\n self, z, scale=None, stretch=None, shift=None, linear=None, const=None,\n ):\n # stretch can be eliminated by bringing into shift\n # (since multiplying does not change zero locations)\n if stretch is not None and shift is not None:\n shift = shift/stretch\n stretch = None\n\n self._z = z\n\n # we can also eliminate scaling,\n # but this is taken care of by parent class\n super(ZerosInd, self).__init__(\n scale=scale, stretch=stretch, shift=shift,\n linear=linear, const=const,\n )", "def zero_pad(X, padding_width, dims):\n dims = (dims) if isinstance(dims, int) else dims\n pad = [(0, 0) if idx not in dims else (padding_width, padding_width)\n for idx in range(len(X.shape))]\n X_padded = np.pad(X, pad, 'constant')\n return X_padded", "def _pad_with_zeros(self, X, margin):\n newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))\n x_offset = margin\n y_offset = margin\n newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X\n return newX", "def pad_zeros_(self, n):\n if n == 0:\n return\n self.factors = [np.column_stack((f, np.zeros((f.shape[0], n))))\n for f in self.factors]\n self.rank += n", "def set_0_n(tik_instance, dst):\n tmp = tik_instance.Tensor(\n \"float32\", [64], name=\"tmp\", scope=tik.scope_ubuf)\n with tik_instance.for_range(0, 64) as i:\n tmp[i] = i\n\n data_len = dst.shape[0]\n repeat_times = data_len // 64\n tail = data_len % 64\n\n if repeat_times > 0:\n with tik_instance.for_range(0, repeat_times) as i:\n tik_instance.vadds(64, dst[i * 64], tmp, i * 64, 1, 1, 1, 8, 8)\n\n if tail > 0:\n tik_instance.vadds(tail, dst[repeat_times * 64],\n tmp, repeat_times * 64,\n 1, 1, 1, 8, 8)", "def pad_and_onehot(data, pad_len=None, extra_padding=200):\n if pad_len is None:\n pad_len = max(len(x) for x in data) + extra_padding\n data = [\n onehot(np.pad(trace, (0, pad_len - len(trace)), mode=\"constant\"))\n for trace in data\n ]\n return pad_len, np.array(data)", "def len_unpadded(self) -> int:", "def _pad_data(data, pad_length, padding_type='same'):\n\n # get the sampling period (or distance between sampling points, for PLUX devices this is always 1)\n # it is assumed that the signals are equidistantly sampled therefore only the distance between to sampling points\n # is needed to calculate the sampling period\n T = data[:, 0][1] - data[:, 0][0]\n\n if padding_type == 'same':\n\n # create the 'same' padding array\n padding = np.tile(data[-1, 1:], (pad_length, 1))\n\n elif padding_type == 'zero':\n\n # get the number of columns for the zero padding\n num_cols = data.shape[1] - 1 # ignoring the time/sample column\n\n # create the zero padding array\n padding = np.zeros((pad_length, num_cols))\n\n else:\n\n IOError('The padding type you chose is not defined. 
Use either \\'same\\ or \\'zero\\'.')\n\n # create the time / sample axis that needs to be padded\n start = data[:, 0][-1] + T\n stop = start + (T * pad_length)\n time_pad = np.arange(start, stop, T)\n time_pad = time_pad[:pad_length] # crop the array if there are to many values\n\n # expand dimension for hstack operation\n time_pad = np.expand_dims(time_pad, axis=1)\n\n # hstack the time_pad and the zero_pad to get the final padding array\n pad_array = np.hstack((time_pad, padding))\n\n # vstack the pad_array and the new_array\n padded_data = np.vstack([data, pad_array])\n\n return padded_data", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarUInt64ArrayTraits.bitSizeOf(bitPosition, value)", "def _pad_simple(array, pad_width, fill_value=None):\n # Allocate grown array\n new_shape = tuple(\n left + size + right\n for size, (left, right) in zip(array.shape, pad_width)\n )\n order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order\n padded = np.empty(new_shape, dtype=array.dtype, order=order)\n\n if fill_value is not None:\n padded.fill(fill_value)\n\n # Copy old array into correct space\n original_area_slice = tuple(\n slice(left, left + size)\n for size, (left, right) in zip(array.shape, pad_width)\n )\n padded[original_area_slice] = array\n\n return padded, original_area_slice", "def im_lut(q, z):\n lut = np.zeros(BITS)\n for i in range(len(q)):\n lut[z[i]:z[i+1]] = q[i]\n lut[-1] = q[-1] # Handle with the edge\n return lut", "def _zero_pad(self, kernel, size):\n if len(size) != kernel.ndim:\n size = kernel.shape[:1] + tuple(size) + kernel.shape[-1:]\n padsize = np.array(size) - np.array(kernel.shape)\n paddown = padsize // 2\n padup = padsize - paddown\n padarray = np.concatenate((padup[..., None],\n paddown[..., None]), axis=1)\n pads = tuple([tuple(p) for p in padarray])\n kernel_pad = np.pad(kernel, pads, 'constant', constant_values=0)\n return kernel_pad", "def initializeOffsets(bitPosition: int, value: int) -> int:\n\n return bitPosition + VarInt64ArrayTraits.bitSizeOf(bitPosition, value)", "def __init__(self, nbits, fill=0):\n # assuming that the size of ``unsigned int`` is 32 bits.\n if divisible(nbits, BitArray._UNSIGNED_INT):\n nints = nbits // BitArray._UNSIGNED_INT\n else:\n nints = (nbits // BitArray._UNSIGNED_INT) + 1\n fill = (2 ** BitArray._UNSIGNED_INT - 1) if fill else 0\n self.bits = array.array('I')\n for _ in xrange(nints):\n self.bits.append(fill)", "def proper_padding(self, prediction, k_space_slice):\n h = prediction.shape[-3]\n w = prediction.shape[-2]\n w_pad = (k_space_slice.shape[-2] - w) // 2\n h_pad = (k_space_slice.shape[-3]-h) // 2\n return torch.nn.functional.pad(prediction, (0,0,w_pad,w_pad,h_pad,h_pad), \"constant\", 0)" ]
[ "0.57504845", "0.57208264", "0.55567306", "0.5440776", "0.53998905", "0.5392926", "0.5346752", "0.52881277", "0.5240125", "0.5209327", "0.51652426", "0.51546246", "0.51528627", "0.5150088", "0.5145783", "0.51411784", "0.5137112", "0.51338357", "0.5132683", "0.5129447", "0.51156056", "0.51027834", "0.5069598", "0.50675446", "0.50666535", "0.5062659", "0.50557125", "0.50556934", "0.49744835", "0.49730912" ]
0.74816704
0
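A NumPy analogue of the padding-zeroing step in the record above, with made-up shapes and a random-normal initializer standing in for self.init; it only demonstrates zeroing the slice that corresponds to the pad index along the word axis.

import numpy as np

def lut_init_sketch(vocab_size, embed_dim, pad_idx, seed=0):
    # Fill the lookup table with the initializer (random normal here as a
    # stand-in), then zero the row that represents the padding token.
    rng = np.random.default_rng(seed)
    init_w = rng.normal(size=(vocab_size, embed_dim))
    init_w[pad_idx] = 0
    return init_w

w = lut_init_sketch(vocab_size=10, embed_dim=4, pad_idx=0)
assert not w[0].any()          # the padding row is all zeros
assert w[1:].any()             # the rest keeps its initialization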
Create the filter axes. They are ordered as (C, D, H, W, K).
def _filter_axes(self, channel_axes, spatial_axes):
    f_axes = ng.make_axes()
    f_axes += ng.make_axis(length=self.nout, name="K")
    f_axes += channel_axes
    for key, ax in zip(self.spatial_keys, spatial_axes):
        f_axes += ng.make_axis(length=self.filter_spatial_shape[key], name=ax.name)
    return f_axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_axes(self, channel_axis, spatial_axes):\n f_axes = ng.make_axis(length=self.nout, name=\"K\")\n for key, ax in zip(\"DHW\", spatial_axes):\n f_axes += ng.make_axis(length=self.filter_shape[key],\n name=ax.name)\n f_axes += channel_axis\n return f_axes", "def make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def show_filters(self):\n weight_mat = self.sess.run(self.W_fc_out)\n\n # Loop channels\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n for cl in range(weight_mat.shape[1]):\n # Get filters of this output class\n w_list = ia.vec2image( lin_image=weight_mat[:,cl],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n\n # Show channels\n for ch,w in enumerate(w_list):\n colormax = np.abs(w).max()\n ax = plt.subplot2grid( (self.n_output_classes,\n self.n_input_channels), (cl,ch) )\n ax.imshow( w, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n 
plt.axis('tight')\n plt.axis('off')\n colormax = np.abs(w).max()\n\n if self.n_output_classes == 2:\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n # Get filters of this output class\n w_list0 = ia.vec2image( lin_image=weight_mat[:,0],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n w_list1 = ia.vec2image( lin_image=weight_mat[:,1],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n for ch in range(len(w_list)):\n w_both = w_list1[ch]-w_list0[ch]\n\n colormax = np.abs(w_both).max()\n ax = plt.subplot2grid( (1,\n self.n_input_channels), (0,ch) )\n ax.imshow( w_both, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes", "def filter_show(filters, nx=8):\n FN, C, FH, FW = filters.shape\n ny = int(np.ceil(FN / nx))\n\n fig = plt.figure()\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n for i in range(FN):\n ax = fig.add_subplot(ny, nx, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')\n plt.show()", "def plot_filters(net, layer, x, y):\n filters = net.layers[layer].w.eval()\n fig = plt.figure()\n for j in range(len(filters)):\n ax = fig.add_subplot(y, x, j)\n ax.matshow(filters[j][0], cmap = matplotlib.cm.binary)\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))\n plt.tight_layout()\n return plt", "def generate_panel(self):\r\n \r\n self.PanelData = self.RawData.filter(['ID', 'X', 'Z', 'W', 'R', 'β', 'LFP', 'H'], axis=1)", "def initialize_axis_stack(n_ax, make_cbar=False, Wfig=90, Hfig=90, hfrac=None, wfrac=0.6, x0frac=None, y0frac=0.12,\n vspace=5, hspace=5, fontsize=8, wcbar_frac=0.2, cbar_aratio=0.1, cbar_orientation='vertical',\n cbarspace=5, tspace=8, **kwargs):\n # This method returns and ImageGrid instance\n # ax = AxesGrid(fig, 111, # similar to subplot(111)\n # nrows_ncols=(n_ax, 1), # creates 2x2 grid of axes\n # axes_pad=0.1, # pad between axes in inch.\n # share_all=True,\n # )\n fig = sps.figure_in_mm(Wfig, Hfig)\n label_params = dict(size=fontsize, fontweight='normal')\n\n if hfrac is None:\n hfrac = 0.8 / float(n_ax) - ((n_ax - 2.) * float(vspace) + tspace) / (float(Hfig) * float(n_ax))\n if make_cbar and cbar_orientation == 'horizontal':\n # colorbar is going on top, with space cbarspace\n hfrac -= float(cbarspace) / (float(Hfig) * float(n_ax))\n print('hfrac = ', hfrac)\n if x0frac is None:\n x0 = (1. 
- wfrac) * 0.5 * Wfig\n else:\n x0 = x0frac * Wfig\n y0 = y0frac * Hfig\n ws = wfrac * Wfig\n hs = hfrac * Hfig\n print('hs = ', hs)\n xywh_list = [[x0, y0 + (n_ax - 1 - ii) * (hs + vspace), ws, hs, None] for ii in range(n_ax)]\n\n print('xywh_list = ', xywh_list)\n ax = [sps.axes_in_mm(x0, y0, width, height, label=part, label_params=label_params, **kwargs)\n for x0, y0, width, height, part in xywh_list]\n\n if make_cbar:\n wcbar = Wfig * wcbar_frac\n hcbar = cbar_aratio * wcbar\n if cbar_orientation == 'vertical':\n cbar_ax = sps.axes_in_mm(x0 + ws + hspace, (Hfig - wcbar) * 0.5, hcbar, wcbar, label='',\n label_params=label_params, **kwargs)\n elif cbar_orientation == 'horizontal':\n cbar_ax = sps.axes_in_mm(x0 + (ws - wcbar) * 0.5, y0 + n_ax * (hs + vspace) + cbarspace, wcbar, hcbar,\n label='', label_params=label_params)\n else:\n cbar_ax = None\n\n return fig, ax, cbar_ax", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.pool_axis_names:\n output_axes += ng.make_axis(name=name,\n length=utils.conv_output_dim(ax.length,\n self.pool_shape[name],\n pad_int[name],\n self.strides[name],\n pooling=True))\n else:\n output_axes += ax\n\n return output_axes", "def _make_axes(self, hdr, quiet=False, novec=False, vonly=False, simple=False):\n\n # PULL THE IMAGE/CUBE SIZES FROM THE HEADER\n naxis = int(hdr['NAXIS'])\n naxis1 = int(hdr['NAXIS1'])\n naxis2 = int(hdr['NAXIS2'])\n if naxis > 2:\n naxis3 = hdr['NAXIS3']\n\n ## EXTRACT FITS ASTROMETRY STRUCTURE\n ww = astropy.wcs.WCS(hdr)\n\n #IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)\n if naxis > 3:\n #GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER\n cd = ww.wcs.cd\n crpix = ww.wcs.crpix\n cdelt = ww.wcs.crelt\n crval = ww.wcs.crval\n\n if naxis > 2:\n # MAKE THE VELOCITY AXIS (WILL BE M/S)\n v = np.arange(naxis3) * 1.0\n vdif = v - (hdr['CRPIX3']-1)\n vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])\n\n # CUT OUT HERE IF WE ONLY WANT VELOCITY INFO\n if vonly:\n return vaxis\n\n #IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:\n if simple:\n print('Using simple aproach to make axes.')\n print('BE SURE THIS IS WHAT YOU WANT! 
It probably is not.')\n raxis = np.arange(naxis1) * 1.0\n rdif = raxis - (hdr['CRPIX1'] - 1)\n raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n daxis = np.arange(naxis2) * 1.0\n ddif = daxis - (hdr['CRPIX1'] - 1)\n daxis = (ddif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n rimg = raxis # (fltarr(naxis2) + 1.)\n dimg = (np.asarray(naxis1) + 1.) # daxis\n return rimg, dimg\n\n # OBNOXIOUS SFL/GLS THING\n glspos = ww.wcs.ctype[0].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[0]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[0] = ctstr\n print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])\n\n glspos = ww.wcs.ctype[1].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[1]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[1] = ctstr\n print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])\n\n # CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE\n if novec:\n rimg = np.zeros((naxis1, naxis2))\n dimg = np.zeros((naxis1, naxis2))\n for i in range(naxis1):\n j = np.asarray([0 for i in xrange(naxis2)])\n\n pixcrd = np.array([[zip(float(i), float(j))]], numpy.float_)\n ra, dec = ww.all_pix2world(pixcrd, 1)\n\n rimg[i, :] = ra\n dimg[i, :] = dec\n else:\n ximg = np.arange(naxis1) * 1.0\n yimg = np.arange(naxis1) * 1.0\n X, Y = np.meshgrid(ximg, yimg, indexing='xy')\n ss = X.shape\n xx, yy = X.flatten(), Y.flatten()\n\n pixcrd = np.array(zip(xx, yy), np.float_)\n img_new = ww.all_pix2world(pixcrd, 0)\n rimg_new, dimg_new = img_new[:,0], img_new[:,1]\n\n rimg = rimg_new.reshape(ss)\n dimg = dimg_new.reshape(ss)\n\n # GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW\n raxis = np.squeeze(rimg[:, naxis2/2])\n daxis = np.squeeze(dimg[naxis1/2, :])\n\n return rimg, dimg", "def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)", "def _make_axes(self):\n ax_idx = self.atlas.space.axes_order.index(\"frontal\")\n\n # make acustom axes dict\n atlas_shape = np.array(self.atlas.metadata[\"shape\"]) * np.array(\n self.atlas.metadata[\"resolution\"]\n )\n z_range = np.array([-atlas_shape[2], 0])\n z_ticks = [\n (-v, str(np.abs(v).astype(np.int32)))\n for v in np.linspace(\n 0,\n atlas_shape[ax_idx],\n 10,\n )\n ]\n\n if self.atlas.atlas_name == \"allen_human_500um\":\n z_range = None\n z_ticks = None\n logger.debug(\n \"RENDER: manually forcing axes size for human atlas, atlas needs fixing\"\n )\n\n # make custom axes dict\n axes = dict(\n axesLineWidth=3,\n tipSize=0,\n xtitle=\"AP (μm)\",\n ytitle=\"DV (μm)\",\n ztitle=\"LR (μm)\",\n textScale=0.8,\n xTitleRotation=180,\n zrange=z_range,\n zValuesAndLabels=z_ticks,\n xyGrid=False,\n yzGrid=False,\n zxGrid=False,\n xUseBounds=True,\n yUseBounds=True,\n zUseBounds=True,\n xLabelRotation=180,\n yLabelRotation=180,\n zLabelRotation=90,\n )\n\n return axes", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def _generate_axes_(self):\n\n return AxesTuple(self._axis(i) for i in range(self.ndim))", "def create_plots(self, keys):\n\n self.plots = VPlotContainer(resizable = \"hv\", bgcolor=\"lightgray\",\n fill_padding=True, padding = 10)\n # this looks cryptic, but it is equivalent to\n # ArrayPlotData(a=[], b=[], c=[])\n # if the keys are a,b,c. 
This just does it for all of the keys.\n self.plotdata = ArrayPlotData(**dict(zip(keys, [[]]*len(keys))))\n\n # figure out which key will be the x axis\n if 'Step' in keys:\n x = 'Step'\n elif 'Time (ps)' in keys:\n x = 'Time (ps)'\n else:\n raise ValueError('The reporter published neither the step nor time'\n 'count, so I don\\'t know what to plot on the x-axis!')\n\n\n colors = itertools.cycle(['blue', 'green', 'silver', 'pink', 'lightblue',\n 'red', 'darkgray', 'lightgreen',])\n for y in filter(lambda y: y != x, keys):\n self.plots.add(chaco_scatter(self.plotdata, x_name=x, y_name=y,\n color=colors.next()))", "def generate_filter_plots(\n data: AnnData, plot_filt: str, plot_filt_figsize: str = None\n) -> None:\n\n df_plot_before = data.obs[[\"Channel\", \"n_genes\", \"n_counts\", \"percent_mito\"]].copy()\n df_plot_before.reset_index(drop=True, inplace=True)\n df_plot_before[\"status\"] = \"original\"\n\n data = data[data.obs[\"passed_qc\"]] # focusing only on filtered cells\n\n df_plot_after = data.obs[[\"Channel\", \"n_genes\", \"n_counts\", \"percent_mito\"]].copy()\n df_plot_after.reset_index(drop=True, inplace=True)\n df_plot_after[\"status\"] = \"filtered\"\n df_plot = pd.concat((df_plot_before, df_plot_after), axis=0)\n\n from sccloud.plotting import plot_qc_violin\n\n figsize = None\n if plot_filt_figsize is not None:\n width, height = plot_filt_figsize.split(\",\")\n figsize = (int(width), int(height))\n\n plot_qc_violin(\n df_plot,\n \"count\",\n plot_filt + \".filt.UMI.pdf\",\n xattr=\"Channel\",\n hue=\"status\",\n xlabel=\"Channel\",\n split=True,\n linewidth=0,\n figsize=figsize,\n )\n\n plot_qc_violin(\n df_plot,\n \"gene\",\n plot_filt + \".filt.gene.pdf\",\n xattr=\"Channel\",\n hue=\"status\",\n xlabel=\"Channel\",\n split=True,\n linewidth=0,\n figsize=figsize,\n )\n\n plot_qc_violin(\n df_plot,\n \"mito\",\n plot_filt + \".filt.mito.pdf\",\n xattr=\"Channel\",\n hue=\"status\",\n xlabel=\"Channel\",\n split=True,\n linewidth=0,\n figsize=figsize,\n )\n\n logger.info(\"Filtration plots are generated.\")", "def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def plot_filterbank_output(signals, spacing=None, axis=-1):\n\n if spacing is None:\n spacing = signals.max()\n\n for k, filtered in enumerate(signals):\n plt.gca().set_prop_cycle(plt.cycler('color', COLORS[:signals.shape[2]]))\n if axis == -1:\n filtered = filtered.T\n plt.plot(filtered + k*spacing*2)", "def write_filters(self, session):\n\n w = self._dual.get_op('w')\n weights_values = session.run(w)\n weights_transpose = np.transpose(weights_values)\n\n filter_height = self._input_shape_visualisation[1]\n filter_width = 
self._input_shape_visualisation[2]\n np_write_filters(weights_transpose, [filter_height, filter_width])", "def MyFilter(data, window_width=10, beta=2.0, draw_graph=False):\n\n #read data and change the format\n if 'time' in data.columns:\n date_list = []\n for i in data.index:\n date_parse = parse(str(data.ix[i].time))\n date_list.append(date_parse)\n data['date'] = date_list\n data_use = data\n data_use.index = data_use['date'].tolist()\n data_use = data_use.drop(['date','time'], axis=1)\n data_use.index.name = 'time'\n else:\n data_use = data\n #design filter, use the kaiser window here\n window = signal.kaiser(window_width, beta=beta)\n data_use['close_filtered'] = signal.convolve(data_use['close'], window, mode='same') / sum(window)\n data_use['high_frequency'] = data_use['close'] - data_use['close_filtered']\n\n #delete the distortion datas after filtered\n if window_width % 2 == 0:\n data_changed = data_use[window_width/2: -(window_width/2 - 1)]\n else:\n data_changed = data_use[(window_width-1)/2: -(window_width-1)/2]\n\n #draw graph\n if (draw_graph == True) :\n fig = plt.figure()\n ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2)\n data_changed.loc[:,'close'].plot(style='r', label='original')\n data_changed.loc[:,'close_filtered'].plot(style='k', label='filtered')\n plt.title('Kaiser window_width = %d , const = %d' % (window_width, beta))\n plt.legend(loc='best')\n\n ax2 = plt.subplot2grid((3,1), (2,0))\n data_changed.loc[:,'high_frequency'].plot(label='high_frequency')\n ax2.set_ylim([-150, 150])\n plt.title('High Frequency')\n plt.legend(loc='best')\n plt.show()\n # print data_use\n # print data_changed\n data_out = data_changed['close_filtered']\n return np.array(data_out.tolist())", "def readAxes(self):\n for axisElement in self.root.findall(\".axes/axis\"):\n axis = {}\n axis['name'] = name = axisElement.attrib.get(\"name\")\n axis['tag'] = axisElement.attrib.get(\"tag\")\n axis['minimum'] = float(axisElement.attrib.get(\"minimum\"))\n axis['maximum'] = float(axisElement.attrib.get(\"maximum\"))\n axis['default'] = float(axisElement.attrib.get(\"default\"))\n # we're not using the map for anything.\n axis['map'] = []\n for warpPoint in axisElement.findall(\".map\"):\n inputValue = float(warpPoint.attrib.get(\"input\"))\n outputValue = float(warpPoint.attrib.get(\"output\"))\n axis['map'].append((inputValue, outputValue))\n # there are labelnames in the element\n # but we don't need them for building the fonts.\n self.axes[name] = axis\n self.axesOrder.append(axis['name'])", "def make_animation_subset_levels(X, fixed_axes, fixed_value_1, fixed_value_2,\n filtration_size):\n # Create the array indexes\n obj = [slice(None, None, None)] * 4\n obj[fixed_axes[0]] = fixed_value_1\n obj[fixed_axes[1]] = fixed_value_2\n # print obj\n\n # Create sequence of threshold values\n thresholds = np.linspace(start=np.amin(X[obj]), stop=np.amax(X[obj]), num=filtration_size)\n # print thresholds\n # TEST PLOT\n # fig, ax = plt.subplots()\n # # interp = kwargs.get('interpolation', 'none')\n # # colors = kwargs.get('colormap', 'seismic')\n # img0 = ax.imshow(X[obj], cmap='Blues', interpolation='none')\n # fig.colorbar(img0, ax=ax, fraction=0.022, pad=0.01)\n # ax.invert_yaxis()\n # # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n # fig.tight_layout()\n # fig.show()\n\n # def get_middle(xx):\n # return 1 - (float(np.amax(xx)) / (np.amax(xx) + abs(np.amin(xx))))\n\n def init():\n global fig, ax, im, tx\n fig = plt.figure()\n ax = plt.axes()\n # idx = list(obj)\n # idx[sweep_axis] = 
slice(None, None, None)\n # middle = get_middle(X[idx])\n # print obj\n im = ax.imshow(X[obj] < thresholds[2], cmap='Blues',#cmap=shiftedColorMap(cm.seismic, midpoint=middle),\n interpolation='none', aspect='auto')\n # vmin=np.amin(X[idx]), vmax=np.amax(X[idx]))\n ax.invert_yaxis()\n # cb = fig.colorbar(im)\n # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n return\n\n def animate(n):\n # update indexes\n # obj[sweep_axis] = n\n # vmax = np.max(X[obj])\n # vmin = np.min(X[obj])\n im.set_data(X[obj] < thresholds[n])\n # im.set_clim(vmin, vmax)\n # tx.set_text('%s = %d' % (X.dimensions[sweep_axis], n))\n return\n\n init()\n anim = animation.FuncAnimation(fig, animate, frames=np.arange(filtration_size), interval=100, blit=False)\n return anim", "def test_convolution(transformer_factory):\n N = 128\n C, K = 3, 8\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n output = ng.convolution(conv_params, inputs, filters, axes=ax_o)\n targets = ng.placeholder(axes=output.axes)\n\n costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)\n error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)\n d_inputs = ng.deriv(error, inputs)\n d_filters = ng.deriv(error, filters)\n\n targets_value = rng.uniform(.1, 0.9, output.axes)\n\n with executor([output, error, d_inputs, d_filters], inputs, filters, targets) as conv_executor:\n result_ng, err_ng, gradI_ng, gradF_ng = \\\n conv_executor(input_value, filter_value, targets_value)\n\n # Now compute reference values via NEON\n NervanaObject.be.bsz = N\n neon_layer = Convolution(fshape=(R, S, K), padding=padding, strides=strides)\n\n inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))\n neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))\n neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)\n neon_layer.configure((C, H, W))\n neon_layer.prev_layer = True\n neon_layer.allocate()\n neon_layer.set_deltas(DummyDeltaBuffers())\n\n result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)\n\n act_result_ne = 1. 
/ (1.0 + np.exp(-result_ne))\n err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))\n gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)\n gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)\n\n # Compare fprop\n ng.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)\n\n # Compare bprop\n ng.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)\n\n # Compare update\n ng.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def ferret_custom_axes(id):\n axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM\n axis_defs[0] = ( 1, 2, 1, \"KS,P\", False )\n return axis_defs" ]
[ "0.74516636", "0.6499571", "0.62619257", "0.62412906", "0.6160327", "0.6132655", "0.5812254", "0.5753394", "0.55952036", "0.55551445", "0.55383164", "0.55284905", "0.5506311", "0.54356974", "0.53720635", "0.5354197", "0.5346949", "0.5231423", "0.5220264", "0.5207294", "0.51953644", "0.5172644", "0.5140646", "0.51382935", "0.5126418", "0.5085347", "0.5052111", "0.5050012", "0.50068057", "0.49986792" ]
0.7154249
1
Create the convolution output axes.
def _output_axes(self, in_obj, pad_int):
    output_axes = ng.make_axes()
    for ax in in_obj.axes:
        name = ax.name
        if name in self.conv_axis_names:
            output_axes += ng.make_axis(name=ax.name,
                                        length=utils.conv_output_dim(
                                            ax.length,
                                            self.filter_spatial_shape[name],
                                            pad_int[name],
                                            self.strides[name],
                                            False,
                                            self.dilation[name]))
        elif name == "C":
            output_axes += ng.make_axis(name=name, length=self.nout)
        else:
            output_axes += ax
    return output_axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.pool_axis_names:\n output_axes += ng.make_axis(name=name,\n length=utils.conv_output_dim(ax.length,\n self.pool_shape[name],\n pad_int[name],\n self.strides[name],\n pooling=True))\n else:\n output_axes += ax\n\n return output_axes", "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def _conv_op(self, in_obj, channel_axes, spatial_axes):\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n output_axes = self._output_axes(in_obj, pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_spatial_shape,\n self.strides, pad_int, self.dilation)\n return ng.convolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)", "def show_filters(self):\n weight_mat = self.sess.run(self.W_fc_out)\n\n # Loop channels\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n for cl in range(weight_mat.shape[1]):\n # Get filters of this output class\n w_list = ia.vec2image( 
lin_image=weight_mat[:,cl],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n\n # Show channels\n for ch,w in enumerate(w_list):\n colormax = np.abs(w).max()\n ax = plt.subplot2grid( (self.n_output_classes,\n self.n_input_channels), (cl,ch) )\n ax.imshow( w, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n colormax = np.abs(w).max()\n\n if self.n_output_classes == 2:\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n # Get filters of this output class\n w_list0 = ia.vec2image( lin_image=weight_mat[:,0],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n w_list1 = ia.vec2image( lin_image=weight_mat[:,1],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n for ch in range(len(w_list)):\n w_both = w_list1[ch]-w_list0[ch]\n\n colormax = np.abs(w_both).max()\n ax = plt.subplot2grid( (1,\n self.n_input_channels), (0,ch) )\n ax.imshow( w_both, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def _generate_axes_(self):\n\n return AxesTuple(self._axis(i) for i in range(self.ndim))", "def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)", "def _filter_axes(self, channel_axis, spatial_axes):\n f_axes = ng.make_axis(length=self.nout, name=\"K\")\n for key, ax in zip(\"DHW\", spatial_axes):\n f_axes += ng.make_axis(length=self.filter_shape[key],\n name=ax.name)\n f_axes += channel_axis\n return f_axes", "def _conv_op(self, in_obj, channel_axes, spatial_axes):\n\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n\n output_axes = self._output_axes(in_obj.axes,\n pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_shape,\n self.strides, pad_int, self.dilation)\n return ng.deconvolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)", "def _conv(self, indim, outdim, ksize, stride, padding):\n\n return nn.Sequential(\n nn.BatchNorm2d(indim),\n nn.Conv2d(indim, outdim, ksize, stride, padding),\n self.activ(),\n )", "def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_no_channel_axis)\n t_axes = conv1d_no_channel_axis.axes + channel_axis\n assert output.axes.is_equal_set(t_axes), (\"Output axes are not input axes + channel axis:\"\n \"{} != {} + {}\").format(output.axes,\n conv1d_no_channel_axis.axes,\n channel_axis)", "def conv_dims(self):\n img_w = np.shape(self.image)[0]\n img_h = np.shape(self.image)[1]\n \n x = (img_w - self.size) // 
self.stride\n y = (img_h - self.size) // self.stride\n \n return x, y", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w", "def calc_axes(self):\n y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n y_axis *= self.pixelsize[0]\n x_axis *= self.pixelsize[1]\n return x_axis, y_axis", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):\n data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(\n data, kernel, strides, padding, out_dtype, output_padding\n )\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n\n # convolution stage\n out_c = simplify(out_c)\n\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = te.reduce_axis((0, in_c), name=\"dc\")\n dh = te.reduce_axis((0, filter_h), name=\"dh\")\n dw = te.reduce_axis((0, filter_w), name=\"dw\")\n\n Output = te.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: te.sum(\n data_pad[b, dc, h + dh, w + dw].astype(out_dtype)\n * kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw],\n ),\n tag=\"conv2d_transpose_nchw\",\n )\n\n return Output", "def output_channels(self, input_channels):\n pass", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def _create_conv(cls, onnx_node, inputs, opset_version):\n kernel = tuple(onnx_node.attrs[\"kernel_shape\"])\n padding = tuple(\n onnx_node.attrs[\"pads\"]) if \"pads\" in onnx_node.attrs else (0, 0)\n stride = tuple(onnx_node.getattr('strides', (1, 1)))\n # default the odd_padding is 0, once there are same pad mode, we modify it\n # for odd_padding, please refer the autegrade.py\n odd_padding = (0, 0, 0, 0)\n if \"auto_pad\" in onnx_node.attrs:\n auto_pad = 
utils.force_unicode(onnx_node.attrs['auto_pad'])\n if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):\n padding, odd_padding = utils.get_padding_shape(\n auto_pad, inputs[0].shape[2:], kernel, stride)\n\n # not support dilation\n dilation = onnx_node.getattr('dilations', 1)\n if dilation != 1 and list(dilation) != [1, 1]:\n raise ValueError(\"Not implemented yet for dilation\")\n group = onnx_node.getattr('group', 1)\n\n # only support 1d or 2d\n if len(kernel) > 2:\n raise ValueError(\"Only implemented for 1d or 2d\")\n\n bias = len(inputs) == 3\n x = inputs[0]\n x_shape = inputs[0].shape\n in_channels = x_shape[1]\n w_shape = inputs[1].shape\n out_channels = w_shape[0]\n assert w_shape[1] == in_channels // group\n\n if inputs[0].device.id() == -1:\n if group != 1:\n raise NotImplementedError\n else:\n handle = singa.ConvHandle(x.data, kernel, stride, padding,\n in_channels, out_channels, bias,\n group)\n else:\n handle = singa.CudnnConvHandle(x.data, kernel, stride, padding,\n in_channels, out_channels, bias,\n group)\n\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(handle, odd_padding)", "def call(self, inputs):\r\n outputs = K.spatial_2d_padding(inputs,\r\n padding=self.padding,\r\n data_format=self.data_format)\r\n\r\n p00, p01 = self.padding[0][0], self.padding[0][1]\r\n p10, p11 = self.padding[1][0], self.padding[1][1]\r\n if self.data_format == \"channels_last\":\r\n\r\n row0 = K.concatenate([inputs[:, p00:0:-1, p10:0:-1, :],\r\n inputs[:, p00:0:-1, :, :],\r\n inputs[:, p00:0:-1, -2:-2-p11:-1, :]],\r\n axis=2)\r\n row1 = K.concatenate([inputs[:, :, p10:0:-1, :],\r\n inputs,\r\n inputs[:, :, -2:-2-p11:-1, :]],\r\n axis=2)\r\n row2 = K.concatenate([inputs[:, -2:-2-p01:-1, p10:0:-1, :],\r\n inputs[:, -2:-2-p01:-1, :, :],\r\n inputs[:, -2:-2-p01:-1, -2:-2-p11:-1, :]],\r\n axis=2)\r\n\r\n outputs = K.concatenate([row0, row1, row2], axis=1)\r\n\r\n else: # self.data_format == \"channels_first\"\r\n\r\n row0 = K.concatenate([inputs[:, :, p00:0:-1, p10:0:-1],\r\n inputs[:, :, p00:0:-1, :],\r\n inputs[:, :, p00:0:-1, -2:-2-p11:-1]],\r\n axis=3)\r\n row1 = K.concatenate([inputs[:, :, :, p10:0:-1],\r\n inputs,\r\n inputs[:, :, :, -2:-2-p11:-1]],\r\n axis=3)\r\n row2 = K.concatenate([inputs[:, :, -2:-2-p01:-1, p10:0:-1],\r\n inputs[:, :, -2:-2-p01:-1, :],\r\n inputs[:, :, -2:-2-p01:-1, -2:-2-p11:-1]],\r\n axis=3)\r\n\r\n outputs = K.concatenate([row0, row1, row2], axis=2)\r\n\r\n return outputs", "def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)", "def conv_output_shape(\n h_w: Tuple[int, int],\n kernel_size: int = 1,\n stride: int = 1,\n pad: int = 0,\n dilation: int = 1,\n ):\n h = floor(\n ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n w = floor(\n ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n return h, w", "def __call__(self, in_obj, channel_axes=\"C\", spatial_axes=(\"D\", \"H\", \"W\"), **kwargs):\n output = super(Deconvolution, self).__call__(in_obj, channel_axes, spatial_axes, **kwargs)\n return self._slice_output(output, spatial_axes, **kwargs)", "def test_convolution(transformer_factory):\n N = 128\n C, K = 3, 8\n D, 
T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n output = ng.convolution(conv_params, inputs, filters, axes=ax_o)\n targets = ng.placeholder(axes=output.axes)\n\n costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)\n error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)\n d_inputs = ng.deriv(error, inputs)\n d_filters = ng.deriv(error, filters)\n\n targets_value = rng.uniform(.1, 0.9, output.axes)\n\n with executor([output, error, d_inputs, d_filters], inputs, filters, targets) as conv_executor:\n result_ng, err_ng, gradI_ng, gradF_ng = \\\n conv_executor(input_value, filter_value, targets_value)\n\n # Now compute reference values via NEON\n NervanaObject.be.bsz = N\n neon_layer = Convolution(fshape=(R, S, K), padding=padding, strides=strides)\n\n inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))\n neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))\n neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)\n neon_layer.configure((C, H, W))\n neon_layer.prev_layer = True\n neon_layer.allocate()\n neon_layer.set_deltas(DummyDeltaBuffers())\n\n result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)\n\n act_result_ne = 1. 
/ (1.0 + np.exp(-result_ne))\n err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))\n gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)\n gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)\n\n # Compare fprop\n ng.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)\n\n # Compare bprop\n ng.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)\n\n # Compare update\n ng.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = floor(((h_w[0] + (2 * pad) - (dilation *\n (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad) - (dilation *\n (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def output_shape(self):\n raise NotImplementedError", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = math.floor( ((h_w[0] + (2 * pad) - ( dilation * (kernel_size[0] - 1)\n ) - 1 )/ stride) + 1)\n w = math.floor( ((h_w[1] + (2 * pad) - ( dilation * (kernel_size[1] - 1)\n ) - 1 )/ stride) + 1)\n return h, w" ]
[ "0.7661946", "0.7352949", "0.7192119", "0.6087903", "0.60874796", "0.60789174", "0.60517323", "0.5962896", "0.5934195", "0.58397484", "0.582829", "0.5807316", "0.5701961", "0.5685399", "0.56428283", "0.56378", "0.5607991", "0.5599755", "0.5593294", "0.5579488", "0.55709165", "0.5558978", "0.55588275", "0.5553769", "0.5533254", "0.55306864", "0.55231893", "0.55228287", "0.55172044", "0.55171853" ]
0.7492919
1
Get integer padding values for each spatial axis. If the padding is asymmetric, also return the manual padding required for each axis.
def _get_pad_int(self, spatial_axes):
    # Manual padding might be required for asymmetric paddings
    manual_pad = {}
    padding_int = {}
    for name, ax in zip(self.spatial_keys, spatial_axes):
        pad = utils.ConvParameters(ax.length,
                                   self.filter_spatial_shape[name],
                                   self.strides[name],
                                   self.dilation[name]).get_padding_size(self.padding[name])
        symm_pad = min(pad)
        padding_int[name] = symm_pad
        if pad[0] != pad[1]:
            manual_pad[ax.name] = (pad[0] - symm_pad, pad[1] - symm_pad)
    return padding_int, manual_pad
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_pad_int(self, axes):\n # Manual padding might be required for asymmetric paddings\n manual_pad = {}\n padding_int = {}\n for name, ax in zip(self.pool_axis_names, axes):\n pad = utils.ConvParameters(ax.length,\n self.pool_shape[name],\n self.strides[name],\n pooling=True).get_padding_size(self.padding[name])\n symm_pad = min(pad)\n padding_int[name] = symm_pad\n if pad[0] != pad[1]:\n manual_pad[ax.name] = (pad[0] - symm_pad, pad[1] - symm_pad)\n\n return padding_int, manual_pad", "def padding(self):\n\t\treturn self.paddings_shape_param('W')", "def padding_width(self):\n\t\treturn self.paddings_shape_param('W')", "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def get_paddings(self):\n return tf.constant([[0, 0,],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [0, 0]])", "def padding(self) -> Tuple[int, int, int, int]:\n return (self.ipadding[0].to_pixels(self.width),\n self.ipadding[1].to_pixels(self.width),\n self.ipadding[2].to_pixels(self.height),\n self.ipadding[3].to_pixels(self.height))", "def padding(self):\r\n return self._generate_spacing_info(self.config['padding'])", "def _get_padding(w, h):\n dim_diff = np.abs(h - w)\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n return (0, pad1, 0, pad2) if h <= w else (pad1, 0, pad2, 0)", "def get_padding_sizes(self, div, dim):\n # ghost cells in the y direction\n target_shape = div * np.ceil(dim / div)\n target_shape_diff = target_shape - dim\n\n pad_low = int(np.ceil(target_shape_diff / 2.0))\n pad_high = int(np.floor(target_shape_diff / 2.0))\n\n return pad_low, pad_high", "def paddings_shape_param(self, param):\n\t\tindex = self.variables['paddings_format'].index(param)\n\t\treturn self.variables['paddings'].shape[index]", "def get_pads(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n auto_pad = onnx_node.get_attribute_value('auto_pad')\n pads = onnx_node.get_attribute_value('pads', ()) # Padding along each axis\n kernel_shape = onnx_node.get_attribute_value('kernel_shape')\n\n # Attribute 'auto_pad' is deprecated, but is currently used by CNTK\n if auto_pad:\n if auto_pad == 'VALID':\n pads = [0, 0] * len(kernel_shape)\n\n else:\n # SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.\n # In case of odd number add the extra padding at the end for SAME_UPPER and at the\n # beginning for SAME_LOWER.\n def pad_value(kernel_dim): # type: (int) -> float\n return (kernel_dim - 1.0) / 2.0\n\n pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n ceil(pad_value(dim)) for dim in kernel_shape]\n pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n floor(pad_value(dim)) for dim in kernel_shape]\n pads = pads_starts + pads_ends\n\n verify_symmetric_padding(onnx_node, pads)\n\n pad_h, pad_w, pad_d = 0, 0, 0\n if pads and len(pads) == 2: # ONNX input axes NCHW\n pad_h, pad_w = pads\n if pads and len(pads) == 3: # ONNX input axes NCHWD\n pad_h, pad_w, pad_d = pads\n if pads and len(pads) == 4: # ONNX input axes NCHW\n pad_h, pad_w, _, _ = pads\n elif pads and len(pads) == 6: # ONNX input axes NCHWD\n pad_h, pad_w, pad_d, _, _, _ = pads\n\n return pad_h, pad_w, pad_d", "def transform_padding(pad_width):\n num_pad_values = len(pad_width)\n onnx_pad_width = [0]*num_pad_values\n\n start_index = 0\n # num_pad_values will always be multiple of 2\n end_index = int(num_pad_values/2)\n for idx in range(0, 
num_pad_values):\n if idx % 2 == 0:\n onnx_pad_width[start_index] = pad_width[idx]\n start_index += 1\n else:\n onnx_pad_width[end_index] = pad_width[idx]\n end_index += 1\n\n return onnx_pad_width", "def padding_index(self) -> int:\n return self._pad_index", "def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')", "def get_padding_values():\r\n return (tf.constant(0, tf.float32), tf.constant(0, tf.float32), tf.constant(0, tf.float32))", "def _get_padding_width(self, column_index: int) -> int:\n _, pad_right, _, pad_left = self.padding\n if self.collapse_padding:\n if column_index > 0:\n pad_left = max(0, pad_left - pad_right)\n return pad_left + pad_right", "def get_padding_values():\n return tf.constant(0, tf.float32), tf.constant(0, tf.float32), tf.constant(-1, tf.int32)", "def padding_type(spatial, config):\n ret = None\n if 'padding' not in config:\n return 0\n elif isinstance(config['padding'], list):\n ret = torch.tensor(config['padding'])\n elif config['padding'] == 'same':\n\n k = torch.tensor(config['kernel_size'])\n s = torch.tensor(config['stride'])\n\n ret = (spatial*(s-1)-1+k)//2\n\n elif config['padding'] == 'valid':\n ret = torch.zeros(spatial.shape).long()\n else:\n raise ValueError('Pad type is invalid')\n return list(ret.numpy())", "def padding_width(self):\n ...", "def padding_width(self):\n return self.width + self.padding_left + self.padding_right", "def compute_padding(M, N, J):\n M_padded = ((M + 2 ** J) // 2 ** J + 1) * 2 ** J\n N_padded = ((N + 2 ** J) // 2 ** J + 1) * 2 ** J\n return M_padded, N_padded", "def paddings_for_conv2d(\n kernel_size: Sequence[int],\n shifts: Sequence[int] = (0, 0),\n) -> List[Tuple[int, int]]:\n if len(kernel_size) != 2 or len(shifts) != 2:\n raise ValueError('kernel_size and shifts must have length 2')\n\n paddings = [(0, 0)]\n for size, shift in zip(kernel_size, shifts):\n pad_left = (size - shift) // 2\n paddings.append((pad_left, size - pad_left - 1))\n paddings += [(0, 0)]\n return paddings", "def _get_padded_grid_(ax):\n ax_pad = np.zeros(ax.size + 2)\n ax_pad[1:-1] = ax\n ax_pad[0] = ax[0] - (ax[2] - ax[1])\n ax_pad[-1] = ax[-1] + (ax[2] - ax[1])\n\n return ax_pad", "def determine_padding(self, input_shape: int, output_shape: int) -> int:\n padding = (((output_shape - 1) * self.stride) + 1 - input_shape + (self.dilation * (self.kernel_size - 1)))\n\n # integer division\n padding = padding // 2\n assert output_shape == l_out(\n input_shape, padding, self.dilation, self.kernel_size, self.stride\n ) and padding >= 0, f\"Input and output of {input_shape} and {output_shape} with \" \\\n f\"kernel {self.kernel_size}, dilation {self.dilation}, stride {self.stride} \" \\\n f\"are incompatible for a Conv1D network.\"\n return padding", "def _compute_causal_padding(self):\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n\n if self.data_format == 'channels_last':\n if self.rank == 1:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n elif self.rank == 2:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0]]\n elif self.rank == 3:\n causal_padding = [[0, 0], [left_pad, 0], [0, 0], [0, 0], [0, 0]]\n else:\n raise ValueError()\n return causal_padding\n else:\n raise ValueError('No support for NCHW yet')", "def padding_height(self):\n\t\treturn self.paddings_shape_param('H')", "def pad(self):\n return self._pad", "def get_padding(x, padding_value=0, dtype=tf.float32):\n # print(\"get_padding\", dtype)\n with tf.name_scope(\"padding\"):\n return tf.cast(tf.equal(x, padding_value), dtype=dtype)", "def 
make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def get_pad(data_len: int, filt_len: int) -> tuple:\n # pad to ensure we see all filter positions and\n # for pywt compatability.\n # convolution output length:\n # see https://arxiv.org/pdf/1603.07285.pdf section 2.3:\n # floor([data_len - filt_len]/2) + 1\n # should equal pywt output length\n # floor((data_len + filt_len - 1)/2)\n # => floor([data_len + total_pad - filt_len]/2) + 1\n # = floor((data_len + filt_len - 1)/2)\n # (data_len + total_pad - filt_len) + 2 = data_len + filt_len - 1\n # total_pad = 2*filt_len - 3\n\n # we pad half of the total requried padding on each side.\n padr = (2 * filt_len - 3) // 2\n padl = (2 * filt_len - 3) // 2\n\n # pad to even singal length.\n if data_len % 2 != 0:\n padr += 1\n\n return padr, padl" ]
[ "0.74670684", "0.7420271", "0.69187814", "0.68692887", "0.6839506", "0.68172926", "0.6625459", "0.661594", "0.65719265", "0.6523223", "0.6506822", "0.6498268", "0.6497893", "0.63914186", "0.63773", "0.6366422", "0.62827", "0.62254727", "0.61800945", "0.61369365", "0.61293054", "0.61067766", "0.59792817", "0.5964866", "0.5947061", "0.5910476", "0.5886844", "0.5883341", "0.5861185", "0.58539426" ]
0.7919041
0
Create the filter axes. They are ordered as (K, D, H, W, C).
def _filter_axes(self, channel_axis, spatial_axes):
    f_axes = ng.make_axis(length=self.nout, name="K")
    for key, ax in zip("DHW", spatial_axes):
        f_axes += ng.make_axis(length=self.filter_shape[key],
                               name=ax.name)
    f_axes += channel_axis
    return f_axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_axes(self, channel_axes, spatial_axes):\n f_axes = ng.make_axes()\n f_axes += ng.make_axis(length=self.nout, name=\"K\")\n f_axes += channel_axes\n for key, ax in zip(self.spatial_keys, spatial_axes):\n f_axes += ng.make_axis(length=self.filter_spatial_shape[key],\n name=ax.name)\n\n return f_axes", "def make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes", "def show_filters(self):\n weight_mat = self.sess.run(self.W_fc_out)\n\n # Loop channels\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n for cl in range(weight_mat.shape[1]):\n # 
Get filters of this output class\n w_list = ia.vec2image( lin_image=weight_mat[:,cl],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n\n # Show channels\n for ch,w in enumerate(w_list):\n colormax = np.abs(w).max()\n ax = plt.subplot2grid( (self.n_output_classes,\n self.n_input_channels), (cl,ch) )\n ax.imshow( w, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n colormax = np.abs(w).max()\n\n if self.n_output_classes == 2:\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n # Get filters of this output class\n w_list0 = ia.vec2image( lin_image=weight_mat[:,0],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n w_list1 = ia.vec2image( lin_image=weight_mat[:,1],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n for ch in range(len(w_list)):\n w_both = w_list1[ch]-w_list0[ch]\n\n colormax = np.abs(w_both).max()\n ax = plt.subplot2grid( (1,\n self.n_input_channels), (0,ch) )\n ax.imshow( w_both, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def plot_filters(net, layer, x, y):\n filters = net.layers[layer].w.eval()\n fig = plt.figure()\n for j in range(len(filters)):\n ax = fig.add_subplot(y, x, j)\n ax.matshow(filters[j][0], cmap = matplotlib.cm.binary)\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))\n plt.tight_layout()\n return plt", "def filter_show(filters, nx=8):\n FN, C, FH, FW = filters.shape\n ny = int(np.ceil(FN / nx))\n\n fig = plt.figure()\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n for i in range(FN):\n ax = fig.add_subplot(ny, nx, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')\n plt.show()", "def generate_panel(self):\r\n \r\n self.PanelData = self.RawData.filter(['ID', 'X', 'Z', 'W', 'R', 'β', 'LFP', 'H'], axis=1)", "def _make_axes(self, hdr, quiet=False, novec=False, vonly=False, simple=False):\n\n # PULL THE IMAGE/CUBE SIZES FROM THE HEADER\n naxis = int(hdr['NAXIS'])\n naxis1 = int(hdr['NAXIS1'])\n naxis2 = int(hdr['NAXIS2'])\n if naxis > 2:\n naxis3 = hdr['NAXIS3']\n\n ## EXTRACT FITS ASTROMETRY STRUCTURE\n ww = astropy.wcs.WCS(hdr)\n\n #IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)\n if naxis > 3:\n #GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER\n cd = ww.wcs.cd\n crpix = ww.wcs.crpix\n cdelt = ww.wcs.crelt\n crval = ww.wcs.crval\n\n if naxis > 2:\n # MAKE THE VELOCITY AXIS (WILL BE M/S)\n v = np.arange(naxis3) * 1.0\n vdif = v - (hdr['CRPIX3']-1)\n vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])\n\n # CUT OUT HERE IF WE ONLY WANT VELOCITY INFO\n if vonly:\n return vaxis\n\n #IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:\n if simple:\n print('Using simple aproach to make axes.')\n print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')\n raxis = np.arange(naxis1) * 1.0\n rdif = raxis - (hdr['CRPIX1'] - 1)\n raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n daxis = np.arange(naxis2) * 1.0\n ddif = daxis - (hdr['CRPIX1'] - 1)\n daxis = (ddif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n rimg = raxis # (fltarr(naxis2) + 1.)\n dimg = (np.asarray(naxis1) + 1.) 
# daxis\n return rimg, dimg\n\n # OBNOXIOUS SFL/GLS THING\n glspos = ww.wcs.ctype[0].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[0]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[0] = ctstr\n print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])\n\n glspos = ww.wcs.ctype[1].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[1]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[1] = ctstr\n print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])\n\n # CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE\n if novec:\n rimg = np.zeros((naxis1, naxis2))\n dimg = np.zeros((naxis1, naxis2))\n for i in range(naxis1):\n j = np.asarray([0 for i in xrange(naxis2)])\n\n pixcrd = np.array([[zip(float(i), float(j))]], numpy.float_)\n ra, dec = ww.all_pix2world(pixcrd, 1)\n\n rimg[i, :] = ra\n dimg[i, :] = dec\n else:\n ximg = np.arange(naxis1) * 1.0\n yimg = np.arange(naxis1) * 1.0\n X, Y = np.meshgrid(ximg, yimg, indexing='xy')\n ss = X.shape\n xx, yy = X.flatten(), Y.flatten()\n\n pixcrd = np.array(zip(xx, yy), np.float_)\n img_new = ww.all_pix2world(pixcrd, 0)\n rimg_new, dimg_new = img_new[:,0], img_new[:,1]\n\n rimg = rimg_new.reshape(ss)\n dimg = dimg_new.reshape(ss)\n\n # GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW\n raxis = np.squeeze(rimg[:, naxis2/2])\n daxis = np.squeeze(dimg[naxis1/2, :])\n\n return rimg, dimg", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.pool_axis_names:\n output_axes += ng.make_axis(name=name,\n length=utils.conv_output_dim(ax.length,\n self.pool_shape[name],\n pad_int[name],\n self.strides[name],\n pooling=True))\n else:\n output_axes += ax\n\n return output_axes", "def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)", "def _make_axes(self):\n ax_idx = self.atlas.space.axes_order.index(\"frontal\")\n\n # make acustom axes dict\n atlas_shape = np.array(self.atlas.metadata[\"shape\"]) * np.array(\n self.atlas.metadata[\"resolution\"]\n )\n z_range = np.array([-atlas_shape[2], 0])\n z_ticks = [\n (-v, str(np.abs(v).astype(np.int32)))\n for v in np.linspace(\n 0,\n atlas_shape[ax_idx],\n 10,\n )\n ]\n\n if self.atlas.atlas_name == \"allen_human_500um\":\n 
z_range = None\n z_ticks = None\n logger.debug(\n \"RENDER: manually forcing axes size for human atlas, atlas needs fixing\"\n )\n\n # make custom axes dict\n axes = dict(\n axesLineWidth=3,\n tipSize=0,\n xtitle=\"AP (μm)\",\n ytitle=\"DV (μm)\",\n ztitle=\"LR (μm)\",\n textScale=0.8,\n xTitleRotation=180,\n zrange=z_range,\n zValuesAndLabels=z_ticks,\n xyGrid=False,\n yzGrid=False,\n zxGrid=False,\n xUseBounds=True,\n yUseBounds=True,\n zUseBounds=True,\n xLabelRotation=180,\n yLabelRotation=180,\n zLabelRotation=90,\n )\n\n return axes", "def _generate_axes_(self):\n\n return AxesTuple(self._axis(i) for i in range(self.ndim))", "def create_plots(self, keys):\n\n self.plots = VPlotContainer(resizable = \"hv\", bgcolor=\"lightgray\",\n fill_padding=True, padding = 10)\n # this looks cryptic, but it is equivalent to\n # ArrayPlotData(a=[], b=[], c=[])\n # if the keys are a,b,c. This just does it for all of the keys.\n self.plotdata = ArrayPlotData(**dict(zip(keys, [[]]*len(keys))))\n\n # figure out which key will be the x axis\n if 'Step' in keys:\n x = 'Step'\n elif 'Time (ps)' in keys:\n x = 'Time (ps)'\n else:\n raise ValueError('The reporter published neither the step nor time'\n 'count, so I don\\'t know what to plot on the x-axis!')\n\n\n colors = itertools.cycle(['blue', 'green', 'silver', 'pink', 'lightblue',\n 'red', 'darkgray', 'lightgreen',])\n for y in filter(lambda y: y != x, keys):\n self.plots.add(chaco_scatter(self.plotdata, x_name=x, y_name=y,\n color=colors.next()))", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def initialize_axis_stack(n_ax, make_cbar=False, Wfig=90, Hfig=90, hfrac=None, wfrac=0.6, x0frac=None, y0frac=0.12,\n vspace=5, hspace=5, fontsize=8, wcbar_frac=0.2, cbar_aratio=0.1, cbar_orientation='vertical',\n cbarspace=5, tspace=8, **kwargs):\n # This method returns and ImageGrid instance\n # ax = AxesGrid(fig, 111, # similar to subplot(111)\n # nrows_ncols=(n_ax, 1), # creates 2x2 grid of axes\n # axes_pad=0.1, # pad between axes in inch.\n # share_all=True,\n # )\n fig = sps.figure_in_mm(Wfig, Hfig)\n label_params = dict(size=fontsize, fontweight='normal')\n\n if hfrac is None:\n hfrac = 0.8 / float(n_ax) - ((n_ax - 2.) * float(vspace) + tspace) / (float(Hfig) * float(n_ax))\n if make_cbar and cbar_orientation == 'horizontal':\n # colorbar is going on top, with space cbarspace\n hfrac -= float(cbarspace) / (float(Hfig) * float(n_ax))\n print('hfrac = ', hfrac)\n if x0frac is None:\n x0 = (1. 
- wfrac) * 0.5 * Wfig\n else:\n x0 = x0frac * Wfig\n y0 = y0frac * Hfig\n ws = wfrac * Wfig\n hs = hfrac * Hfig\n print('hs = ', hs)\n xywh_list = [[x0, y0 + (n_ax - 1 - ii) * (hs + vspace), ws, hs, None] for ii in range(n_ax)]\n\n print('xywh_list = ', xywh_list)\n ax = [sps.axes_in_mm(x0, y0, width, height, label=part, label_params=label_params, **kwargs)\n for x0, y0, width, height, part in xywh_list]\n\n if make_cbar:\n wcbar = Wfig * wcbar_frac\n hcbar = cbar_aratio * wcbar\n if cbar_orientation == 'vertical':\n cbar_ax = sps.axes_in_mm(x0 + ws + hspace, (Hfig - wcbar) * 0.5, hcbar, wcbar, label='',\n label_params=label_params, **kwargs)\n elif cbar_orientation == 'horizontal':\n cbar_ax = sps.axes_in_mm(x0 + (ws - wcbar) * 0.5, y0 + n_ax * (hs + vspace) + cbarspace, wcbar, hcbar,\n label='', label_params=label_params)\n else:\n cbar_ax = None\n\n return fig, ax, cbar_ax", "def readAxes(self):\n for axisElement in self.root.findall(\".axes/axis\"):\n axis = {}\n axis['name'] = name = axisElement.attrib.get(\"name\")\n axis['tag'] = axisElement.attrib.get(\"tag\")\n axis['minimum'] = float(axisElement.attrib.get(\"minimum\"))\n axis['maximum'] = float(axisElement.attrib.get(\"maximum\"))\n axis['default'] = float(axisElement.attrib.get(\"default\"))\n # we're not using the map for anything.\n axis['map'] = []\n for warpPoint in axisElement.findall(\".map\"):\n inputValue = float(warpPoint.attrib.get(\"input\"))\n outputValue = float(warpPoint.attrib.get(\"output\"))\n axis['map'].append((inputValue, outputValue))\n # there are labelnames in the element\n # but we don't need them for building the fonts.\n self.axes[name] = axis\n self.axesOrder.append(axis['name'])", "def MyFilter(data, window_width=10, beta=2.0, draw_graph=False):\n\n #read data and change the format\n if 'time' in data.columns:\n date_list = []\n for i in data.index:\n date_parse = parse(str(data.ix[i].time))\n date_list.append(date_parse)\n data['date'] = date_list\n data_use = data\n data_use.index = data_use['date'].tolist()\n data_use = data_use.drop(['date','time'], axis=1)\n data_use.index.name = 'time'\n else:\n data_use = data\n #design filter, use the kaiser window here\n window = signal.kaiser(window_width, beta=beta)\n data_use['close_filtered'] = signal.convolve(data_use['close'], window, mode='same') / sum(window)\n data_use['high_frequency'] = data_use['close'] - data_use['close_filtered']\n\n #delete the distortion datas after filtered\n if window_width % 2 == 0:\n data_changed = data_use[window_width/2: -(window_width/2 - 1)]\n else:\n data_changed = data_use[(window_width-1)/2: -(window_width-1)/2]\n\n #draw graph\n if (draw_graph == True) :\n fig = plt.figure()\n ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2)\n data_changed.loc[:,'close'].plot(style='r', label='original')\n data_changed.loc[:,'close_filtered'].plot(style='k', label='filtered')\n plt.title('Kaiser window_width = %d , const = %d' % (window_width, beta))\n plt.legend(loc='best')\n\n ax2 = plt.subplot2grid((3,1), (2,0))\n data_changed.loc[:,'high_frequency'].plot(label='high_frequency')\n ax2.set_ylim([-150, 150])\n plt.title('High Frequency')\n plt.legend(loc='best')\n plt.show()\n # print data_use\n # print data_changed\n data_out = data_changed['close_filtered']\n return np.array(data_out.tolist())", "def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = 
self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def ferret_custom_axes(id):\n axis_defs = [ None ] * pyferret.MAX_FERRET_NDIM\n axis_defs[0] = ( 1, 2, 1, \"KS,P\", False )\n return axis_defs", "def write_filters(self, session):\n\n w = self._dual.get_op('w')\n weights_values = session.run(w)\n weights_transpose = np.transpose(weights_values)\n\n filter_height = self._input_shape_visualisation[1]\n filter_width = self._input_shape_visualisation[2]\n np_write_filters(weights_transpose, [filter_height, filter_width])", "def plot_filterbank_output(signals, spacing=None, axis=-1):\n\n if spacing is None:\n spacing = signals.max()\n\n for k, filtered in enumerate(signals):\n plt.gca().set_prop_cycle(plt.cycler('color', COLORS[:signals.shape[2]]))\n if axis == -1:\n filtered = filtered.T\n plt.plot(filtered + k*spacing*2)", "def generate_filter_plots(\n data: AnnData, plot_filt: str, plot_filt_figsize: str = None\n) -> None:\n\n df_plot_before = data.obs[[\"Channel\", \"n_genes\", \"n_counts\", \"percent_mito\"]].copy()\n df_plot_before.reset_index(drop=True, inplace=True)\n df_plot_before[\"status\"] = \"original\"\n\n data = data[data.obs[\"passed_qc\"]] # focusing only on filtered cells\n\n df_plot_after = data.obs[[\"Channel\", \"n_genes\", \"n_counts\", \"percent_mito\"]].copy()\n df_plot_after.reset_index(drop=True, inplace=True)\n df_plot_after[\"status\"] = \"filtered\"\n df_plot = pd.concat((df_plot_before, df_plot_after), axis=0)\n\n from sccloud.plotting import plot_qc_violin\n\n figsize = None\n if plot_filt_figsize is not None:\n width, height = plot_filt_figsize.split(\",\")\n figsize = (int(width), int(height))\n\n plot_qc_violin(\n df_plot,\n \"count\",\n plot_filt + \".filt.UMI.pdf\",\n xattr=\"Channel\",\n hue=\"status\",\n xlabel=\"Channel\",\n split=True,\n linewidth=0,\n figsize=figsize,\n )\n\n plot_qc_violin(\n df_plot,\n \"gene\",\n plot_filt + \".filt.gene.pdf\",\n xattr=\"Channel\",\n hue=\"status\",\n xlabel=\"Channel\",\n split=True,\n linewidth=0,\n figsize=figsize,\n )\n\n plot_qc_violin(\n df_plot,\n \"mito\",\n plot_filt + \".filt.mito.pdf\",\n xattr=\"Channel\",\n hue=\"status\",\n xlabel=\"Channel\",\n split=True,\n linewidth=0,\n figsize=figsize,\n )\n\n logger.info(\"Filtration plots are generated.\")", "def feature_axes(self):\n raise NotImplementedError()", "def get_axes(self) -> VGroup:\n return self.axes", "def get_axes(self) -> VGroup:\n return self.axes", "def make_animation_subset_levels(X, fixed_axes, fixed_value_1, fixed_value_2,\n filtration_size):\n # Create the array indexes\n obj = [slice(None, None, None)] * 4\n obj[fixed_axes[0]] = fixed_value_1\n obj[fixed_axes[1]] = fixed_value_2\n # print obj\n\n # Create sequence of threshold values\n thresholds = np.linspace(start=np.amin(X[obj]), stop=np.amax(X[obj]), num=filtration_size)\n # print thresholds\n # TEST PLOT\n # fig, ax = plt.subplots()\n 
# # interp = kwargs.get('interpolation', 'none')\n # # colors = kwargs.get('colormap', 'seismic')\n # img0 = ax.imshow(X[obj], cmap='Blues', interpolation='none')\n # fig.colorbar(img0, ax=ax, fraction=0.022, pad=0.01)\n # ax.invert_yaxis()\n # # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n # fig.tight_layout()\n # fig.show()\n\n # def get_middle(xx):\n # return 1 - (float(np.amax(xx)) / (np.amax(xx) + abs(np.amin(xx))))\n\n def init():\n global fig, ax, im, tx\n fig = plt.figure()\n ax = plt.axes()\n # idx = list(obj)\n # idx[sweep_axis] = slice(None, None, None)\n # middle = get_middle(X[idx])\n # print obj\n im = ax.imshow(X[obj] < thresholds[2], cmap='Blues',#cmap=shiftedColorMap(cm.seismic, midpoint=middle),\n interpolation='none', aspect='auto')\n # vmin=np.amin(X[idx]), vmax=np.amax(X[idx]))\n ax.invert_yaxis()\n # cb = fig.colorbar(im)\n # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n return\n\n def animate(n):\n # update indexes\n # obj[sweep_axis] = n\n # vmax = np.max(X[obj])\n # vmin = np.min(X[obj])\n im.set_data(X[obj] < thresholds[n])\n # im.set_clim(vmin, vmax)\n # tx.set_text('%s = %d' % (X.dimensions[sweep_axis], n))\n return\n\n init()\n anim = animation.FuncAnimation(fig, animate, frames=np.arange(filtration_size), interval=100, blit=False)\n return anim" ]
[ "0.7061723", "0.6328764", "0.6086109", "0.60675657", "0.5965842", "0.5962679", "0.55877304", "0.5564867", "0.5475261", "0.5450346", "0.5436063", "0.5395516", "0.5379075", "0.53688693", "0.53572947", "0.5324945", "0.5273135", "0.5239883", "0.5197603", "0.5147273", "0.5143321", "0.5142728", "0.50849175", "0.505669", "0.5049305", "0.4998452", "0.49916667", "0.49592856", "0.49592856", "0.49552926" ]
0.73524797
0
Create the deconvolution output axes.
def _output_axes(self, in_obj, pad_int):
    output_axes = ng.make_axes()
    for ax in in_obj.axes:
        name = ax.name
        if name in self.conv_axis_names:
            output_axes += ng.make_axis(name=ax.name,
                                        length=utils.deconv_output_dim(ax.length,
                                                                       self.filter_shape[name],
                                                                       pad_int[name],
                                                                       self.strides[name],
                                                                       self.dilation[name]))
        elif name == "C":
            output_axes += ng.make_axis(name=name, length=self.nout)
        else:
            output_axes += ax
    return output_axes
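For reference, here is a minimal standalone sketch of the transposed-convolution sizing rule that an axis length like the one above is conventionally derived from. The record's utils.deconv_output_dim helper is not quoted here; the formula below is the standard ConvTranspose relation and is an assumption about what such a helper computes.

def deconv_output_dim(input_dim, filter_dim, pad, stride, dilation=1):
    # Invert the forward convolution relation:
    # out = stride * (in - 1) + dilated_filter_size - 2 * pad
    dilated_filter = dilation * (filter_dim - 1) + 1
    return stride * (input_dim - 1) + dilated_filter - 2 * pad

# Example: an 8-pixel axis, 5-wide filter, stride 2, no padding -> 19 pixels,
# which appears consistent with the 8x8 -> 19x19 sizing exercised by the
# test_deconv snippet among this record's negatives.
print(deconv_output_dim(8, 5, pad=0, stride=2))  # 19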
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, in_obj, channel_axes=\"C\", spatial_axes=(\"D\", \"H\", \"W\"), **kwargs):\n output = super(Deconvolution, self).__call__(in_obj, channel_axes, spatial_axes, **kwargs)\n return self._slice_output(output, spatial_axes, **kwargs)", "def make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTranspose(\n filters=num_filters,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_uniform\",\n )(inp)\n x = BatchNormalization()(x)\n x = Activation(\"elu\")(x)\n\n return x", "def convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes", "def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization(momentum=0.8)(u)\n u = Concatenate()([u, skip_input])\n return u", "def _make_deconv_layer(self, num_filters, num_kernels):\n assert len(num_kernels) == len(num_filters), \\\n 'Deconv filters and kernels number mismatch: {} vs. 
{}'.format(\n len(num_filters), len(num_kernels))\n\n layers = nn.HybridSequential('deconv_')\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n self.base_network.initialize()\n in_planes = self.base_network(mx.nd.zeros((1, 3, 256, 256))).shape[1]\n for planes, k in zip(num_filters, num_kernels):\n kernel, padding, output_padding = self._get_deconv_cfg(k)\n layers.add(nn.Conv2D(channels=planes,\n kernel_size=3,\n strides=1,\n padding=1,\n in_channels=in_planes))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n layers.add(nn.Conv2DTranspose(channels=planes,\n kernel_size=kernel,\n strides=2,\n padding=padding,\n output_padding=output_padding,\n use_bias=False,\n in_channels=planes,\n weight_initializer=BilinearUpSampleInitializer()))\n layers.add(nn.BatchNorm())\n layers.add(nn.Activation('relu'))\n in_planes = planes\n\n return layers", "def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u", "def deconv2d(input_, \n output_dims,\n k_h=5, \n k_w=5,\n d_h=2,\n d_w=2,\n stddev=0.02,\n name='deconv2d',\n with_w=False):\n \n with tf.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n w = tf.get_variable('w',\n [k_h, k_w, output_dims[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n\n try:\n deconv = tf.nn.conv2d_transpose(input_,\n w, \n output_shape=output_dims,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_,\n w, \n output_shape=output_dims,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_dims[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv", "def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Image input", "def deconv(dims, inplanes, outplanes, kernel_size, stride, bias, dilation):\n padding = math.floor((kernel_size-stride+1)/2)\n if dims==2:\n return nn.ConvTranspose2d(inplanes, outplanes, kernel_size, stride,\n padding=padding, bias=bias) #, dilation=1)\n elif dims==3:\n return nn.ConvTranspose3d(inplanes, outplanes, kernel_size, stride,\n padding = padding, bias=bias) #, dilation=1)\n else:\n raise ValueError('dimension of deconv must be 2 or 3')", "def deconv2d(layer_input, filters, f_size=8, dropout_rate=0,permanent=False):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate and not permanent:\n u = Dropout(dropout_rate)(u)\n elif dropout_rate and permanent:\n # permanent droput from my main man fchollet <3\n u=Lambda(lambda x: K.dropout(x, level=dropout_rate))(u) \n \n u = BatchNormalization(momentum=0.8)(u)\n return u", "def 
_conv_op(self, in_obj, channel_axes, spatial_axes):\n\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n\n output_axes = self._output_axes(in_obj.axes,\n pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_shape,\n self.strides, pad_int, self.dilation)\n return ng.deconvolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.pool_axis_names:\n output_axes += ng.make_axis(name=name,\n length=utils.conv_output_dim(ax.length,\n self.pool_shape[name],\n pad_int[name],\n self.strides[name],\n pooling=True))\n else:\n output_axes += ax\n\n return output_axes", "def test_deconv():\n\n # filter params\n R, S = 5, 5\n fshape = (R, S, 1)\n strides = 2\n filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)\n filter_val = np.zeros(fshape)\n filter_val[:, :, 0] = filter_val_nz\n\n deconv = Deconvolution(fshape,\n filter_init=ConstantInit(filter_val),\n strides=strides,\n padding=0,\n dilation=1)\n\n N = ng.make_axis(name='N', length=1) # batch\n image_shape = (1, 8, 8) # CHW\n image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)\n for nm, l in zip('CHW', image_shape)])\n image_axes |= N\n image = ng.placeholder(axes=image_axes)\n\n output = deconv(image)\n\n with closing(ngt.make_transformer()) as transformer:\n comp = transformer.add_computation(ng.computation(output, image))\n input_val = np.zeros(image_shape + (N.length, ), dtype=float)\n input_val[0, 0, 0] = 1\n input_val[0, 5, 5] = 1\n input_val[0, 7, 7] = 1\n result = comp(input_val)\n feature_map = np.squeeze(result)\n\n assert (feature_map[:5, :5] == filter_val_nz).all()\n\n result2 = filter_val_nz.copy()\n result2[-1, -1] = 26\n assert (feature_map[10:15, 10:15] == result2).all()\n\n result3 = filter_val_nz.copy()\n result3[0, 0] = 26\n assert (feature_map[-5:, -5:] == result3).all()", "def deconv(\n in_channels,\n out_channels,\n kernel_size,\n stride=2,\n padding=1,\n batch_norm=True,\n):\n layers = []\n layers.append(\n nn.ConvTranspose2d(\n in_channels, out_channels, kernel_size, stride, padding, bias=False\n )\n )\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)", "def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):\n layers = []\n # append transpose conv layer\n # TODO: shouldn't we set bias to NOT batch_norm instead of always being False ?\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n # optional batch norm layer\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)", "def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = 
tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output", "def deconv2d(layer_input):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u", "def deconv(self, input_layer, num_filters, filter_size,\n filter_strides=(2,2), padding='SAME',\n activation=None, use_batch_norm=None):\n num_inputs = input_layer.get_shape().as_list()[1]\n ih, iw = input_layer.get_shape().as_list()[2:]\n output_shape = [-1, num_filters,\n ih*filter_strides[0], iw*filter_strides[1]]\n kernel_shape = [filter_size[0], filter_size[1],\n num_filters, num_inputs]\n strides = [1, 1, filter_strides[0], filter_strides[1]]\n with tf.variable_scope(self._count_layer('deconv')) as scope:\n kernel = self._get_variable('weights', kernel_shape,\n input_layer.dtype)\n x = tf.nn.conv2d_transpose(input_layer, kernel, output_shape,\n strides, padding=padding,\n data_format='NCHW')\n x = self._bias_or_batch_norm(x, scope, use_batch_norm)\n x = self.activate(x, activation)\n return x", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def _get_deconv_cfg(self, deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError('Unsupported deconvolution kernel: {}'.format(deconv_kernel))\n\n return deconv_kernel, padding, output_padding", "def deconv2d(layer_input,num=256):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(num, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u", "def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, normalization=True, norm_type='instance_norm'):\n layers = []\n # append transpose conv layer\n layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))\n\n # optional normalization layer\n if normalization == True and norm_type == 'instance_norm':\n layers.append(nn.InstanceNorm2d(out_channels))\n elif normalization == True and norm_type == 'batch_norm':\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)", "def __init__(self, incoming, W=None, b=tf.zeros, ksize: int = None, num_outputs: int = None,\n weight_initializer=None, a=tf.nn.elu, output_shape=None, strides=(1, 2, 2, 1), padding='SAME',\n data_format='NHWC',\n name='DeConvLayer'):\n super(DeConvLayer, self).__init__()\n with tf.variable_scope(name) as 
self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n # Set init for W and b\n if all(p is not None for p in [weight_initializer, ksize, num_outputs]):\n W = tofov(weight_initializer,\n shape=(ksize, ksize, num_outputs, incoming.get_output_shape()[-1]),\n var_params=dict(name='W_deconv'))\n else:\n W = tofov(W, shape=None, var_params=dict(name='W_deconv'))\n b = tofov(b, shape=W.get_shape().as_list()[-2], var_params=dict(name='b_deconv'))\n \n if output_shape is None:\n if padding == 'SAME' and strides[0] == 1:\n if len(self.incoming_shape) == 5:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1],\n self.incoming_shape[2] * strides[1], self.incoming_shape[3] * strides[2],\n W.get_shape().as_list()[-2] * strides[3]]\n else:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1] * strides[1],\n self.incoming_shape[2] * strides[2], W.get_shape().as_list()[-2] * strides[3]]\n else:\n raise AttributeError(\"Automatic output_shape calculation not implemented for strides!=1 in \"\n \"first dimension\")\n \n if isinstance(padding, int):\n if len(self.incoming_shape) == 5:\n self.padding = [[0, 0], [0, 0], [padding, padding], [padding, padding], [0, 0]]\n elif len(self.incoming_shape) == 4:\n self.padding = [[0, 0], [padding, padding], [padding, padding], [0, 0]]\n else:\n raise ValueError(\"invalid input shape\")\n else:\n self.padding = padding\n \n self.a = a\n self.b = b\n self.W = W\n \n self.output_shape = output_shape\n self.strides = strides\n \n self.data_format = data_format\n \n self.out = None\n self.name = name", "def apply_deconv2d(input_layer, num_classes, kernel_size=4, strides=(2, 2)):\n return tf.layers.conv2d_transpose(input_layer, num_classes, kernel_size, strides=strides, padding='same',\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))", "def _slice_output(self, output, spatial_axes, **kwargs):\n\n if self.deconv_out_shape is None:\n return output\n\n if isinstance(spatial_axes, dict):\n spatial_axes = tuple(spatial_axes.get(name, name)\n for name in (\"D\", \"H\", \"W\"))\n elif isinstance(spatial_axes, tuple):\n if len(spatial_axes) < 3:\n raise ValueError(\"spatial_axes must have length 3 (e.g. 
('D', 'H', 'W'))\")\n spatial_axes = tuple(name if name else default\n for name, default in zip(spatial_axes, (\"D\", \"H\", \"W\")))\n\n slices = [slice(None)] * len(output.axes)\n for ii, ax_name in enumerate(spatial_axes):\n if ax_name in output.axes.names:\n index = output.axes.names.index(ax_name)\n out_size = output.axes[index].length\n trim_size = self.deconv_out_shape[ii]\n if trim_size > out_size:\n raise ValueError('specified {} output dimension {} is greater than {}'\n .format(ax_name, trim_size, out_size))\n elif trim_size < out_size:\n extra = out_size - trim_size\n start = extra // 2\n end = start + trim_size\n slices[index] = slice(start, end)\n\n return ng.tensor_slice(output, slices)", "def _filter_axes(self, channel_axis, spatial_axes):\n f_axes = ng.make_axis(length=self.nout, name=\"K\")\n for key, ax in zip(\"DHW\", spatial_axes):\n f_axes += ng.make_axis(length=self.filter_shape[key],\n name=ax.name)\n f_axes += channel_axis\n return f_axes", "def show_filters(self):\n weight_mat = self.sess.run(self.W_fc_out)\n\n # Loop channels\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n for cl in range(weight_mat.shape[1]):\n # Get filters of this output class\n w_list = ia.vec2image( lin_image=weight_mat[:,cl],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n\n # Show channels\n for ch,w in enumerate(w_list):\n colormax = np.abs(w).max()\n ax = plt.subplot2grid( (self.n_output_classes,\n self.n_input_channels), (cl,ch) )\n ax.imshow( w, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n colormax = np.abs(w).max()\n\n if self.n_output_classes == 2:\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n # Get filters of this output class\n w_list0 = ia.vec2image( lin_image=weight_mat[:,0],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n w_list1 = ia.vec2image( lin_image=weight_mat[:,1],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n for ch in range(len(w_list)):\n w_both = w_list1[ch]-w_list0[ch]\n\n colormax = np.abs(w_both).max()\n ax = plt.subplot2grid( (1,\n self.n_input_channels), (0,ch) )\n ax.imshow( w_both, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def _get_deconv_cfg(deconv_kernel):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n else:\n raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')\n\n return deconv_kernel, padding, output_padding" ]
[ "0.6600882", "0.6512945", "0.6240972", "0.620487", "0.6190723", "0.6060082", "0.6006831", "0.5991179", "0.5951186", "0.5951137", "0.58947194", "0.5878189", "0.5848726", "0.5846914", "0.5805691", "0.5782366", "0.5744223", "0.5723216", "0.56546867", "0.563938", "0.56088835", "0.5607671", "0.560046", "0.5585584", "0.5563137", "0.5526732", "0.5526355", "0.55205435", "0.5461558", "0.5445332" ]
0.656358
1
Get integer padding values for each axis. If padding is asymmetric, return the required manual paddings.
def _get_pad_int(self, axes):
    # Manual padding might be required for asymmetric paddings
    manual_pad = {}
    padding_int = {}
    for name, ax in zip(self.pool_axis_names, axes):
        pad = utils.ConvParameters(ax.length,
                                   self.pool_shape[name],
                                   self.strides[name],
                                   pooling=True).get_padding_size(self.padding[name])
        symm_pad = min(pad)
        padding_int[name] = symm_pad
        if pad[0] != pad[1]:
            manual_pad[ax.name] = (pad[0] - symm_pad, pad[1] - symm_pad)
    return padding_int, manual_pad
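As an illustration of the symmetric/asymmetric split performed above, a small self-contained sketch follows. The (before, after) pair stands in for whatever get_padding_size returns; that helper is library-specific and its exact output is an assumption here.

def split_padding(pad):
    # pad is a hypothetical (pad_before, pad_after) pair for one axis.
    symm_pad = min(pad)  # the part the pooling/conv op can apply on both sides
    extra = (pad[0] - symm_pad, pad[1] - symm_pad)  # leftover handled by an explicit pad op
    return symm_pad, (extra if extra != (0, 0) else None)

# Examples: an asymmetric (1, 2) pad becomes symmetric 1 plus a manual (0, 1) pad;
# a symmetric (2, 2) pad needs no manual remainder.
print(split_padding((1, 2)))  # (1, (0, 1))
print(split_padding((2, 2)))  # (2, None)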
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_pad_int(self, spatial_axes):\n # Manual padding might be required for asymmetric paddings\n manual_pad = {}\n padding_int = {}\n for name, ax in zip(self.spatial_keys, spatial_axes):\n pad = utils.ConvParameters(ax.length,\n self.filter_spatial_shape[name],\n self.strides[name],\n self.dilation[name]).get_padding_size(self.padding[name])\n symm_pad = min(pad)\n padding_int[name] = symm_pad\n if pad[0] != pad[1]:\n manual_pad[ax.name] = (pad[0] - symm_pad, pad[1] - symm_pad)\n\n return padding_int, manual_pad", "def padding(self):\n\t\treturn self.paddings_shape_param('W')", "def get_paddings(self):\n return tf.constant([[0, 0,],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [0, 0]])", "def padding(self) -> Tuple[int, int, int, int]:\n return (self.ipadding[0].to_pixels(self.width),\n self.ipadding[1].to_pixels(self.width),\n self.ipadding[2].to_pixels(self.height),\n self.ipadding[3].to_pixels(self.height))", "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def padding_width(self):\n\t\treturn self.paddings_shape_param('W')", "def _get_padding(w, h):\n dim_diff = np.abs(h - w)\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n return (0, pad1, 0, pad2) if h <= w else (pad1, 0, pad2, 0)", "def get_padding_sizes(self, div, dim):\n # ghost cells in the y direction\n target_shape = div * np.ceil(dim / div)\n target_shape_diff = target_shape - dim\n\n pad_low = int(np.ceil(target_shape_diff / 2.0))\n pad_high = int(np.floor(target_shape_diff / 2.0))\n\n return pad_low, pad_high", "def transform_padding(pad_width):\n num_pad_values = len(pad_width)\n onnx_pad_width = [0]*num_pad_values\n\n start_index = 0\n # num_pad_values will always be multiple of 2\n end_index = int(num_pad_values/2)\n for idx in range(0, num_pad_values):\n if idx % 2 == 0:\n onnx_pad_width[start_index] = pad_width[idx]\n start_index += 1\n else:\n onnx_pad_width[end_index] = pad_width[idx]\n end_index += 1\n\n return onnx_pad_width", "def padding_index(self) -> int:\n return self._pad_index", "def get_padding_values():\r\n return (tf.constant(0, tf.float32), tf.constant(0, tf.float32), tf.constant(0, tf.float32))", "def padding(self):\r\n return self._generate_spacing_info(self.config['padding'])", "def get_padding_values():\n return tf.constant(0, tf.float32), tf.constant(0, tf.float32), tf.constant(-1, tf.int32)", "def get_pads(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n auto_pad = onnx_node.get_attribute_value('auto_pad')\n pads = onnx_node.get_attribute_value('pads', ()) # Padding along each axis\n kernel_shape = onnx_node.get_attribute_value('kernel_shape')\n\n # Attribute 'auto_pad' is deprecated, but is currently used by CNTK\n if auto_pad:\n if auto_pad == 'VALID':\n pads = [0, 0] * len(kernel_shape)\n\n else:\n # SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.\n # In case of odd number add the extra padding at the end for SAME_UPPER and at the\n # beginning for SAME_LOWER.\n def pad_value(kernel_dim): # type: (int) -> float\n return (kernel_dim - 1.0) / 2.0\n\n pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n ceil(pad_value(dim)) for dim in kernel_shape]\n pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n floor(pad_value(dim)) for dim in kernel_shape]\n pads = pads_starts + pads_ends\n\n verify_symmetric_padding(onnx_node, pads)\n\n pad_h, pad_w, pad_d = 
0, 0, 0\n if pads and len(pads) == 2: # ONNX input axes NCHW\n pad_h, pad_w = pads\n if pads and len(pads) == 3: # ONNX input axes NCHWD\n pad_h, pad_w, pad_d = pads\n if pads and len(pads) == 4: # ONNX input axes NCHW\n pad_h, pad_w, _, _ = pads\n elif pads and len(pads) == 6: # ONNX input axes NCHWD\n pad_h, pad_w, pad_d, _, _, _ = pads\n\n return pad_h, pad_w, pad_d", "def paddings_shape_param(self, param):\n\t\tindex = self.variables['paddings_format'].index(param)\n\t\treturn self.variables['paddings'].shape[index]", "def _get_padding_width(self, column_index: int) -> int:\n _, pad_right, _, pad_left = self.padding\n if self.collapse_padding:\n if column_index > 0:\n pad_left = max(0, pad_left - pad_right)\n return pad_left + pad_right", "def compute_padding(M, N, J):\n M_padded = ((M + 2 ** J) // 2 ** J + 1) * 2 ** J\n N_padded = ((N + 2 ** J) // 2 ** J + 1) * 2 ** J\n return M_padded, N_padded", "def padding_depth(self):\n\t\treturn self.paddings_shape_param('D')", "def paddings_for_conv2d(\n kernel_size: Sequence[int],\n shifts: Sequence[int] = (0, 0),\n) -> List[Tuple[int, int]]:\n if len(kernel_size) != 2 or len(shifts) != 2:\n raise ValueError('kernel_size and shifts must have length 2')\n\n paddings = [(0, 0)]\n for size, shift in zip(kernel_size, shifts):\n pad_left = (size - shift) // 2\n paddings.append((pad_left, size - pad_left - 1))\n paddings += [(0, 0)]\n return paddings", "def padding_width(self):\n ...", "def determine_padding(self, input_shape: int, output_shape: int) -> int:\n padding = (((output_shape - 1) * self.stride) + 1 - input_shape + (self.dilation * (self.kernel_size - 1)))\n\n # integer division\n padding = padding // 2\n assert output_shape == l_out(\n input_shape, padding, self.dilation, self.kernel_size, self.stride\n ) and padding >= 0, f\"Input and output of {input_shape} and {output_shape} with \" \\\n f\"kernel {self.kernel_size}, dilation {self.dilation}, stride {self.stride} \" \\\n f\"are incompatible for a Conv1D network.\"\n return padding", "def get_padding(x, padding_value=0, dtype=tf.float32):\n # print(\"get_padding\", dtype)\n with tf.name_scope(\"padding\"):\n return tf.cast(tf.equal(x, padding_value), dtype=dtype)", "def padding_width(self):\n return self.width + self.padding_left + self.padding_right", "def _dynamic_padding(self, batch_data, pad_id = 0 ):\n #print 'dynamic _padding...'\n #print 'pad_id' + str(pad_id)\n max_p_len = 1000\n max_q_len =1000\n pad_p_len = min(max_p_len, max(batch_data['passage_length']))+1\n #print 'pad_p_len' + str(pad_p_len)\n pad_q_len = min(max_q_len, max(batch_data['question_length']))\n #print 'pad_q_len' + str(pad_q_len)\n #for ids in batch_data['passage_token_ids'] :\n #print 'padding: '\n #print (ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len", "def get_padding_values(input_dataset_types, label_padding=-100):\n def map_to_zero(dtypes):\n if isinstance(dtypes, Sequence):\n return tuple((map_to_zero(d) for d in dtypes))\n return tf.cast(0., dtypes)\n\n def map_to_label_padding(dtypes):\n if isinstance(dtypes, Sequence):\n return tuple((map_to_zero(d) for d in dtypes))\n return tf.cast(label_padding, dtypes)\n\n if len(input_dataset_types) == 2:\n data_type, label_type = 
input_dataset_types\n return (\n map_to_zero(data_type),\n map_to_label_padding(label_type)\n )\n\n if len(input_dataset_types) == 3:\n data_type, label_type, sample_weight_type = input_dataset_types\n return (\n map_to_zero(data_type),\n map_to_label_padding(label_type),\n map_to_zero(sample_weight_type)\n )", "def _determine_padding_from_tf_same(input_dimensions, kernel_dimensions, stride_dimensions):\n\n # get dimensions\n in_height, in_width = input_dimensions\n\n if isinstance(kernel_dimensions, int):\n kernel_height = kernel_dimensions\n kernel_width = kernel_dimensions\n else:\n kernel_height, kernel_width = kernel_dimensions\n\n if isinstance(stride_dimensions, int):\n stride_height = stride_dimensions\n stride_width = stride_dimensions\n else:\n stride_height, stride_width = stride_dimensions\n\n # determine the output size that is to achive by the padding\n out_height = ceil(in_height / stride_height)\n out_width = ceil(in_width / stride_width)\n\n # determine the pad size along each dimension\n pad_along_height = max((out_height - 1) * stride_height + kernel_height - in_height, 0)\n pad_along_width = max((out_width - 1) * stride_width + kernel_width - in_width, 0)\n\n # determine padding 4-tuple (can be asymmetric)\n pad_top = pad_along_height // 2\n pad_bottom = pad_along_height - pad_top\n pad_left = pad_along_width // 2\n pad_right = pad_along_width - pad_left\n\n return pad_left, pad_right, pad_top, pad_bottom", "def parse_padding(padding: PaddingType) -> Tuple4IntType:\n if padding is False or None:\n padding = 0\n assert isinstance(padding, PaddingInstance)\n\n if isinstance(padding, NumberInstance):\n assert padding >= 0, 'padding cannot be a negative number'\n return int(padding), int(padding), int(padding), int(padding)\n else:\n assert 1 <= len(padding) <= 4, 'padding must be a tuple of 2, 3 or 4 elements'\n for i in range(len(padding)):\n assert isinstance(padding[i], NumberInstance), \\\n 'all padding elements must be integers or floats'\n assert padding[i] >= 0, \\\n 'all padding elements must be equal or greater than zero'\n if len(padding) == 1:\n return int(padding[0]), int(padding[0]), int(padding[0]), int(padding[0])\n elif len(padding) == 2:\n return int(padding[0]), int(padding[1]), int(padding[0]), int(padding[1])\n elif len(padding) == 3:\n return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[1])\n else:\n return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[3])", "def _dynamic_padding(self, batch_data, pad_id):\n pad_p_len = min(self.max_p_len, max(batch_data['passage_length']))\n pad_q_len = min(self.max_q_len, max(batch_data['question_length']))\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len", "def __calc_padding(self, input_shape, kernel_size, stride=1):\n # default of pytorch for input_size = (C_in, H_in, W_in)\n if len(input_shape) == 3:\n if stride != (1,1):\n raise ValueError(\"calc padding only works for stride=(1,1)\")\n padding = (0,0)\n if kernel_size[0]%2 == 0 or kernel_size[1]%2 == 0:\n raise ValueError(\"the kernel size: {} is incompatible with CnnHighway. 
With this kernel, the conv output shape will not equal the input shape\".format(kernel_size))\n padding_height = int((kernel_size[0] - 1)/2)\n padding_width = int((kernel_size[1] - 1)/2)\n return (padding_height, padding_width)\n if len(input_shape) == 2:\n if stride != 1:\n raise ValueError(\"calc padding only works for stride=(1)\")\n padding = int((kernel_size -1)/2)\n return padding", "def pad(self):\n return self._pad" ]
[ "0.73760635", "0.70292467", "0.67624974", "0.67057866", "0.6551189", "0.6527535", "0.6488737", "0.6471581", "0.6398685", "0.6383349", "0.6362462", "0.6361514", "0.63430554", "0.63392824", "0.6220768", "0.61472887", "0.6120855", "0.6040636", "0.6040091", "0.6036621", "0.5954419", "0.5947942", "0.5945028", "0.59321547", "0.5906588", "0.58268344", "0.5812872", "0.58064526", "0.57847303", "0.57822305" ]
0.7482885
0
Create the pooling output axes.
def _output_axes(self, in_obj, pad_int):
    output_axes = ng.make_axes()
    for ax in in_obj.axes:
        name = ax.name
        if name in self.pool_axis_names:
            output_axes += ng.make_axis(name=name,
                                        length=utils.conv_output_dim(ax.length,
                                                                     self.pool_shape[name],
                                                                     pad_int[name],
                                                                     self.strides[name],
                                                                     pooling=True))
        else:
            output_axes += ax
    return output_axes
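For context, a minimal sketch of the pooled-axis length computation that the snippet above delegates to utils.conv_output_dim. The floor-based formula below is the conventional one and is an assumption about that helper rather than a quotation of it.

import math

def pool_output_dim(input_dim, window, pad, stride):
    # Conventional pooling size: floor((in + 2*pad - window) / stride) + 1
    return math.floor((input_dim + 2 * pad - window) / stride) + 1

# Example: pooling a 28-pixel axis with a 2-wide window, stride 2, no padding -> 14
print(pool_output_dim(28, window=2, pad=0, stride=2))  # 14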
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pool_op(self, in_obj, pool_axes):\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(pool_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n output_axes = self._output_axes(in_obj,\n pad_int)\n poolparams = make_poolparams(self.pool_type,\n self.pool_shape,\n self.strides,\n pad_int)\n return ng.pooling(poolparams,\n in_obj,\n axes=output_axes)", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes", "def run(self):\n # fill the x_values,y_values,z_values dictionaries\n if not self.__fillCoordinatesFromSource():\n self.raiseAWarning('Nothing to Plot Yet. Returning.')\n return\n\n self.counter += 1\n if self.counter > 1:\n self.actcm = None\n clusterDict = deepcopy(self.outStreamTypes)\n\n # start plotting.... loop over the plots that need to be included in this figure\n for pltIndex in range(len(self.outStreamTypes)):\n plotSettings = self.options['plotSettings']['plot'][pltIndex]\n if 'gridLocation' in plotSettings:\n x = None\n y = None\n if 'x' in plotSettings['gridLocation']:\n x = list(map(int, plotSettings['gridLocation']['x'].strip().split(' ')))\n else:\n x = None\n if 'y' in plotSettings['gridLocation'].keys():\n y = list(map(int, plotSettings['gridLocation']['y'].strip().split(' ')))\n else:\n y = None\n if pltIndex == 0:\n self.ax.remove() # remove axis so that there is not an extra axis on plot with subplots\n if (len(x) == 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]], projection='3d')\n elif (len(x) == 1 and len(y) != 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]], projection='3d')\n elif (len(x) != 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]], projection='3d')\n else:\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]], projection='3d')\n\n if 'gridSpace' in self.options['plotSettings']:\n self.ax.locator_params(axis='y', nbins=4)\n self.ax.locator_params(axis='x', nbins=2)\n if 'range' in plotSettings:\n axes_range = plotSettings['range']\n if 'ymin' in axes_range:\n self.ax.set_ylim(bottom=ast.literal_eval(axes_range['ymin']))\n if 'ymax' in axes_range:\n self.ax.set_ylim(top=ast.literal_eval(axes_range['ymax']))\n if 'xmin' in axes_range:\n self.ax.set_xlim(left=ast.literal_eval(axes_range['xmin']))\n if 'xmax' in axes_range:\n self.ax.set_xlim(right=ast.literal_eval(axes_range['xmax']))\n if self.dim == 3:\n if 'zmin' in axes_range.options['plotSettings']['plot'][pltIndex]:\n if 'zmax' not in axes_range.options['plotSettings']:\n self.raiseAWarning('zmin inputted but not zmax. zmin ignored! 
')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(self.options['plotSettings']['zmax']))\n if 'zmax' in axes_range:\n if 'zmin' not in axes_range:\n self.raiseAWarning('zmax inputted but not zmin. zmax ignored! ')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(axes_range['zmax']))\n if 'xlabel' not in plotSettings:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(plotSettings['xlabel'])\n if 'ylabel' not in plotSettings:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(plotSettings['ylabel'])\n if 'zlabel' in plotSettings:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(plotSettings['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n else:\n if 'xlabel' not in self.options['plotSettings']:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(self.options['plotSettings']['xlabel'])\n if 'ylabel' not in self.options['plotSettings']:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(self.options['plotSettings']['ylabel'])\n if 'zlabel' in self.options['plotSettings']:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(self.options['plotSettings']['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n\n if 'legend' in self.options['plotSettings']:\n if 'label' not in plotSettings.get('attributes', {}):\n if 'attributes' not in plotSettings:\n plotSettings['attributes'] = {}\n plotSettings['attributes']['label'] = self.outStreamTypes[pltIndex] + ' ' + str(pltIndex)\n #################\n # SCATTER PLOT #\n #################\n self.raiseADebug(f'creating plot {self.name}')\n if self.outStreamTypes[pltIndex] == 'scatter':\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n if self.colorMapCoordinates[pltIndex] is not None:\n # Find the max and min colormap values\n firstKey = utils.first(self.xValues[pltIndex].keys())\n vmin = np.amin(self.colorMapValues[pltIndex][firstKey])\n vmax = np.amax(self.colorMapValues[pltIndex][firstKey])\n for key in self.xValues[pltIndex]:\n vmin = min(vmin,np.amin(self.colorMapValues[pltIndex][key]))\n vmax = max(vmax,np.amax(self.colorMapValues[pltIndex][key]))\n plotSettings['norm'] = matplotlib.colors.Normalize(vmin,vmax)\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n scatterPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['norm'] = plotSettings['norm']\n scatterPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][xIndex]\n scatterPlotOptions['cmap'] = matplotlib.cm.get_cmap(\"winter\")\n if self.actcm:\n first = False\n else:\n first = True\n if 
plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n try:\n self.actcm.draw_all()\n # this is not good, what exception will be thrown?\n except:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m, ax=self.ax)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][zIndex]\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n 
self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n #################\n # LINE PLOT #\n #################\n elif self.outStreamTypes[pltIndex] == 'line':\n minV = 0\n maxV = 0\n # If the user does not define an appropriate cmap, then use matplotlib's default.\n if 'cmap' not in plotSettings or plotSettings['cmap'] not in matplotlib.cm.datad:\n plotSettings['cmap'] = None\n if bool(self.colorMapValues):\n for key in self.xValues[pltIndex]:\n minV = min(minV,self.colorMapValues[pltIndex][key][-1][-1])\n maxV = max(maxV,self.colorMapValues[pltIndex][key][-1][-1])\n cmap = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(minV, maxV, True), plotSettings['cmap'])\n cmap.set_array([minV,maxV])\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n plotSettings['interpPointsX'] = str(max(200, len(self.xValues[pltIndex][key][xIndex])))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n if self.yValues[pltIndex][key][yIndex].size < 2:\n return\n xi, yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings, returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(xi, yi, c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(xi, yi, **plotSettings.get('attributes', {}))\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n **plotSettings.get('attributes', {}))\n ##################\n # HISTOGRAM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'histogram':\n if 'bins' not in plotSettings:\n if self.dim == 2:\n plotSettings['bins'] = '10'\n else:\n plotSettings['bins'] = '4'\n if 'normed' not in plotSettings:\n plotSettings['normed'] = 'False'\n if 'weights' not in plotSettings:\n plotSettings['weights'] = 'None'\n if 'cumulative' not in plotSettings:\n plotSettings['cumulative'] = 'False'\n if 'histtype' not in plotSettings:\n plotSettings['histtype'] = 'bar'\n if 'align' not in plotSettings:\n plotSettings['align'] = 'mid'\n if 'orientation' not in plotSettings:\n plotSettings['orientation'] = 'vertical'\n if 'rwidth' not in plotSettings:\n plotSettings['rwidth'] = 
'None'\n if 'log' not in plotSettings:\n plotSettings['log'] = 'None'\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'stacked' not in plotSettings:\n plotSettings['stacked'] = 'None'\n if self.sourceData[0].type.strip() == 'HistorySet':\n #####################################################################################################################################\n # @MANDD: This 'if' condition has been added in order to allow the user the correctly create an histogram out of an historySet #\n # If the histogram is created out of the input variables, then the plot has an identical meaning of the one generated by a pointSet #\n # However, if the histogram is created out of the output variables, then the plot consider only the last value of the array #\n #####################################################################################################################################\n data = {}\n data['x'] = np.empty(0)\n data['y'] = np.empty(0)\n for index in range(len(self.outStreamTypes)):\n for key in self.xValues[index]:\n data['x'] = np.append(data['x'], self.xValues[index][key][0][-1])\n if self.dim == 3:\n data['y'] = np.append(data['y'], self.yValues[index][key][0][-1])\n del self.xValues[index]\n self.xValues = {}\n self.xValues[index] = {}\n self.xValues[index][0] = []\n self.xValues[index][0].append(deepcopy(data['x']))\n if self.dim == 3:\n del self.yValues[index]\n self.yValues = {}\n self.yValues[index] ={ }\n self.yValues[index][0] = []\n self.yValues[index][0].append(deepcopy(data['y']))\n\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n try:\n colorss = ast.literal_eval(plotSettings['color'])\n # unknown what specific error is anticipated here, but I don't like a bare except...\n # ast.literal_eval can raise the exceptions listed below (see library docs):\n except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):\n colorss = plotSettings['color']\n if self.dim == 2:\n self.ax.hist(self.xValues[pltIndex][key][xIndex],\n bins=ast.literal_eval(plotSettings['bins']),\n density=ast.literal_eval(plotSettings['normed']),\n weights=ast.literal_eval(plotSettings['weights']),\n cumulative=ast.literal_eval(plotSettings['cumulative']),\n histtype=plotSettings['histtype'],\n align=plotSettings['align'],\n orientation=plotSettings['orientation'],\n rwidth=ast.literal_eval(plotSettings['rwidth']),\n log=ast.literal_eval(plotSettings['log']),\n color=colorss,\n stacked=ast.literal_eval(plotSettings['stacked']),\n **plotSettings.get('attributes', {}))\n else:\n for yIndex in range(len(self.yValues[pltIndex][key])):\n hist, xedges, yedges = np.histogram2d(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n bins=ast.literal_eval(plotSettings['bins']))\n elements = (len(xedges) - 1) * (len(yedges) - 1)\n if 'x_offset' in plotSettings:\n xoffset = float(plotSettings['x_offset'])\n else:\n xoffset = 0.25\n if 'y_offset' in plotSettings:\n yoffset = float(plotSettings['y_offset'])\n else:\n yoffset = 0.25\n if 'dx' in plotSettings:\n dxs = float(plotSettings['dx'])\n else:\n dxs = (self.xValues[pltIndex][key][xIndex].max() - self.xValues[pltIndex][key][xIndex].min()) / float(plotSettings['bins'])\n if 'dy' in plotSettings:\n dys = float(plotSettings['dy'])\n else:\n dys = (self.yValues[pltIndex][key][yIndex].max() - self.yValues[pltIndex][key][yIndex].min()) / float(plotSettings['bins'])\n xpos, ypos = np.meshgrid(xedges[:-1] + xoffset, yedges[:-1] + yoffset)\n self.actPlot = 
self.ax.bar3d(xpos.flatten(),\n ypos.flatten(),\n np.zeros(elements),\n dxs*np.ones_like(elements),\n dys*np.ones_like(elements),\n hist.flatten(),\n color=colorss,\n zsort='average',\n **plotSettings.get('attributes', {}))\n ##################\n # STEM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'stem':\n if 'linefmt' not in plotSettings:\n plotSettings['linefmt'] = 'b-'\n if 'markerfmt' not in plotSettings:\n plotSettings['markerfmt'] = 'bo'\n if 'basefmt' not in plotSettings:\n plotSettings['basefmt'] = 'r-'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n self.actPlot = self.ax.stem(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n linefmt=plotSettings['linefmt'],\n markerfmt=plotSettings['markerfmt'],\n basefmt = plotSettings['linefmt'],\n use_line_collection=True,\n **plotSettings.get('attributes', {}))\n else:\n # it is a basic stem plot constructed using a standard line plot. For now we do not use the previous defined keywords...\n for zIndex in range(len(self.zValues[pltIndex][key])):\n for xx, yy, zz in zip(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex]):\n self.ax.plot([xx, xx], [yy, yy], [0, zz], '-')\n ##################\n # STEP PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'step':\n if self.dim == 2:\n if 'where' not in plotSettings:\n plotSettings['where'] = 'mid'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][xIndex].size < 2:\n xi = self.xValues[pltIndex][key][xIndex]\n else:\n xi = np.linspace(self.xValues[pltIndex][key][xIndex].min(), self.xValues[pltIndex][key][xIndex].max(), ast.literal_eval(plotSettings['interpPointsX']))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][yIndex].size <= 3:\n return\n yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings)\n self.actPlot = self.ax.step(xi, yi, where=plotSettings['where'], **plotSettings.get('attributes', {}))\n else:\n self.raiseAWarning('step Plot not available in 3D')\n return\n ########################\n # PSEUDOCOLOR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'pseudocolor':\n if self.dim == 2:\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if not self.colorMapCoordinates:\n self.raiseAMessage('pseudocolor Plot needs coordinates for color map... 
Returning without plotting')\n return\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][zIndex].size <= 3:\n return\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n else:\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(ma.masked_where(np.isnan(Ci), Ci))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n actcm = self.fig.colorbar(m)\n actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.raiseAWarning('pseudocolor Plot is considered a 2D plot, not a 3D!')\n return\n ########################\n # SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'surface':\n if self.dim == 2:\n self.raiseAWarning('surface Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n if 'antialiased' not in plotSettings:\n plotSettings['antialiased'] = 'False'\n if 'linewidth' not in plotSettings:\n plotSettings['linewidth'] = '0'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n facecolors=matplotlib.cm.get_cmap(name=plotSettings['cmap'])(ma.masked_where(np.isnan(Ci), Ci)),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n ########################\n # TRI-SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'tri-surface':\n if self.dim == 2:\n self.raiseAWarning('TRI-surface Plot is NOT available for 2D plots, it is 3D!')\n return\n else:\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'shade' not in plotSettings:\n plotSettings['shade'] = 'False'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, 
so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n metric = (self.xValues[pltIndex][key][xIndex] ** 2 + self.yValues[pltIndex][key][yIndex] ** 2) ** 0.5\n metricIndeces = np.argsort(metric)\n xs = np.zeros(self.xValues[pltIndex][key][xIndex].shape)\n ys = np.zeros(self.yValues[pltIndex][key][yIndex].shape)\n zs = np.zeros(self.zValues[pltIndex][key][zIndex].shape)\n for sindex in range(len(metricIndeces)):\n xs[sindex] = self.xValues[pltIndex][key][xIndex][metricIndeces[sindex]]\n ys[sindex] = self.yValues[pltIndex][key][yIndex][metricIndeces[sindex]]\n zs[sindex] = self.zValues[pltIndex][key][zIndex][metricIndeces[sindex]]\n surfacePlotOptions = {'color': plotSettings['color'],\n 'shade': ast.literal_eval(plotSettings['shade'])}\n surfacePlotOptions.update(plotSettings.get('attributes', {}))\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n surfacePlotOptions['cmap'] = matplotlib.cm.get_cmap(name = plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] != 'None':\n surfacePlotOptions[\"cmap\"] = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n ########################\n # WIREFRAME PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'wireframe':\n if self.dim == 2:\n self.raiseAWarning('wireframe Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning(f'Currently, ax.plot_wireframe() in MatPlotLib version: {matplotlib.__version__} does not support a colormap! Wireframe plotted on a surface plot...')\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n alpha=0.4,\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n ########################\n # CONTOUR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'contour' or self.outStreamTypes[pltIndex] == 'filledContour':\n if self.dim == 2:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n for key in self.xValues[pltIndex]:\n if not self.colorMapCoordinates:\n self.raiseAWarning(self.outStreamTypes[pltIndex] + ' Plot needs coordinates for color map... 
Returning without plotting')\n return\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink=0.8, extend='both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n self.raiseAWarning('contour/filledContour is a 2-D plot, where x,y are the surface coordinates and colorMap vector is the array to visualize!\\n contour3D/filledContour3D are 3-D! ')\n return\n # These should be combined: ^^^ & vvv\n elif self.outStreamTypes[pltIndex] == 'contour3D' or self.outStreamTypes[pltIndex] == 'filledContour3D':\n if self.dim == 2:\n self.raiseAWarning('contour3D/filledContour3D Plot is NOT available for 2D plots, IT IS A 2D! Check \"contour/filledContour\"!')\n return\n else:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n if 'extend3D' in plotSettings:\n ext3D = bool(plotSettings['extend3D'])\n else:\n ext3D = False\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour3D':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n extend3d=ext3D,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n extend3d=ext3D,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink = 0.8, extend = 'both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n ########################\n # DataMining PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'dataMining':\n colors = cycle(['#88CCEE', '#DDCC77', '#AA4499', '#117733', '#332288', '#999933', '#44AA99', '#882255', '#CC6677', '#CD6677', '#DC6877', '#886677', '#AA6677', '#556677', '#CD7865'])\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n clusterDict[pltIndex] = {}\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n dataMiningPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning('ColorMap values supplied, however DataMining plots do not use colorMap from input.')\n if plotSettings['cmap'] == 'None':\n self.raiseAWarning('ColorSet supplied, however DataMining plots do not use color set from input.')\n if 'cluster' == plotSettings['SKLtype']:\n # TODO: include the cluster Centers to the plot\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = 
np.amax(self.clusterValues[pltIndex][1][0]) + 1\n dataMiningPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n clusterDict[pltIndex]['clusterValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['clusterValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if self.dim == 2:\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n clusterDict[pltIndex]['clusterValues'][:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n elif 'bicluster' == plotSettings['SKLtype']:\n self.raiseAnError(IOError, 'SKLType Bi-Cluster Plots are not implemented yet!..')\n elif 'mixture' == plotSettings['SKLtype']:\n if 'noMixtures' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noMixtures'] = int(plotSettings.get('attributes', {})['noMixtures'])\n plotSettings.get('attributes', {}).pop('noMixtures')\n else:\n clusterDict[pltIndex]['noMixtures'] = np.amax(self.mixtureValues[pltIndex][1][0]) + 1\n if self.dim == 3:\n self.raiseAnError(IOError, 'SKLType Mixture Plots are only available in 2-Dimensions')\n 
else:\n clusterDict[pltIndex]['mixtureValues'] = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n clusterDict[pltIndex]['mixtureValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['mixtureValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'mixtureCovars' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureCovars', (pltIndex, 0))\n # mixtureCovars = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureCovars')\n # else:\n # mixtureCovars = None\n if 'mixtureMeans' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureMeans', (pltIndex, 0))\n # mixtureMeans = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureMeans')\n # else:\n # mixtureMeans = None\n # mixtureCovars.reshape(3, 4)\n # mixtureMeans.reshape(3, 4)\n # for i, (mean, covar, col) in enumerate(zip(mixtureMeans, mixtureCovars, colors)):\n for i, col in zip(range(clusterDict[pltIndex]['noMixtures']), colors):\n if not np.any(self.mixtureValues[pltIndex][1][0] == i):\n continue\n myMembers = self.mixtureValues[pltIndex][1][0] == i\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['mixtureValues'][myMembers, 0],\n clusterDict[pltIndex]['mixtureValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n elif 'manifold' == plotSettings['SKLtype']:\n if self.dim == 2:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n manifoldValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n manifoldValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'clusterLabels' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('clusterLabels', (pltIndex, 0))\n clusterDict[pltIndex]['clusterLabels'] = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('clusterLabels')\n else:\n clusterDict[pltIndex]['clusterLabels'] = None\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n manifoldValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n if self.dim == 2:\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n 
manifoldValues[:, 2],\n **dataMiningPlotOptions)\n elif 'decomposition' == plotSettings['SKLtype']:\n if self.dim == 2:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 3))\n decompositionValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n decompositionValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n decompositionValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n # no ClusterLabels\n if self.dim == 2:\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n decompositionValues[:, 2],\n **dataMiningPlotOptions)\n else:\n # Let's try to \"write\" the code for the plot on the fly\n self.raiseAWarning('Trying to create a non-predefined plot of type ' + self.outStreamTypes[pltIndex] + '. If this fails, please refer to the and/or the related matplotlib method specification.')\n kwargs = {}\n for kk in plotSettings:\n if kk != 'attributes' and kk != self.outStreamTypes[pltIndex]:\n try:\n kwargs[kk] = ast.literal_eval(plotSettings[kk])\n except ValueError:\n kwargs[kk] = plotSettings[kk]\n try:\n if self.dim == 2:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n else:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, '<' + str(ae) + '> -> in execution custom plot \"' + self.outStreamTypes[pltIndex] + '\" in Plot ' + self.name + '.\\nSTREAM MANAGER: ERROR -> command has been called in the following way: ' + 'ax.' 
+ self.outStreamTypes[pltIndex])\n\n if 'legend' in self.options['plotSettings']:\n self.fig.legend(**self.options['plotSettings']['legend'])\n\n # SHOW THE PICTURE\n self.__executeActions()\n self.fig.canvas.draw_idle()\n\n if 'screen' in self.destinations and display:\n def handle_close(event):\n \"\"\"\n This method is aimed to handle the closing of figures (overall when in interactive mode)\n @ In, event, instance, the event to close\n @ Out, None\n \"\"\"\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')\n self.fig.canvas.mpl_connect('close_event', handle_close)\n # self.plt.pause(1e-6)\n # The following code is extracted from pyplot.pause without actually\n # needing to force the code to sleep, according to MPL's documentation,\n # this feature is experimental, hopefully by not calling the pause\n # function, we can obtain consistent results.\n # We are skipping a few of the sanity checks done in that function,\n # since we are sure we have an interactive backend and access to the\n # correct type of canvas and figure.\n self.fig.canvas.draw()\n # If your graphs are unresponsive to user input, you may want to consider\n # adjusting this timeout, to allow more time for the input to be handled.\n self.fig.canvas.start_event_loop(1e-3)\n\n # self.fig.canvas.flush_events()\n\n for fileType in self.destinations:\n if fileType == 'screen':\n continue\n\n if not self.overwrite:\n prefix = str(self.counter) + '-'\n else:\n prefix = ''\n\n if len(self.filename) > 0:\n name = self.filename\n else:\n name = prefix + self.name + '_' + str(self.outStreamTypes).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"-\").replace(\" \", \"\")\n\n if self.subDirectory is not None:\n name = os.path.join(self.subDirectory,name)\n\n self.fig.savefig(name + '.' 
+ fileType, format=fileType)\n\n if 'screen' not in self.destinations:\n plt.close(fig=self.fig)\n\n gc.collect()", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes", "def __init__(self):\n super().__init__()\n\n # general attributes\n self.printTag = 'OUTSTREAM PLOT'\n self.options = {} # outstreaming options # no addl info from original developer\n self.counter = 0 # keeps track of how many times the same plot has been plotted\n self.dim = None # default plot is 2D\n self.sourceName = [] # list of source names\n self.sourceData = None # source of data\n self.outStreamTypes = [] # list of the outstream types\n self.destinations = None # where plots should go (screen, file, etc.)\n\n # plot types key is data dimension, value is list of available plot types\n self.availableOutStreamTypes = {2: ['scatter',\n 'line',\n 'histogram',\n 'stem',\n 'step',\n 'pseudocolor',\n 'dataMining',\n 'contour',\n 'filledContour'],\n 3: ['scatter',\n 'line',\n 'histogram',\n 'stem',\n 'surface',\n 'wireframe',\n 'tri-surface',\n 'contour',\n 'filledContour']}\n\n # interpolators that can be used in plots\n self.availableInterpolators = ['nearest',\n 'linear',\n 'cubic',\n 'multiquadric',\n 'inverse',\n 'gaussian',\n 'Rbflinear',\n 'Rbfcubic',\n 'quintic',\n 'thin_plate']\n\n # plot components\n self.fig = None # figure\n self.ax = None # axes\n self.actPlot = None # plot action, ie., ax.plot()\n self.gridSpace = None # subplot setup\n self.actcm = None # colormap\n self.xCoordinates = None # x coordinate name\n self.yCoordinates = None # y coordinate name\n self.zCoordinates = None # z coordinate name\n self.xValues = None # dictionary of x values\n self.yValues = None # dictionary of y values\n self.zValues = None # dictionary of z values\n self.colorMapCoordinates = {} # color map coordinates\n self.colorMapValues = {} # color map values\n\n # For the data-mining plot, I think?\n self.clusterLabels = None\n self.clusterValues = None\n\n # Gaussian Mixtures\n self.mixtureLabels = None\n self.mixtureValues = None\n self.mixtureMeans = None\n self.mixtureCovars = None", "def make_conv_output_axes(input, filter, conv_params):\n # type: (TensorOp, TensorOp, Dict) -> Axes\n number_output_features = filter.axes[-1].length\n mini_batch_size = input.axes[-1].length\n\n input_d, input_h, input_w = input.axes.lengths[1:4] # axes order C, D, H, W, N\n filter_d, filter_h, filter_w = filter.axes.lengths[1:4] # axes order J, T(d), R(h), S(w), K\n\n def output_dim(input_x, filter_x, pad_x, str_x, dil_x):\n return floor((input_x + 2 * pad_x - filter_x - (filter_x - 1) * (dil_x - 1)) / str_x) + 1\n\n convp = conv_params\n output_d = output_dim(input_d, filter_d, convp['pad_d'], convp['str_d'], convp['dil_d'])\n output_h = output_dim(input_h, filter_h, convp['pad_h'], convp['str_h'], convp['dil_h'])\n output_w = output_dim(input_w, filter_w, convp['pad_w'], convp['str_w'], convp['dil_w'])\n\n output_axes = ng.make_axes(axes=(\n ng.make_axis(name='C', docstring='output features', length=int(number_output_features)),\n ng.make_axis(name='D', docstring='depth', length=int(output_d)),\n ng.make_axis(name='H', 
docstring='height', length=int(output_h)),\n ng.make_axis(name='W', docstring='width', length=int(output_w)),\n ng.make_axis(name='N', docstring='mini-batch size', length=int(mini_batch_size)),\n ))\n return output_axes", "def plot_outputgrid(self, scalefactor=1, **kwargs):\n\n if not (type(scalefactor) == 'int'):\n scalefactor = round(scalefactor)\n\n xx = np.arange(self.xori, self.xend, scalefactor * self.dx)\n yy = np.arange(self.yori, self.yend, scalefactor * self.dy)\n plt.hlines(yy, self.xori, self.xend, linewidth=0.2, **kwargs)\n plt.vlines(xx, self.yori, self.yend, linewidth=0.2, **kwargs)\n\n logger.debug('Adding output grid to plot')", "def plot_multi_outputs(X_list, gp,xlim,n_outputs):\r\n\r\n slices = GPy.util.multioutput.get_slices(X_list)\r\n fig = plt.figure(figsize=(15, 8))\r\n plt.subplots_adjust(hspace=0.)\r\n for i in range(1, n_outputs+1):\r\n ax = plt.subplot(n_outputs, 1, i)\r\n ax.tick_params(axis=\"both\", direction=\"in\", bottom=True, top=True, left=True, right=True)\r\n ax.tick_params(axis=\"both\", labelbottom=False)\r\n ax.set_xlim(xlim)\r\n ax.set_ylabel(f'Output {i}')\r\n gp.plot(plot_limits=xlim, ax=ax, fixed_inputs=[(1, i-1)], which_data_rows=slices[i-1], legend=False)\r\n ax.tick_params(axis=\"both\", labelbottom=True)\r\n plt.show()\r\n return", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def _generate_axes_(self):\n\n return AxesTuple(self._axis(i) for i in range(self.ndim))", "def __init__(self):\n self.fig = pl.figure(1,figsize=(8,6), dpi=80 , frameon = True , facecolor = '0.75' , edgecolor = 'w')\n self.fig.add_subplot(111 , axisbg = 'w' , projection = 'rectilinear') #if you want to add axes on particular place: fig.add_axes([0.15, 0.1, 0.7, 0.3]) where -> [begin , bottom to start axes , width , height ]\n self.separated = True #if we have a list and need to plot the plots separated", "def set_component_outputs(self):\n outputs = []\n\n text = Text(self.get_data(), self.vocabulary)\n dp = DataPack(text, usage=self.indices)\n outputs.append(dp)\n\n if self.is_labelled():\n labels_data = Numeric(self.labels)\n labels_usage = Labels(self.label_names, self.multilabel)\n dp = DataPack(labels_data, labels_usage)\n dp.add_usage(self.indices)\n outputs.append(dp)\n if self.targets:\n # tar = self.train_targets + self.test_targets\n # dp = DataPack.make(tar, GroundTruth)\n dp = DataPack.make(self.targets, GroundTruth)\n dp.add_usage(self.indices)\n outputs.append(dp)\n\n self.data_pool.add_data_packs(outputs, self.name)", "def create_fig_2d(self, data_array_2d, output_fn='', xlabel='', ylabel='', title=''):", "def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()", "def gen_figure(snf_nmi, rbf_nmi, snf_mod, rbf_mod):\n xticklabels = ['CT', 'SV', 'DB', 'CSF', 'CLIN']\n yticklabels = ['68', '114', '219', '448', '1000']\n\n fig, axes = plt.subplots(2, 2, figsize=(10, 8), sharex=True, sharey=True)\n fig.subplots_adjust(wspace=-0.05, hspace=0.15)\n\n # make four circleplots\n ax1 = plotting.circleplot(snf_nmi, vmin=-0.01, vmax=1.01, ax=axes[0][0],\n xticklabels=[], yticklabels=yticklabels)\n ax2 = 
plotting.circleplot(rbf_nmi, vmin=-0.01, vmax=1.01, ax=axes[0][1],\n xticklabels=[], yticklabels=yticklabels,\n cbar_kws={'ticks': [0.00, 1.00]})\n ax3 = plotting.circleplot(snf_mod, vmin=-0.01, vmax=0.36, ax=axes[1][0],\n xticklabels=xticklabels, yticklabels=yticklabels)\n ax4 = plotting.circleplot(rbf_mod, vmin=-0.01, vmax=0.36, ax=axes[1][1],\n xticklabels=xticklabels, yticklabels=yticklabels,\n cbar_kws={'ticks': [0, 0.35]})\n\n for ax in axes.flatten():\n for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n\n # set axis titles\n ax1.set_title('Similarity network fusion', pad=20)\n ax2.set_title('Data concatenation', pad=20)\n\n # set axis labels\n ax1.set_ylabel('Dimensionality of\\ncortical thickness data',\n labelpad=15, x=0, y=0)\n ax3.set_xlabel('Data type', labelpad=15, x=1.1)\n\n # turn off colorbars on lefthand plots\n ax1.collections[0].colorbar.ax.set_visible(False)\n ax3.collections[0].colorbar.ax.set_visible(False)\n\n # correct colorbar appearance for righthand plots\n ax2.collections[0].colorbar.ax.tick_params(size=0, labelsize=14)\n ax4.collections[0].colorbar.ax.tick_params(size=0, labelsize=14)\n ax2.collections[0].colorbar.ax.set_ylabel('Normalized mutual\\ninformation',\n rotation=270, labelpad=30)\n ax4.collections[0].colorbar.ax.set_ylabel('Modularity',\n rotation=270, labelpad=15)\n\n # plot small gray lines to better differentiate plots\n plt.plot([0.4725, 0.4725], [0.55, 0.85], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n plt.plot([0.4725, 0.4725], [0.15, 0.45], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n plt.plot([0.155, 0.415], [0.5, 0.5], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n plt.plot([0.525, 0.795], [0.5, 0.5], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n\n return fig", "def generate_plots(self, input_data, input_labels=None):\n pass", "def make_plots(self):\n n_rounds = self.run.n_rounds\n\n log.info('Making %d frames', n_rounds)\n args = [self._get_for_parallel(i) for i in range(n_rounds)]\n self.lbv.map(_plot_helper, args)", "def create_four_subplots():\n pass", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def create_figure(self):\n plt.rcParams.update(general_utils.returnGraphConfigs(\"anim\"))\n self.fig = plt.figure()\n self.axes = plt.axes()\n self.axes.set_xlabel(\"Cells In X (Columns)\")\n self.axes.set_ylabel(\"Cells In Y (Rows)\")\n self.axes.set_xlim(0, self.dimensions - 1)\n self.axes.set_ylim(0, self.dimensions - 1)", "def create_plot():\n\n fig, ax = plt.subplots()\n return fig, ax", "def combine_plot(qa_out_path,brain_path):\n \n #Get the scan volume of the brain.\n brain_ref = nib.load(brain_path)\n brain_ref_shape = brain_ref.shape[0:3]\n \n plots_list = ['Rotate_Z_axis_000000.png','Rotate_Z_axis_000001.png','Rotate_Z_axis_000002.png',\n 'Rotate_Y_axis_000000.png','Rotate_Y_axis_000001.png','Rotate_Y_axis_000002.png',\n 'Rotate_X_axis_000000.png','Rotate_X_axis_000001.png','Rotate_X_axis_000002.png']\n y_labels = [\"Rotate with Z axis\",\"Rotate with Y axis\",\"Rotate with X axis\"]\n x_labels = [\"angle=0\",\"angle=120\",\"angle=240\"]\n \n #Temporary list to 
store the image nparray:\n im_arr=[] \n \n fig= plt.figure()\n plt.title(f'QA_tractography. Scan volume = {brain_ref_shape} \\n\\n', fontsize=60,fontweight='bold')\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n\n j = 0\n for i in range(9):\n #Load in the nine images into a nparray one by one.\n im_arr = np.array(Image.open(qa_out_path + \"/\" + plots_list[i]))\n #Change the background of the image into black:\n im_arr = np.where(im_arr<=0.01, 255, im_arr) \n ax = fig.add_subplot(3,3,i+1)\n ax.imshow(im_arr,interpolation=\"none\",alpha=0.9)\n \n #Set the X labels and Y labels\n if i<3:\n ax.set_title(x_labels[i],fontsize=60,fontweight='bold')\n if i % 3 == 0:\n ax.set_ylabel(y_labels[j],fontsize=60,fontweight='bold')\n j = j + 1\n plt.xticks([])\n plt.yticks([])\n \n fig.set_size_inches(40, 40, forward = True)\n fig.savefig(qa_out_path + \"/\" + 'qa_tractography.png', format='png')\n\n #Delete the Nine images which used to generate the qa_tractography.png \n for plot in plots_list:\n if os.path.exists(qa_out_path + \"/\" + plot):\n os.remove(qa_out_path + \"/\" + plot)\n else:\n print('No such file generated from streamlines window. Please check if the streamline.trk files is generated from the pipeline correctly or not')", "def plot(self):\n fig, axes = plt.subplots(math.ceil(len(self.plots) / self.col_wrap), self.col_wrap)\n\n for ps, ax in zip(self.plots, axes.flatten()):\n for p in ps:\n if p.x is not None and p.y is not None:\n p.method(x=p.x, y=p.y, *p.args, ax=ax, **p.kwargs)\n else:\n p.method(*p.args, ax=ax, **p.kwargs)\n\n return fig, axes", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def plot_3outputs(X, gp,xlim):\r\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(15, 8))\r\n slices = 
GPy.util.multioutput.get_slices([X, X, X])\r\n #Output 1\r\n ax1.set_xlim(xlim)\r\n ax1.set_title('X')\r\n gp.plot(plot_limits=xlim, ax=ax1, fixed_inputs=[(1,0)], which_data_rows=slices[0])\r\n # ax1.plot(X1,Y1,'r,',mew=1.5)\r\n #Output 2\r\n ax2.set_xlim(xlim)\r\n ax2.set_title('Y')\r\n gp.plot(plot_limits=xlim, ax=ax2, fixed_inputs=[(1,1)], which_data_rows=slices[1])\r\n # ax2.plot(X2,Y2,'r,',mew=1.5)\r\n # Output 3\r\n ax3.set_xlim(xlim)\r\n ax3.set_title('Z')\r\n gp.plot(plot_limits=xlim, ax=ax3, fixed_inputs=[(1,2)], which_data_rows=slices[2])\r\n # ax3.plot(X3, Y3, 'r,', mew=1.5)\r\n return", "def new_axes(self, name):\n\n return self.figure.add_axes([0.05, 0.05, 0.9, 0.9], label=name)", "def _init(self) -> List[PlotType]:\n self.plots[0].set_data([], [], 'bx', markersize=5)\n self.plots[1].set_data([], [], 'r.', markersize=15)\n return self.plots", "def _add_axes(self, n):\n height = (self.top - self.bottom) / float(self.get_n())\n height = min(height, self.maxheight)\n width = self.right - self.left\n ax = self.figure.add_axes([self.left, self.bottom + (n - 1) * height, width, height])\n return ax", "def _ps_init(self):\n\n self.ps_ax.set_xlim(-np.pi, np.pi)\n self.ps_ax.set_ylim(-10, 10)\n self.ps_ax.set_xlabel(\"degree [rad]\")\n self.ps_ax.set_ylabel(\"velocity [rad/s]\")\n for ap in self.ps_plots:\n ap.set_data([], [])\n return self.ps_plots", "def _make_twin_axes(self, *args, **kwargs):\n # Typically, SubplotBase._make_twin_axes is called instead of this.\n # There is also an override in axes_grid1/axes_divider.py.\n if 'sharex' in kwargs and 'sharey' in kwargs:\n raise ValueError('Twinned Axes may share only one axis.')\n ax2 = self.figure.add_axes(self.get_position(True), *args, **kwargs)\n self.set_adjustable('datalim')\n ax2.set_adjustable('datalim')\n self._twinned_axes.join(self, ax2)\n return ax2" ]
[ "0.6783506", "0.62494135", "0.60730106", "0.5980152", "0.59566844", "0.5922597", "0.58838063", "0.5869608", "0.57832056", "0.57643634", "0.56859326", "0.5636432", "0.5632888", "0.55650336", "0.55625474", "0.549513", "0.5488764", "0.5483506", "0.5480738", "0.54502887", "0.54458594", "0.5432833", "0.54252785", "0.54171586", "0.54126114", "0.53948927", "0.5372675", "0.5370707", "0.5357724", "0.5324857" ]
0.7195969
0
Slice output to desired shape given by deconv_out_shape
def _slice_output(self, output, spatial_axes, **kwargs): if self.deconv_out_shape is None: return output if isinstance(spatial_axes, dict): spatial_axes = tuple(spatial_axes.get(name, name) for name in ("D", "H", "W")) elif isinstance(spatial_axes, tuple): if len(spatial_axes) < 3: raise ValueError("spatial_axes must have length 3 (e.g. ('D', 'H', 'W'))") spatial_axes = tuple(name if name else default for name, default in zip(spatial_axes, ("D", "H", "W"))) slices = [slice(None)] * len(output.axes) for ii, ax_name in enumerate(spatial_axes): if ax_name in output.axes.names: index = output.axes.names.index(ax_name) out_size = output.axes[index].length trim_size = self.deconv_out_shape[ii] if trim_size > out_size: raise ValueError('specified {} output dimension {} is greater than {}' .format(ax_name, trim_size, out_size)) elif trim_size < out_size: extra = out_size - trim_size start = extra // 2 end = start + trim_size slices[index] = slice(start, end) return ng.tensor_slice(output, slices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slice_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 80\n shape_3 = input_shape[2]\n return (shape_1, shape_2, shape_3)", "def _squeeze_output(out):\r\n out = out.squeeze()\r\n if out.ndim == 0:\r\n out = out[()]\r\n return out", "def output_reshape(ct):\n return np.moveaxis(ct, 1, -1)", "def conv2d_output_shape(input_shape, filter_shape, stride, padding):\n filter_shape = tf.TensorShape(filter_shape).as_list()\n filter_out = filter_shape[-1]\n filter_patch_shape = np.array(filter_shape[0:2])\n input_shape_list = tf.TensorShape(input_shape).as_list()\n batch = input_shape_list[:-3]\n input_shape = np.array(input_shape_list[-3:])\n stride = np.array(stride)\n if padding == 'VALID':\n shift = -filter_patch_shape + 1\n elif padding == 'SAME':\n shift = 0\n else:\n raise ValueError('padding must be either \"VALID\" or \"SAME\", but \"%s\" was given' % padding)\n output_shape = np.ceil((input_shape[:2] + shift) / stride[1:3])\n return batch + output_shape.astype(np.int).tolist() + [filter_out]", "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))", "def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)", "def _reshape_output_batch(self, number, output):\n #tt = cutotime('reshape')\n #tt.start()\n output = output.reshape(self.output_shapes[number]) # batch, h, w, 3, (5 + 80)\n #tt.stop()\n return output", "def flatten_output(self, output, feature_sequence_lens=None):\n if feature_sequence_lens == None:\n feature_sequence_lens = self.feature_sequence_lens\n num_outs = output.shape[2]\n# num_seq = output.shape[1]\n flat_output = np.zeros((self.batch_size(feature_sequence_lens), num_outs))\n cur_index = 0\n for seq_index, num_obs in enumerate(feature_sequence_lens):\n flat_output[cur_index:cur_index+num_obs, :] = copy.deepcopy(output[:num_obs, seq_index, :])\n cur_index += num_obs\n \n return flat_output", "def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))", "def reshape_output_shape_0(input_shape): \n shape_1 = input_shape[0]\n shape_2 = input_shape[1]\n shape_3 = input_shape[2]\n return(shape_1, shape_2, shape_3, 1)", "def test_reshape_conv_slice_conv():\n class Net(gluon.HybridBlock):\n def __init__(self, **kwargs):\n super(Net, self).__init__(**kwargs)\n self.conv0 = nn.Conv2D(16, (3, 3))\n self.conv1 = nn.Conv2D(32, (3, 3))\n\n def hybrid_forward(self, F, x):\n x_reshape = x.reshape((0, 0, 64, 16))\n y = self.conv0(x_reshape)\n \"shape of y is (4, 16, 62, 14)\"\n y_slice = y.slice(begin=(0, 0, 0, 0), end=(2, 16, 14, 14))\n out = self.conv1(y_slice)\n return out\n x = mx.nd.random.uniform(shape=(4, 3, 32, 32))\n net = Net()\n check_layer_forward_withinput(net, x)", "def _reduce_outshape(self, outshape):\n return tuple([index for index in outshape if index != 1])", "def conv_output_shape(\n h_w: Tuple[int, int],\n kernel_size: int = 1,\n stride: int = 1,\n pad: int = 0,\n dilation: int = 1,\n ):\n h = floor(\n ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) 
+ 1\n )\n w = floor(\n ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n return h, w", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def _get_conv_out(self, shape) -> int:\n conv_out = self.conv(torch.zeros(1, *shape))\n return int(np.prod(conv_out.size()))", "def l_out_conv(layer_num, kernel_size, pool=False):\n l_out_list = []\n l_in = constants.SHAPE_OF_ONE_DATA_POINT[1]\n for i in range(layer_num):\n l_out = l_out_conv1d(l_in, kernel_size, stride=2)\n l_out = l_out_conv1d(l_out, kernel_size, stride=2)\n\n l_out_list.append(l_out)\n\n if pool:\n pool_size = 3\n l_out = l_out_pool(l_out, pool_size)\n l_out_list.append(l_out)\n l_in = l_out\n\n # make a copy and reverse for decoder size def\n\n l_out_list_copy = copy.deepcopy(l_out_list)\n l_out_list.append(32)\n encoder_sizes = l_out_list\n l_out_list_copy.reverse()\n l_out_list_copy.append(constants.SHAPE_OF_ONE_DATA_POINT[1])\n decoder_sizes = l_out_list_copy\n return encoder_sizes, decoder_sizes", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = math.floor( ((h_w[0] + (2 * pad) - ( dilation * (kernel_size[0] - 1)\n ) - 1 )/ stride) + 1)\n w = math.floor( ((h_w[1] + (2 * pad) - ( dilation * (kernel_size[1] - 1)\n ) - 1 )/ stride) + 1)\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = floor(((h_w[0] + (2 * pad) - (dilation *\n (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad) - (dilation *\n (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):\n\tdummy = np.zeros(X_shape)\n\ts, p, d = stride, pad, dilation\n\tif len(X_shape) == 3:\n\t\t_, p = pad1D(dummy, p)\n\t\tpw1, pw2 = p\n\t\tfw, in_ch, out_ch = W_shape\n\t\tn_ex, in_length, in_ch = X_shape\n\n\t\t_fw = fw * (d+1) - d\n\t\tout_length = (in_length + pw1 + pw2 - _fw) // s + 1\n\t\tout_dim = (n_ex, out_length, out_ch)\n\n\telif len(X_shape) == 4:\n\t\t_, p = pad2D(dummy, p)\n\t\tpr1, pr2, pc1, pc2 = p\n\t\tfr, fc, in_ch, out_ch = W_shape\n\t\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t\t# adjust effective filter size to account for dilation\n\t\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\t\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\t\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\t\tout_dims = (n_ex, out_rows, out_cols, out_ch)\n\telse:\n\t\traise ValueError(\"unrecognized number of the input dims: {}\".format(len(X_shape)))", "def crop(arr, target_shape):\n arr_shape = arr.shape\n ncrop = ()\n for dim in range(len(arr_shape)):\n diff = arr_shape[dim] - target_shape[dim]\n if diff > 0:\n start = int(diff / 2)\n stop = start + target_shape[dim]\n ncrop += np.index_exp[start:stop]\n else:\n ncrop += np.index_exp[:]\n cropped = arr[ncrop]\n return cropped", "def conv2d_transpose(self, output_shape, filter_):\n return self.add_layer(conv2d_transpose, output_shape, filter_)", "def conv_out_shape(dims, conv):\n kernel_size, stride, pad, dilation = conv.kernel_size, conv.stride, conv.padding, conv.dilation\n return tuple(int(((dims[i] + (2 * pad[i]) - 
(dilation[i]*(kernel_size[i]-1))-1)/stride[i])+1) for i in range(len(dims)))", "def __call__(self, in_obj, channel_axes=\"C\", spatial_axes=(\"D\", \"H\", \"W\"), **kwargs):\n output = super(Deconvolution, self).__call__(in_obj, channel_axes, spatial_axes, **kwargs)\n return self._slice_output(output, spatial_axes, **kwargs)", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def conv_transpose_output_shape(h_w, kernel_size=1, stride=1, pad=0, output_padding=0):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = (h_w[0] - 1) * stride - (2 * pad) + kernel_size[0] + output_padding\n w = (h_w[1] - 1) * stride - (2 * pad) + kernel_size[1] + output_padding\n return h, w", "def output_shape(self):\r\n return self.detector.output_shape", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w" ]
[ "0.70831054", "0.6761576", "0.66313136", "0.6393305", "0.6357284", "0.629436", "0.62509036", "0.62246907", "0.621388", "0.6152387", "0.61450714", "0.61170775", "0.6114104", "0.6026817", "0.60165524", "0.60165524", "0.60165524", "0.59306437", "0.5909184", "0.5898397", "0.58895063", "0.5878083", "0.58551455", "0.5850034", "0.58405805", "0.5840487", "0.5810404", "0.57615244", "0.5748894", "0.5740253" ]
0.7285818
0
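
The record above centres on one small rule: when the produced deconvolution output is larger than `deconv_out_shape`, each spatial axis is trimmed symmetrically (`start = extra // 2`, keep `trim_size` elements from there). A minimal NumPy sketch of that rule follows; the helper name and the toy shapes are invented purely for illustration and are not part of the dataset row.

```python
import numpy as np

def center_crop(arr, target_shape):
    """Center-crop every axis of `arr` down to `target_shape`.

    Same arithmetic as `_slice_output` above: extra = size - target,
    start = extra // 2, keep [start, start + target). A target larger
    than the axis is an error; an equal target leaves the axis alone.
    """
    slices = []
    for size, target in zip(arr.shape, target_shape):
        if target > size:
            raise ValueError("target %d exceeds axis size %d" % (target, size))
        start = (size - target) // 2
        slices.append(slice(start, start + target))
    return arr[tuple(slices)]

# Toy example: trim a one-channel 9x9 deconvolution output down to 8x8.
out = np.arange(81).reshape(1, 9, 9)
print(center_crop(out, (1, 8, 8)).shape)  # -> (1, 8, 8)
```

Because of the `// 2` floor, any odd surplus is dropped from the trailing edge of the axis, which matches the slice arithmetic in the positive document.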
Compute a deconvolution over in_obj
def __call__(self, in_obj, channel_axes="C", spatial_axes=("D", "H", "W"), **kwargs): output = super(Deconvolution, self).__call__(in_obj, channel_axes, spatial_axes, **kwargs) return self._slice_output(output, spatial_axes, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _conv_op(self, in_obj, channel_axes, spatial_axes):\n\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n\n output_axes = self._output_axes(in_obj.axes,\n pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_shape,\n self.strides, pad_int, self.dilation)\n return ng.deconvolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)", "def deconvolution(obs, green, lambd):\n\n nr, nt = obs.shape\n num = np.zeros(nt)\n den = np.zeros(nt)\n\n for ir in range(len(obs)):\n\n OBS = fft(obs[ir, :])\n GRE = fft(green[ir, :])\n\n # Sum all\n num = num + np.conj(GRE) * OBS\n den = den + np.conj(GRE) * GRE\n\n # Get maximum value of denominator\n maxden = np.max(np.abs(den))\n\n # Waterlevel\n wl = lambd * maxden\n\n # Deconvolution using the waterlevel\n src = np.real(ifft(num / (den+wl).T))\n\n # Compute fit to original data\n res = obs\n chi0 = 0.5 * np.sum(np.sum(res ** 2))\n\n syn = compute_synth(green, src)\n res = obs - syn\n chi = 0.5 * np.sum(np.sum(res ** 2))\n\n print(chi/chi0)\n\n return src, syn", "def deconvolve(self, img, psf):\n self.data = pysap.Image(data=self.deconv.deconvolve(img, psf))", "def Deconvolve(obj, gsparams=None, propagate_gsparams=True):\n from .chromatic import ChromaticDeconvolution\n if isinstance(obj, ChromaticObject):\n return ChromaticDeconvolution(obj, gsparams=gsparams, propagate_gsparams=propagate_gsparams)\n elif isinstance(obj, GSObject):\n return Deconvolution(obj, gsparams=gsparams, propagate_gsparams=propagate_gsparams)\n else:\n raise TypeError(\"Argument to Deconvolve must be either a GSObject or a ChromaticObject.\")", "def test_deconv():\n\n # filter params\n R, S = 5, 5\n fshape = (R, S, 1)\n strides = 2\n filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)\n filter_val = np.zeros(fshape)\n filter_val[:, :, 0] = filter_val_nz\n\n deconv = Deconvolution(fshape,\n filter_init=ConstantInit(filter_val),\n strides=strides,\n padding=0,\n dilation=1)\n\n N = ng.make_axis(name='N', length=1) # batch\n image_shape = (1, 8, 8) # CHW\n image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)\n for nm, l in zip('CHW', image_shape)])\n image_axes |= N\n image = ng.placeholder(axes=image_axes)\n\n output = deconv(image)\n\n with closing(ngt.make_transformer()) as transformer:\n comp = transformer.add_computation(ng.computation(output, image))\n input_val = np.zeros(image_shape + (N.length, ), dtype=float)\n input_val[0, 0, 0] = 1\n input_val[0, 5, 5] = 1\n input_val[0, 7, 7] = 1\n result = comp(input_val)\n feature_map = np.squeeze(result)\n\n assert (feature_map[:5, :5] == filter_val_nz).all()\n\n result2 = filter_val_nz.copy()\n result2[-1, -1] = 26\n assert (feature_map[10:15, 10:15] == result2).all()\n\n result3 = filter_val_nz.copy()\n result3[0, 0] = 26\n assert (feature_map[-5:, -5:] == result3).all()", "def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTranspose(\n filters=num_filters,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_uniform\",\n )(inp)\n x = BatchNormalization()(x)\n x = Activation(\"elu\")(x)\n\n return x", "def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = 
UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Image input", "def deconv2D_naive(X, W, stride, pad, dilation=0):\n\tif stride > 1:\n\t\tX = dilate(X, stride-1)\n\t\tstride = 1\n\n\t# pad the input\n\tX_pad, p = pad2D(X, pad, W.shape[:2], stride=stride, dilation=dilation)\n\n\tn_ex, in_rows, in_cols, n_in = X.shape\n\tfr, fc, n_in, n_out = W.shape\n\ts, d = stride, dilation\n\tpr1, pr2, pc1, pc2 = p\n\n\t# update effective filter shape based on dilation factor\n\t_fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\n\t# compute deconvolution output dims\n\tout_rows = s * (in_rows - 1) - pr1 - pr2 + _fr\n\tout_cols = s * (in_cols - 1) - pc1 - pc2 + _fc\n\tout_dim = (out_rows, out_cols)\n\n\t# add additional padding to achieve the target output dim\n\t_p = calc_pad_dims_2D(X_pad.shape, out_dim, W.shape[:2], s, d)\n\tX_pad, pad = pad2D(X_pad, _p, W.shape[:2], stride=s, dilation=dilation)\n\n\t# perform the forward convolution using the flipped weight matrix \n\t# note we set pad to 0, since we have already addded padding\n\tZ = conv2D(X_pad, np.rot90(W, 2), s, 0, d)\n\n\tpr2 = None if pr2 == 0 else -pr2\n\tpc2 = None if pc2 == 0 else -pc2\n\treturn Z[:, pr1:pr2, pc1:pc2, :]", "def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization(momentum=0.8)(u)\n u = Concatenate()([u, skip_input])\n return u", "def wiener_deconvolution(img, otf, sn_power_ratio, snr_includes_otf=False):\n if snr_includes_otf:\n wfilter = otf.conj() / (np.abs(otf)**2 * (1 + 1 / sn_power_ratio))\n else:\n wfilter = otf.conj() / (np.abs(otf) ** 2 + 1 / sn_power_ratio)\n\n wfilter[np.isnan(wfilter)] = 0\n img_deconvolved = img * wfilter\n\n return img_deconvolved, wfilter", "def deconv2d(layer_input, filters, f_size=8, dropout_rate=0,permanent=False):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)\n if dropout_rate and not permanent:\n u = Dropout(dropout_rate)(u)\n elif dropout_rate and permanent:\n # permanent droput from my main man fchollet <3\n u=Lambda(lambda x: K.dropout(x, level=dropout_rate))(u) \n \n u = BatchNormalization(momentum=0.8)(u)\n return u", "def deconv(dims, inplanes, outplanes, kernel_size, stride, bias, dilation):\n padding = math.floor((kernel_size-stride+1)/2)\n if dims==2:\n return nn.ConvTranspose2d(inplanes, outplanes, kernel_size, stride,\n padding=padding, bias=bias) #, dilation=1)\n elif dims==3:\n return nn.ConvTranspose3d(inplanes, outplanes, kernel_size, stride,\n padding = padding, bias=bias) #, dilation=1)\n else:\n raise ValueError('dimension of deconv must be 2 or 3')", "def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u", "def deconv(self, other, balance=1000, reg=None, is_real=True, clip=False, 
postnormalize=True):\n from skimage.restoration import wiener\n\n result = wiener(self.data, other.data, balance=balance, reg=reg, is_real=is_real, clip=clip)\n if postnormalize:\n result += result.min()\n result /= result.max()\n return Convolvable(result, self.x, self.y, False)", "def deconvolution_cce(shape_filter, shape_x, # pylint: disable=R0913, R0914\n input_sizes, strides, pads, dilations=(1, 1, 1, 1),\n filter_dtype='float16', x_dtype='float16',\n res_dtype='float16', bias=False, offset_x=0,\n fusion_para=None,\n kernel_name=\"deconvolution_cce\"):\n\n def _ceil(x_1, x_2):\n if x_2 == 0:\n raise RuntimeError(\"Division by zero\")\n return (x_1 + x_2 - 1) // x_2\n if fusion_para is None:\n fusion_para = {\"input_memory_type\": 0,\n \"output_memory_type\": 0,\n \"valid_shape\": (),\n \"slice_offset\": (),\n \"output_offset\": (),\n \"l1_fusion_type\": -1,\n \"fmap_l1_addr_flag\": False,\n \"fmap_l1_valid_size\": 0}\n\n if filter_dtype == \"int8\" and x_dtype == \"int8\":\n shape_filter = [shape_filter[1], shape_filter[0],\n shape_filter[2], shape_filter[3]]\n res = comm.check_conv2dbp_input_params(shape_filter, shape_x, input_sizes,\n strides, pads, dilations,\n filter_dtype, x_dtype,\n res_dtype, kernel_name, fusion_para)\n\n shape_filter, shape_x, input_sizes, strides, pads, dilations, \\\n filter_dtype, x_dtype, res_dtype, kernel_name = res\n\n dedy_batch, dedy_channel, dedy_h, dedy_w = shape_x\n filter_batch, filter_channel, filter_h, filter_w = shape_filter\n\n _, dy_k0, _ = CUBE_MKN[x_dtype]['mac']\n _, w_k0, w_n0 = CUBE_MKN[filter_dtype]['mac']\n shape_dedy = (dedy_batch,\n _ceil(dedy_channel, dy_k0), dedy_h, dedy_w, dy_k0)\n filter_channel = comm.align(filter_channel, w_n0)\n if filter_dtype == \"int8\" and x_dtype == \"int8\":\n shape_filter_frac = (\n _ceil(filter_batch, w_k0)*filter_h*filter_w,\n _ceil(filter_channel, w_n0), w_n0, w_k0)\n else:\n shape_filter_frac = (\n _ceil(filter_channel, w_n0)*filter_h*filter_w,\n _ceil(filter_batch, w_k0), w_k0, w_n0)\n tensor_dedy = tvm.placeholder(shape_dedy, name=\"dedy\", dtype=x_dtype)\n\n tensor_filter_frac = tvm.placeholder(shape_filter_frac,\n name=\"filter\", dtype=filter_dtype)\n\n if bias:\n tensor_bias = tvm.placeholder(\n (filter_channel,), name='tensor_bias', dtype=res_dtype\n )\n else:\n tensor_bias = None\n\n dedx = te.lang.cce.conv2d_backprop_input_compute(\n filters=tensor_filter_frac,\n out_backprop=tensor_dedy,\n filter_sizes=shape_filter,\n input_sizes=input_sizes,\n strides=strides,\n padding=pads,\n dilations=dilations,\n res_dtype=res_dtype,\n tensor_bias=tensor_bias,\n offset_x=offset_x,\n fusion_para=fusion_para,\n kernel_name=kernel_name\n )\n if bias:\n tensor_list = [tensor_dedy, tensor_filter_frac, tensor_bias, dedx]\n else:\n tensor_list = [tensor_dedy, tensor_filter_frac, dedx]\n\n with tvm.target.cce():\n sch = generic.auto_schedule(dedx)\n\n config = {\n \"name\": kernel_name,\n \"tensor_list\": tensor_list\n }\n\n te.lang.cce.cce_build_code(sch, config)", "def deconv2d(layer_input):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u", "def deconv2d(layer_input,num=256):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(num, kernel_size=3, strides=1, padding='same')(u)\n u = Activation('relu')(u)\n return u", "def deconvolvefrombeam(self, *args, **kwargs):\n return _image.image_deconvolvefrombeam(self, *args, **kwargs)", "def get_deconv_filter(f_shape):\n width = f_shape[0]\n heigh = f_shape[0]\n f = 
ceil(width/2.0)\n c = (2 * f - 1 - f % 2) / (2.0 * f)\n bilinear = np.zeros([f_shape[0], f_shape[1]])\n for x in range(width):\n for y in range(heigh):\n value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n bilinear[x, y] = value\n weights = np.zeros(f_shape)\n for i in range(f_shape[2]):\n weights[:, :, i, i] = bilinear\n\n init = tf.constant_initializer(value=weights,\n dtype=tf.float32)\n return tf.get_variable(name=\"up_filter\", initializer=init,\n shape=weights.shape)", "def __init__(self, c_in, c_out, k_size, stride=1, pad=0, bias=True):\r\n\r\n super(EqualizedDeconv2d, self).__init__()\r\n\r\n # define the weight and bias if to be used\r\n self.weight = torch.nn.Parameter(torch.nn.init.normal_(\r\n torch.empty(c_in, c_out, *_pair(k_size))\r\n ))\r\n\r\n self.use_bias = bias\r\n self.stride = stride\r\n self.pad = pad\r\n\r\n if self.use_bias:\r\n self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))\r\n\r\n fan_in = c_in # value of fan_in for deconv\r\n self.scale = sqrt(2) / sqrt(fan_in)", "def fftdeconvolve(image, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpy.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x / y))))", "def deconv_layer(self, inputs, field_size, channels_size,\n initializer_type, name, act_func=tf.nn.relu):\n batch, height, width, in_channels = inputs.get_shape().as_list()\n #shape = tf.shape(inputs)\n assert in_channels == channels_size[0], (\n 'Number of input channels doe not match filter inputs channels.'\n )\n with tf.variable_scope(name):\n channels_size.reverse() # now [out_c, in_c]\n filter_size = field_size + channels_size\n bias_size = [channels_size[0]]\n\n if initializer_type:\n initializer = tf.contrib.layers.xavier_initializer()\n else:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n weights = tf.get_variable('W', filter_size, initializer=initializer)\n biases = tf.get_variable(\n 'b', bias_size, initializer=tf.constant_initializer(.1))\n\n #target_shape_tensor = tf.stack([shape[0], height, width, channels_size[0]])\n conv = tf.nn.conv2d_transpose(\n inputs,\n weights,\n #target_shape_tensor,\n [batch, height, width, channels_size[0]],\n [1, 1, 1, 1],\n padding='SAME')\n conv_bias = tf.nn.bias_add(conv, biases)\n if act_func == None:\n output = conv_bias\n else:\n output = act_func(conv_bias)\n #set_shape does not accept tensor\n #output.set_shape([batch, height, width, channels_size[0]])\n #this sets first size to none. why? 
Not used.\n #output = tf.reshape(output, target_shape_tensor)\n\n return output", "def de_cic(engine, deconvolved, d_k):\n code = CodeSegment(engine)\n def tf(k):\n kny = [numpy.sinc(k[i]*engine.pm.BoxSize[i]/(2*numpy.pi*engine.pm.Nmesh[i])) for i in range(3)]\n wts = (kny[0]*kny[1]*kny[2])**-2\n return wts\n \n #kny = numpy.pi*engine.pm.Nmesh[0]/engine.pm.BoxSize[0]\n code.assign(x='d_k', y='tmp')\n code.transfer(complex='tmp', tf=tf)\n code.c2r(real=deconvolved, complex='tmp')\n return code", "def deconv(depth, nfilter, ksize=3, stride=1, \r\n pad_in=0, pad_out=0, groups=1,\r\n dilation=1, pad_mode='zeros',\r\n bias=True, lrelu=None):\r\n assert (depth>0 and nfilter>0 and ksize>0 and ksize%2==1 and \r\n stride>0 and pad_in>=0 and pad_out>=0 and dilation>=1 and\r\n groups>=1 and depth%groups==0 and nfilter%groups==0)\r\n deconv_ = nn.ConvTranspose2d(depth, nfilter, ksize, stride, \r\n pad_in, pad_out, groups, bias, dilation,\r\n pad_mode)\r\n if lrelu is not None:\r\n deconv_ = nn.Sequential(deconv_, \r\n nn.LeakyReLU(lrelu, inplace=True))\r\n return deconv_", "def conv_backward_naive(dout, cache):\n dx, dw, db = None, None, None\n ###########################################################################\n # TODO: Implement the convolutional backward pass. #\n ###########################################################################\n #Extract variables from cache.\n x,w,b,conv_param = cache\n stride = conv_param['stride']\n pad = conv_param['pad']\n #Extract shapes(lots of dimensions can become buggy)\n N,F,out_height,out_width = dout.shape\n #Save filter dimensions.\n HH,WW = w.shape[2],w.shape[3]\n #Start by computing gradient of the bias.(always the simplest one)\n db = np.sum(np.sum(np.sum(dout,axis = 3),axis = 2),axis = 0)\n dw = np.zeros_like(w)\n dx = np.zeros_like(x)\n #Start computing gradient of w and x.(Naive implementation)\n #Go over each filter in w.\n for i in range(F):\n #Go over each training example.\n for j in range(N):\n curr_x = x[j,:,:,:]\n #Get current gradient of activation map for j filter on i training example.\n curr_dout = dout[j,i,:,:]\n a = 0;b = 0\n #print(\"HERE\",curr_x.shape)\n #print(\"Stride:\",stride)\n for t in range(0,curr_x.shape[1] - WW + 1,stride):\n for k in range(0,curr_x.shape[2] - HH + 1,stride):\n #print(\"t: %d k: %d WW:%d HH:%d \" % (t,k,WW,HH))\n dw[i,:,:,:] += curr_dout[a,b] * curr_x[:,t:(t + WW),k:(k + HH)]\n dx[j,:,t:(t + WW),k:(k + HH)] += curr_dout[a,b] * w[i,:,:,:]\n if(b == dout.shape[3] - 1):\n a += 1\n b = 0\n else:\n b += 1\n #Remove padding.\n dx = dx[:,:,pad : (dx.shape[2] - pad),pad: (dx.shape[3] - pad)] \n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx, dw, db", "def perform(self, node, inp, out):\r\n img2d, filtersflipped = inp\r\n z, = out\r\n if not imported_scipy_signal:\r\n raise theano.gof.utils.MethodNotDefined(\r\n \"c_headers\", type(self), self.__class__.__name__,\r\n \"Need the python package for scipy.signal to be installed \"\r\n \"for the python implementation. 
You can use the C\"\r\n \" implementation instead.\")\r\n\r\n # TODO: move these back out to global scope when they no longer\r\n # cause an atexit error\r\n imshp = self.imshp\r\n if imshp is None or any([x is None for x in imshp]):\r\n imshp = tuple(img2d.shape[1:])\r\n kshp = self.kshp\r\n if kshp is None or any([x is None for x in kshp]):\r\n kshp = tuple(filtersflipped.shape[2:])\r\n bsize = self.bsize\r\n if bsize is None:\r\n bsize = img2d.shape[0]\r\n nkern = self.nkern\r\n if nkern is None:\r\n nkern = filtersflipped.shape[0]\r\n\r\n imshp_logical = self.imshp_logical\r\n if imshp_logical is None:\r\n imshp_logical = imshp\r\n if numpy.any([x is None for x in imshp_logical]):\r\n imshp_logical = tuple(img2d.shape[1:])\r\n\r\n kshp_logical = self.kshp_logical\r\n if kshp_logical is None:\r\n kshp_logical = kshp\r\n if numpy.any([x is None for x in kshp_logical]):\r\n kshp = tuple(filtersflipped.shape[2:])\r\n\r\n if self.fulloutshp is not None:\r\n fulloutshp = tuple(self.fulloutshp)\r\n else:\r\n fulloutshp = tuple(ConvOp.getOutputShape(imshp_logical[\r\n 1:], kshp_logical, (1, 1), self.out_mode))\r\n\r\n if z[0] is None or z[0].shape != (bsize, nkern,) + fulloutshp:\r\n z[0] = numpy.zeros((bsize, nkern,) + fulloutshp,\r\n dtype=img2d.dtype)\r\n zz = z[0]\r\n\r\n stacklen = imshp[0]\r\n\r\n img2d = img2d.reshape((bsize,) + imshp)\r\n filtersflipped = filtersflipped.reshape((nkern, stacklen) + kshp)\r\n\r\n if self.imshp != self.imshp_logical:\r\n # assuming that to get from imshp to imshp logical we insert zeros in missing spots\r\n rstride = int(numpy.ceil(imshp_logical[1] / float(imshp[1])))\r\n cstride = int(numpy.ceil(imshp_logical[2] / float(imshp[2])))\r\n buf = numpy.zeros((bsize,) + imshp_logical, dtype=img2d.dtype)\r\n buf[:, :, ::rstride, ::cstride] = img2d\r\n img2d = buf\r\n del buf, rstride, cstride\r\n\r\n if kshp != kshp_logical:\r\n rstride = int(numpy.ceil(kshp_logical[0] / float(kshp[0])))\r\n cstride = int(numpy.ceil(kshp_logical[1] / float(kshp[1])))\r\n buf = numpy.zeros((nkern, stacklen) +\r\n self.kshp_logical, dtype=filtersflipped.dtype)\r\n if self.kshp_logical_top_aligned:\r\n roffset = coffset = 0\r\n else:\r\n roffset = (kshp_logical[0] - (kshp[0] *\r\n rstride) - 1 + rstride) % rstride\r\n coffset = (kshp_logical[1] - (kshp[1] *\r\n cstride) - 1 + cstride) % cstride\r\n assert roffset >= 0\r\n assert coffset >= 0\r\n buf[:, :, roffset::rstride, coffset::cstride] = filtersflipped\r\n filtersflipped = buf\r\n del buf, rstride, cstride\r\n\r\n val = _valfrommode(self.out_mode)\r\n bval = _bvalfromboundary('fill')\r\n\r\n for b in xrange(bsize):\r\n for n in xrange(nkern):\r\n zz[b, n, ...].fill(0)\r\n for im0 in xrange(stacklen):\r\n zz[b, n, ...] 
+= _convolve2d(img2d[b, im0, ...],\r\n filtersflipped[n, im0, ...],\r\n 1, val, bval, 0)\r\n\r\n if False:\r\n if False and self.out_mode == \"full\":\r\n img2d2 = numpy.zeros((bsize, stacklen,\r\n imshp[1] + 2 * kshp[0] - 2,\r\n imshp[2] + 2 * kshp[1] - 2))\r\n img2d2[:, :, kshp[0] - 1:kshp[0] - 1 + imshp[1],\r\n kshp[1] - 1:kshp[1] - 1 + imshp[2]] = img2d\r\n img2d = img2d2\r\n #N_image_shape = image_data.shape\r\n\r\n for b in xrange(bsize):\r\n for n in xrange(nkern):\r\n zz[b, n, ...].fill(0)\r\n for im0 in xrange(stacklen):\r\n for row in xrange(0, zz.shape[2], self.dx):\r\n for col in xrange(0, zz.shape[3], self.dy):\r\n zz[b, n, row, col] += (img2d[b, im0, row:row + kshp[0], col:col + kshp[1]] *\r\n filtersflipped[n, im0, ::-1, ::-1]).sum()\r\n\r\n #We copy it to remove the Stride mismatch warning from DEBUG_MODE.\r\n #The copy make that we return an object with the same stride as the c version.\r\n #The copy don't affect the performence during our experience as in that case we\r\n #execute the c version which is much faster.\r\n if self.dx > 1 or self.dy > 1:\r\n zz = zz[:, :, 0::self.dx, 0::self.dy].copy()\r\n\r\n z[0] = zz", "def askapsoft_decimate_n_extract(af, over_sampling, kernel_support):\n\n # why is this normalization required..?\n rescale = over_sampling*over_sampling\n #rescale = 1\n\n cSize = 2 * kernel_support + 1\n itsConvFunc=np.zeros((over_sampling, over_sampling, cSize, cSize), dtype=complex)\n\n for fracu in range(0,over_sampling):\n for fracv in range(0,over_sampling):\n\n # Now cut out the inner part of the convolution function and\n # insert it into the convolution function\n for iy in range(-kernel_support,kernel_support+1):\n for ix in range(-kernel_support,kernel_support+1):\n\n nx = af.shape[0]\n ny = af.shape[1]\n\n # assumes support is the same for all w-planes:\n xval = (ix) * over_sampling + fracu + nx / 2\n yval = (iy) * over_sampling + fracv + ny / 2\n\n itsConvFunc[fracu, fracv, ix+cSize/2, iy+cSize/2] \\\n = rescale * af[xval, yval]\n\n return itsConvFunc[::-1,::-1]", "def __init__(self, incoming, W=None, b=tf.zeros, ksize: int = None, num_outputs: int = None,\n weight_initializer=None, a=tf.nn.elu, output_shape=None, strides=(1, 2, 2, 1), padding='SAME',\n data_format='NHWC',\n name='DeConvLayer'):\n super(DeConvLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n # Set init for W and b\n if all(p is not None for p in [weight_initializer, ksize, num_outputs]):\n W = tofov(weight_initializer,\n shape=(ksize, ksize, num_outputs, incoming.get_output_shape()[-1]),\n var_params=dict(name='W_deconv'))\n else:\n W = tofov(W, shape=None, var_params=dict(name='W_deconv'))\n b = tofov(b, shape=W.get_shape().as_list()[-2], var_params=dict(name='b_deconv'))\n \n if output_shape is None:\n if padding == 'SAME' and strides[0] == 1:\n if len(self.incoming_shape) == 5:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1],\n self.incoming_shape[2] * strides[1], self.incoming_shape[3] * strides[2],\n W.get_shape().as_list()[-2] * strides[3]]\n else:\n output_shape = [self.incoming_shape[0], self.incoming_shape[1] * strides[1],\n self.incoming_shape[2] * strides[2], W.get_shape().as_list()[-2] * strides[3]]\n else:\n raise AttributeError(\"Automatic output_shape calculation not implemented for strides!=1 in \"\n \"first dimension\")\n \n if isinstance(padding, int):\n if len(self.incoming_shape) == 5:\n self.padding = [[0, 0], [0, 0], [padding, padding], [padding, 
padding], [0, 0]]\n elif len(self.incoming_shape) == 4:\n self.padding = [[0, 0], [padding, padding], [padding, padding], [0, 0]]\n else:\n raise ValueError(\"invalid input shape\")\n else:\n self.padding = padding\n \n self.a = a\n self.b = b\n self.W = W\n \n self.output_shape = output_shape\n self.strides = strides\n \n self.data_format = data_format\n \n self.out = None\n self.name = name", "def clConvolution(self, size, mask):", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n #start = time.time()\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]]).astype(float32)\r\n osh = output.shape\r\n\r\n assert c_kernel.conv2d_c(get_pointer(input), ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d\") \r\n #end = time.time()\r\n\r\n #print(end - start) \r\n return output\r\n \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n print(input[m,strides[1]*i+di,strides[2]*j+dj,:])\r\n print(filter[di,dj,:,:])\r\n t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\r\n output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n #print(\"type(output)\")\r\n #print(type(output))\r\n return output\r\n '''" ]
[ "0.6474561", "0.6416828", "0.6388879", "0.6168792", "0.6160192", "0.6092755", "0.5938092", "0.5935818", "0.59309554", "0.5918654", "0.590212", "0.5895692", "0.58778983", "0.58704996", "0.58675754", "0.57710433", "0.56697917", "0.5654541", "0.56469446", "0.56134385", "0.5609469", "0.5601776", "0.5573443", "0.55514795", "0.55316025", "0.5510086", "0.54994607", "0.54880387", "0.54810375", "0.5479857" ]
0.71887773
0
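
For context on why the output of the call above may need trimming at all: a transposed convolution generally produces a spatial size of `stride * (in - 1) - 2 * pad + dilation * (kernel - 1) + 1`, which rarely lands exactly on a caller-requested shape. The helper below simply evaluates that standard relation; the function name is an invention for this sketch, while the 8x8 input / 5x5 filter / stride-2 numbers mirror the `test_deconv` negative above.

```python
def deconv_out_size(in_size, kernel, stride=1, pad=0, dilation=1, output_padding=0):
    """Spatial size produced by a transposed convolution (standard relation)."""
    return stride * (in_size - 1) - 2 * pad + dilation * (kernel - 1) + 1 + output_padding

# The 8x8 image, 5x5 filter, stride-2, pad-0 setup from the test_deconv
# negative gives a 19x19 feature map, so requesting a smaller
# deconv_out_shape would trigger the centred slicing shown earlier.
print(deconv_out_size(8, kernel=5, stride=2))  # -> 19
```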
Given a list of cells, combine the individual state_info's of the cells into a single state_info for the collection of cells, i.e. do the equivalent of sum([c.state_info for c in cells], []), but using itertools instead because it's much faster
def _cells_state_info(cells): return list(itertools.chain(*[c.state_info for c in cells]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _combine_omnipage_cell_list(table, inds, row_flag):\n if row_flag:\n row_or_col_list = [table[i, :] for i in inds]\n else:\n row_or_col_list = [table[:, i] for i in inds]\n return [' '.join(_unique_sorted([str(k) for k in j])).strip()\n for j in zip(*row_or_col_list)]", "def _cells_initialize_states(cells, batch_axis, **kwargs):\n\n return list(itertools.chain(\n *[c.initialize_states(batch_axis, **kwargs) for c in cells]))", "def state_(state):\n return tuple( [ tuple( row ) for row in state ] )", "def _reduce_cells(self):\n\n def reduce_cell(result, cell):\n # We assume only _sum aggergation\n # All measures should be prepared so we can to this\n for aggregate in self.aggregate_names:\n result[aggregate] = result.get(aggregate, 0) + \\\n cell.get(aggregate, 0)\n return result\n\n # 1. Map cells to reduced time path\n #\n reduced_map = defaultdict(list)\n reduced_len = len(self.time_levels)\n\n for key, cell in self.time_cells.items():\n time_path = key[0]\n reduced_path = time_path[0:reduced_len]\n\n reduced_key = (reduced_path, key[1])\n\n # self.logger.debug(\"reducing %s -> %s\" % (key, reduced_key))\n reduced_map[reduced_key].append(cell)\n\n self.browser.logger.debug(\"response cell count: %s reduced to: %s\" %\n (len(self.time_cells), len(reduced_map)))\n\n # 2. Reduce the cells\n #\n # See the function reduce_cell() above for aggregation:\n #\n reduced_cells = {}\n for key, cells in reduced_map.items():\n # self.browser.logger.debug(\"Reducing: %s -> %s\" % (key, cells))\n cell = reduce(reduce_cell, cells, {})\n\n reduced_cells[key] = cell\n\n self.time_cells = reduced_cells", "def _cell_state_size(self):\n state_sizes = self._cells[0].state_size\n if isinstance(state_sizes, tuple):\n return sum(state_sizes)\n return state_sizes", "def count_neighbourhood_states(self,state):\n\t\tassert len(state) > 0\n\t\tenergies = [float(\"inf\") for i in range (self._size)]\n\t\ttaxes = [float(\"inf\") for i in range (self._size)]\n\n\t\tfor i in range (0,self._size):\n\t\t\tnewState = copy(state)\n\t\t\tnewState[i] = 1 - newState[i]\n\t\t\tenergies[i] = self.count_energy(newState)\n\t\t\ttaxes[i] = self.count_tax(newState)\n\n\t\treturn energies,taxes", "def _extract_states(self, state):\n conf = self._config\n\n # c_prev is `m` (cell value), and\n # m_prev is `h` (previous output) in the paper.\n # Keeping c and m here for consistency with the codebase\n c_prev = [None] * conf.num_dims\n m_prev = [None] * conf.num_dims\n\n # for LSTM : state = memory cell + output, hence cell_output_size > 0\n # for GRU/RNN: state = output (whose size is equal to _num_units),\n # hence cell_output_size = 0\n total_cell_state_size = self._cell_state_size()\n cell_output_size = total_cell_state_size - conf.num_units\n\n if self._state_is_tuple:\n if len(conf.recurrents) != len(state):\n raise ValueError('Expected state as a tuple of {} '\n 'element'.format(len(conf.recurrents)))\n\n for recurrent_dim, recurrent_state in zip(conf.recurrents, state):\n if cell_output_size > 0:\n c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state\n else:\n m_prev[recurrent_dim] = recurrent_state\n else:\n for recurrent_dim, start_idx in zip(conf.recurrents,\n range(0, self.state_size,\n total_cell_state_size)):\n if cell_output_size > 0:\n c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n m_prev[recurrent_dim] = array_ops.slice(\n state, [0, start_idx + conf.num_units], [-1, cell_output_size])\n else:\n m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, 
conf.num_units])\n return c_prev, m_prev, cell_output_size", "def merge_cells(self):\n\n for rownum, row in enumerate(self.cells):\n for colnum, cell in enumerate(row):\n if not isinstance(cell, Cell):\n continue\n cols_to_merge = 0\n for i in range(colnum+1, len(row)):\n if isinstance(self.cells[rownum][i], Cell) and self.cells[rownum][i].event == cell.event:\n cols_to_merge += 1\n if cols_to_merge > 0:\n cell.colspan = cols_to_merge + 1\n for i in range(1, cols_to_merge + 1):\n self.cells[rownum][colnum + i] = SpanCell(rownum, colnum + i)", "def map_view(state):\n string_rows = []\n\n for row in state:\n string_row1 = []\n string_row2 = []\n for cell in row:\n if \"grass\" not in cell and \"lapis_block\" not in cell:\n string_row1.append(\"XXX\")\n string_row2.append(\"XXX\")\n else:\n bottom_corners = \"E\" if \"lapis_block\" in cell else \" \"\n string_row1.append((\"A\" if \"Agent_2\" in cell else \" \") + \" \" +\n (\"P\" if \"Pig\" in cell else \" \"))\n string_row2.append(bottom_corners + (\"C\" if \"Agent_1\" in cell else \" \") + bottom_corners)\n string_rows.append(\"\".join(string_row1))\n string_rows.append(\"\".join(string_row2))\n\n return \"\\n\".join(string_rows)", "def inner_apply(self, inputs, states, cells, mask=None):\n def slice_last(x, no):\n return x[:, no*self.dim: (no+1)*self.dim]\n\n activation = tensor.dot(states, self.W_state) + inputs\n in_gate = self.gate_activation.apply(\n slice_last(activation, 0) + cells * self.W_cell_to_in)\n forget_gate = self.gate_activation.apply(\n slice_last(activation, 1) + cells * self.W_cell_to_forget)\n next_cells = (\n forget_gate * cells +\n in_gate * self.activation.apply(slice_last(activation, 2)))\n out_gate = self.gate_activation.apply(\n slice_last(activation, 3) + next_cells * self.W_cell_to_out)\n next_states = out_gate * self.activation.apply(next_cells)\n\n if mask:\n next_states = (mask[:, None] * next_states +\n (1 - mask[:, None]) * states)\n next_cells = (mask[:, None] * next_cells +\n (1 - mask[:, None]) * cells)\n\n return next_states, next_cells, in_gate, forget_gate, out_gate", "def num_species_on_map(self):\n # tot_herbivores = 0\n # tot_carnivores = 0\n # for cells in itertools.chain.from_iterable(self.map):\n # curr_herbivore, curr_carnivore = cells.num_species_per_cell()\n # tot_herbivores += curr_herbivore\n # tot_carnivores += curr_carnivore\n\n return (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))\n\n # (sum(x) for x in zip(*[cells.num_species_per_cell() for cells in itertools.chain.from_iterable(self.map)]))", "def generate_states(esncell, xs, h0):\n (map_ih, (Whh, shape), bh) = esncell\n def _step(h, x):\n #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x) + bh)\n h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x))\n return (h, h)\n (h, hs) = lax.scan(_step, h0, xs)\n return (h, hs)", "def collapse_states(states):\n new_states = states[:1]\n\n for state in states[1:]:\n last_state = new_states[-1]\n if state[0] == last_state[0]:\n new_states[-1] = (state[0], last_state[1] + state[1])\n else:\n new_states.append(state)\n return new_states", "def state_to_locations(state: list) -> list:\n\n locations = []\n for i in range(0, 16):\n locations.append((0, 0))\n # Each tuple represents a location on the board as (row, column)\n\n \"\"\" \"locations\" keeps track of all fifteen numbers in the given state and the goal \n state. 
The location of the blank in the state is stored as the tuple at locations[0], \n the location of the number 1 is stored as locations[1], so on and so forth.\"\"\"\n\n \"\"\" Due to the nature of indices on a list, when a location is stored as a tuple \n (row, column), the four rows and four columns are represented as indices from 0 \n to 3, even though the numbers 1 through 15 are represented as indices from 1 to \n 15 on the list.\"\"\"\n\n for i in range(0, 4):\n for j in range(0, 4):\n \"\"\" The loop scans the given state and reads the integer at [i][j]. The number \n is stored at its corresponding index in the list \"locations\". By the time the \n loop finishes, the locations of all fifteen numbers as well as the blank in \n the given state will have been stored in the list.\"\"\"\n num = state[i][j]\n locations[num] = (i, j)\n\n return locations", "def total_population(self):\n total_population_list = []\n for y in self.island_map:\n for cell in y:\n total_population_list += cell.population\n return total_population_list", "def iter_cells(cells: List[TCell],\n include_ts: bool = False) -> Union[Iterator[bytes],\n Iterator[Tuple[bytes, int]]]:\n if include_ts:\n return ((c.value, c.timestamp) for c in cells)\n else:\n return (c.value for c in cells)", "def updateCells(cell_positions):\n # Build a set of canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live cell list\n return cell_positions", "def find_next_moves(self):\n # iterate through all cells, and group them with upper cells and left\n # cells\n\n # generate separated cells then merge the them with same neighbours\n matrix_rows = len(self.status)\n if matrix_rows == 0:\n matrix_cols = 0\n else:\n matrix_cols = len(self.status[0])\n matrix = []\n for i in range(matrix_rows):\n matrix.append([[(i, j)] for j in range(matrix_cols)])\n # merge coordinations\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if self.status[i][j] != '':\n # is same with right cell?\n if j < matrix_cols - 1 and 
self.status[i][j] == self.status[i][j + 1]:\n new_item = matrix[i][j] + matrix[i][j + 1]\n matrix[i][j] = matrix[i][j + 1] = new_item\n # is same with down cell?\n if i < matrix_rows - 1 and self.status[i][j] == self.status[i + 1][j]:\n new_item = matrix[i][j] + matrix[i + 1][j]\n matrix[i][j] = matrix[i + 1][j] = new_item\n\n # filter out all unvalid results\n result = []\n # filter out all single-cell groups\n for i in range(matrix_rows):\n for j in range(matrix_cols):\n if (len(matrix[i][j]) > 1 and\n matrix[i][j] not in result):\n result.append(matrix[i][j])\n\n # filter sublists\n result = sorted(result, key=len, reverse=True)\n changed = True\n while changed:\n changed = False\n for i in range(len(result)):\n for j in range(i + 1, len(result)):\n if set(result[i]).issuperset(set(result[j])):\n result.remove(result[j])\n changed = True\n break\n if changed:\n break\n\n if result:\n for i in result:\n yield (self.convert_coordinations(i),\n len(i) * len(i) * 5,\n self.calc_new_status(i))\n else:\n left_cells = sum([len(i) - i.count('') for i in self.status])\n left_cells_score = 2000 - 20 * left_cells * left_cells\n if left_cells_score < 0:\n left_cells_score = 0\n for i in self.parents:\n i.children[self] = [(i.children[self][0][0] + left_cells_score,\n i.children[self][0][1],\n i.children[self][0][2])]", "def iadd(state: State) -> State:\n cell = state.array[state.index] or 0\n return state._replace(acc=state.acc + cell)", "def get_all_cell_addresses() -> Tuple[CellAddress, ...]:\n return _all_cell_addresses", "def cudnn_lstm_state(lstm_cell_state):\n h = tf.stack([s.h for s in lstm_cell_state])\n c = tf.stack([s.c for s in lstm_cell_state])\n return (h, c)", "def _encode_list_state(dest_np, state_list, who_move):\n assert dest_np.shape == OBS_SHAPE\n\n for col_idx, col in enumerate(state_list):\n for rev_row_idx, cell in enumerate(col):\n row_idx = GAME_ROWS - rev_row_idx - 1\n if cell == who_move:\n dest_np[0, row_idx, col_idx] = 1.0\n else:\n dest_np[1, row_idx, col_idx] = 1.0", "def state_format(states: list) -> list:\n return list(map(_format_n0, states))", "def sum_fuel(fuel_list):\n return functools.reduce(lambda a, b: a+b, fuel_list)", "def empty_cells(state):\r\n cells = []\r\n for x, row in enumerate(state):\r\n for y, cell in enumerate(row):\r\n if cell == 0:\r\n cells.append([x, y])\r\n\r\n return cells", "def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts", "def process_state(state):\n grid = state.grid\n pos = state.pos\n reshaped_grid = np.reshape(grid,(1, grid_size*grid_size)) # Only use squared for square matrices\n reshaped_grid = reshaped_grid[0]\n processed_state = np.concatenate((pos, reshaped_grid))\n processed_state = np.array([processed_state])\n # processed_state.reshape(1, 1, grid_size*grid_size+2, 1)\n #print(processed_state.shape)\n\n return processed_state", "def _merge(self, box_list):\n if isinstance(box_list, self.__class__):\n box_list = [box_list]\n for box in box_list:\n for row in box:\n row[IND] = len(self)\n self.append(row)\n self._combine(row)", "def get_population(self):\n 
population = 0\n for i in self:\n population += i.count(self.cell_state['alive'])\n return population", "def count_cells(rule, n=500):\n ca = Cell1D(rule, n)\n ca.start_single()\n\n res = []\n for i in range(1, n):\n cells = np.sum(ca.array)\n res.append((i, i**2, cells))\n ca.step()\n\n return res" ]
[ "0.53551733", "0.5274854", "0.5060582", "0.49974838", "0.496517", "0.48731998", "0.4826265", "0.48187545", "0.4786807", "0.4734661", "0.47343242", "0.4716697", "0.46995485", "0.46986324", "0.46858564", "0.46833917", "0.46762833", "0.46357366", "0.46302217", "0.4623123", "0.4622143", "0.4567456", "0.45576906", "0.45472112", "0.45407906", "0.45217782", "0.4501906", "0.449414", "0.44672072", "0.4464081" ]
0.7432871
0
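
The query in the record above asserts that `itertools.chain` is the faster equivalent of `sum(list_of_lists, [])`. That claim is easy to check in isolation with throwaway data; the per-cell state dicts below are invented solely for the demonstration.

```python
import itertools
import timeit

# Stand-in for per-cell state_info lists: 200 cells, 3 state dicts each.
cells_info = [[{"state_name": "state_%d_%d" % (i, j)} for j in range(3)]
              for i in range(200)]

flat_sum = sum(cells_info, [])
flat_chain = list(itertools.chain(*cells_info))
assert flat_sum == flat_chain  # identical contents, same order

# sum() rebuilds the accumulator list at every step (quadratic cost),
# chain() walks each element exactly once (linear cost):
print(timeit.timeit(lambda: sum(cells_info, []), number=1000))
print(timeit.timeit(lambda: list(itertools.chain(*cells_info)), number=1000))
```

`itertools.chain.from_iterable(cells_info)` is the usual spelling when the outer list is long, since it avoids unpacking it into call arguments.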
Given a list of cells, initialize the states of the individual cells together by doing the equivalent of sum([c.initialize_states(kwargs) for c in cells], []), but using itertools instead because it's much faster
def _cells_initialize_states(cells, batch_axis, **kwargs): return list(itertools.chain( *[c.initialize_states(batch_axis, **kwargs) for c in cells]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)", "def create_iterables(self):\n iterables = [[0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1], [0, 1],\n [0, 1], [0, 1]]\n self.states = []\n for t in itertools.product(*iterables):\n self.states.append(t)", "def generate_states(esncell, xs, h0):\n (map_ih, (Whh, shape), bh) = esncell\n def _step(h, x):\n #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x) + bh)\n h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x))\n return (h, h)\n (h, hs) = lax.scan(_step, h0, xs)\n return (h, hs)", "def _cells_state_info(cells):\n\n return list(itertools.chain(*[c.state_info for c in cells]))", "def init_states(self, batch_size: int) -> NestedMap:\n raise NotImplementedError('Abstract method')", "def init_states(batch_size, num_lstm_layer, num_hidden):\n init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n return init_c + init_h", "def initialize_states(templates, number_of_templates, number_of_states=5):\n number_of_frames_in_each_state_for_each_template = []\n for i in xrange(number_of_templates):\n # get number_of_frames_in_each_state_for_each_template\n length = len(templates[i])\n small_number_of_elements_in_current_state = length / number_of_states # if length is 12,\n # then there are 3 states have 2 frames and 2 states have 3 frames,we call 2 small number and 3 big number\n number_of_big_number = length % number_of_states\n number_of_frames_in_each_state = [small_number_of_elements_in_current_state for j in \\\n xrange(number_of_states - number_of_big_number)]\n number_of_frames_in_each_state.extend \\\n ([small_number_of_elements_in_current_state + 1 for j in xrange(number_of_big_number)])\n number_of_frames_in_each_state_for_each_template.append(number_of_frames_in_each_state)\n # print number_of_frames_in_each_state_for_each_template\n return number_of_frames_in_each_state_for_each_template", "def init_states(self, init_fn, *args: Any, **kwargs: Any) -> Any:\n p = self.params\n init_states = init_fn(self.sub, *args, **kwargs)\n\n def tile_x(x):\n a = jnp.expand_dims(x, 0)\n return jnp.tile(a, [p.x_times] + [1] * len(x.shape))\n\n init_states = jax.tree_map(tile_x, init_states)\n\n # TODO(yonghui): Configure for spmd.\n return init_states", "def init_cells(self):\n state = list()\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if randint(1, 100) >= 100 - CELL_DENSITY:\n # Live cell.\n status = NORMAL\n state.append(1)\n else:\n # Dead cell.\n status = HIDDEN\n state.append(0)\n\n cell = self.canvas.create_rectangle((index % width) * CELL_SIZE, (index / width) * CELL_SIZE,\n ((index % width) + 1) * CELL_SIZE, ((index / width) + 1) * CELL_SIZE,\n fill=\"black\", state=status, outline=\"white\")\n self.cells.append(cell)\n\n return state", "def initialize_states(self, batch_axis, reset_cells=True):\n\n state_axes = self.feature_axes + batch_axis\n states = {}\n for info in self.state_info:\n name = info['state_name']\n if reset_cells:\n states[name] = ng.constant(const=0,\n axes=state_axes).named(name)\n else:\n states[name] = ng.variable(initial_value=0,\n axes=state_axes).named(name)\n return states", "def initializeStates(n):\n states = []\n for i in range(n):\n states.append(0)\n return states", "def set_n_cells(p_state, n_cells=[1, 1, 
1], idx_image=-1, idx_chain=-1):\n vec3 = ctypes.c_int * 3\n _Set_N_Cells(ctypes.c_void_p(p_state), vec3(*n_cells))", "def set_cell(state: State) -> State:\n assert state.index < state.array_len\n return state._replace(\n array=state.array[: state.index] + [state.acc] + state.array[state.index + 1 :]\n )", "def updateNodeStates (self,listAtoms):\r\n \r\n for i in range(len(listAtoms)):\r\n for j in range(len(listAtoms[i].nodeArray)):\r\n self.mol[i].nodeArray[j].state = listAtoms[i].nodeArray[j].state", "def init_morphed(self, switches_normal, switches_reduce):\n for cell in self.cells:\n cell.init_morphed(switches_reduce if cell.reduction else switches_normal)", "def generate_grains(self, cells):\n\t\tfor cell_num in range(cells):\n\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\tsample_cell = sample_cell[0]\n\t\t\twhile sample_cell.state != 0:\n\t\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\t\tsample_cell = sample_cell[0]\n\t\t\tsample_cell.change_state(self.init_time ,cell_num)", "def assign_CRE_states(self):\n if self.verbose >= 2:\n print(\"\\r{}\\rAssign states to CREs\".format(' ' * 80), end='', file=sys.stderr)\n # Find ranges of states for each CRE\n Cranges = numpy.zeros((self.cre.shape[0], 2), dtype=numpy.int32)\n for i in range(self.cre_indices.shape[0] - 1):\n s = self.cre_indices[i]\n e = self.cre_indices[i + 1]\n if e - s == 0:\n continue\n s1 = self.state_indices[i]\n e1 = self.state_indices[i + 1]\n if e1 - s1 == 0:\n continue\n starts = numpy.searchsorted(self.state['end'][s1:e1],\n self.cre['start'][s:e], side='right') + s1\n stops = numpy.searchsorted(self.state['start'][s1:e1],\n self.cre['end'][s:e], side='left') + s1\n Cranges[s:e, 0] = starts\n Cranges[s:e, 1] = stops\n self.Cranges = Cranges\n # Divide list across multiple processes\n cre_queue = multiprocessing.JoinableQueue()\n results_queue = multiprocessing.JoinableQueue()\n processes = []\n for i in range(self.threads):\n processes.append(multiprocessing.Process(\n target=self._assign_CRE_state, args=(cre_queue, results_queue,\n self.rng.randint(99999))))\n processes[-1].daemon = True\n processes[-1].start()\n step = int(self.cre_indices[-1] / max(self.threads, 1) / 4.)\n for i in range(self.cre_indices.shape[0] - 1):\n for j in range(self.cre_indices[i], self.cre_indices[i + 1], step):\n stop = min(self.cre_indices[i + 1], j + step)\n cre_queue.put((j, stop))\n for i in range(self.threads):\n cre_queue.put(None)\n # Even though there may be multiple reps for a celltype, we only find the average state proportion across reps\n Cstates = numpy.zeros((self.cre.shape[0], self.cellN, self.stateN), dtype=numpy.int32)\n finished = 0\n while finished < self.threads:\n results = results_queue.get(True)\n if results is None:\n finished += 1\n continue\n start, stop = results[:2]\n Cstates[start:stop, :, :] = results[2]\n self.Cstates = Cstates\n if self.verbose >= 2:\n print(\"\\r{}\\r\".format(' ' * 80), end='', file=sys.stderr)", "def updateCells(cell_positions):\n # Build a set of canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = 
cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live cell list\n return cell_positions", "def _compute_world_params(self) -> None:\n\n self.states = []\n for row in range(self.grid_height):\n for col in range(self.grid_width):\n cell = row * self.grid_width + col\n cell_type = self.grid[cell]\n\n possible_actions = {\n Action.up: self._get_action(max(row - 1, 0) * self.grid_width + col),\n Action.down: self._get_action(min(row + 1, self.grid_height - 1) * self.grid_width + col),\n Action.right: self._get_action(row * self.grid_width + min(col + 1, self.grid_width - 1)),\n Action.left: self._get_action(row * self.grid_width + max(col - 1, 0))\n }\n\n self.states.append(State(cell, possible_actions, cell_type))", "def generate_cubes(num_states, distance, startstate='RRRRRRRRRBBBBBBBBBOOOOOOOOOGGGGGGGGGWWWWWWWWWYYYYYYYYY'):\n terminal_states = {'RRRRRRRRRYYYYYYYYYOOOOOOOOOWWWWWWWWWBBBBBBBBBGGGGGGGGG',\n 'BBBBBBBBBYYYYYYYYYGGGGGGGGGWWWWWWWWWOOOOOOOOORRRRRRRRR',\n 'YYYYYYYYYBBBBBBBBBWWWWWWWWWGGGGGGGGGRRRRRRRRROOOOOOOOO',\n 'BBBBBBBBBWWWWWWWWWGGGGGGGGGYYYYYYYYYRRRRRRRRROOOOOOOOO',\n 'OOOOOOOOOGGGGGGGGGRRRRRRRRRBBBBBBBBBWWWWWWWWWYYYYYYYYY',\n 'GGGGGGGGGOOOOOOOOOBBBBBBBBBRRRRRRRRRYYYYYYYYYWWWWWWWWW',\n 'GGGGGGGGGRRRRRRRRRBBBBBBBBBOOOOOOOOOWWWWWWWWWYYYYYYYYY',\n 'GGGGGGGGGYYYYYYYYYBBBBBBBBBWWWWWWWWWRRRRRRRRROOOOOOOOO',\n 'RRRRRRRRRWWWWWWWWWOOOOOOOOOYYYYYYYYYGGGGGGGGGBBBBBBBBB',\n 'OOOOOOOOOWWWWWWWWWRRRRRRRRRYYYYYYYYYBBBBBBBBBGGGGGGGGG',\n 'WWWWWWWWWRRRRRRRRRYYYYYYYYYOOOOOOOOOBBBBBBBBBGGGGGGGGG',\n 'BBBBBBBBBOOOOOOOOOGGGGGGGGGRRRRRRRRRWWWWWWWWWYYYYYYYYY',\n 'BBBBBBBBBRRRRRRRRRGGGGGGGGGOOOOOOOOOYYYYYYYYYWWWWWWWWW',\n 'RRRRRRRRRGGGGGGGGGOOOOOOOOOBBBBBBBBBYYYYYYYYYWWWWWWWWW',\n 'YYYYYYYYYRRRRRRRRRWWWWWWWWWOOOOOOOOOGGGGGGGGGBBBBBBBBB',\n 'YYYYYYYYYOOOOOOOOOWWWWWWWWWRRRRRRRRRBBBBBBBBBGGGGGGGGG',\n 'GGGGGGGGGWWWWWWWWWBBBBBBBBBYYYYYYYYYOOOOOOOOORRRRRRRRR',\n 'WWWWWWWWWGGGGGGGGGYYYYYYYYYBBBBBBBBBRRRRRRRRROOOOOOOOO',\n 'OOOOOOOOOYYYYYYYYYRRRRRRRRRWWWWWWWWWGGGGGGGGGBBBBBBBBB',\n 'RRRRRRRRRBBBBBBBBBOOOOOOOOOGGGGGGGGGWWWWWWWWWYYYYYYYYY',\n 'WWWWWWWWWBBBBBBBBBYYYYYYYYYGGGGGGGGGOOOOOOOOORRRRRRRRR',\n 'WWWWWWWWWOOOOOOOOOYYYYYYYYYRRRRRRRRRGGGGGGGGGBBBBBBBBB',\n 'OOOOOOOOOBBBBBBBBBRRRRRRRRRGGGGGGGGGYYYYYYYYYWWWWWWWWW',\n 'YYYYYYYYYGGGGGGGGGWWWWWWWWWBBBBBBBBBOOOOOOOOORRRRRRRRR'}\n states = []\n while len(states) < num_states:\n x = 
RubiksCubeOld(startstate)\n for j in range(distance):\n x.apply_move(np.random.randint(0,18))\n newstate = x.get_state()\n if newstate not in terminal_states: states.append(newstate)\n states = list(set(states))\n\n return states", "def run(self):\n\t\tfrom loc import loc as Loc\n\t\tfor r in range(1,self.size):\n\t\t\tfor c in range(self.size): \n\t\t\t\tthis = Loc(r,c)\n\t\t\t\tself.state.set_cell(this, self.rule(self.neighbor_vals(this), self.__prob))\n\t\tself.__ran = True", "def fill_states(self,objs=None):\n\n if objs is None:\n objs = self._cycle_states\n local = True\n else:\n local = False\n\n for i in objs:\n full = True\n for j in objs[i]:\n if objs[i][j] is None:\n full = False\n if full: continue\n if (objs[i][CoolProp.iDmass] is not None and\n objs[i][CoolProp.iT] is not None):\n self._state.update(CoolProp.DmassT_INPUTS, objs[i][CoolProp.iDmass], objs[i][CoolProp.iT])\n elif (objs[i][CoolProp.iP] is not None and\n objs[i][CoolProp.iHmass] is not None):\n self._state.update(CoolProp.HmassP_INPUTS, objs[i][CoolProp.iHmass], objs[i][CoolProp.iP])\n elif (objs[i][CoolProp.iP] is not None and\n objs[i][CoolProp.iSmass] is not None):\n self._state.update(CoolProp.PSmass_INPUTS, objs[i][CoolProp.iP], objs[i][CoolProp.iSmass])\n else:\n warnings.warn(\"Please fill the state[{0:s}] manually.\".format(str(i)))\n continue\n for j in objs[i]:\n if objs[i][j] is None:\n objs[i][j] = self._state.keyed_output(j)\n\n if local: self._cycle_states = objs\n return objs", "def set_flag_cells(self, cells):\n self.cells_flagged.add(cells)", "def empty_cells(state):\r\n cells = []\r\n for x, row in enumerate(state):\r\n for y, cell in enumerate(row):\r\n if cell == 0:\r\n cells.append([x, y])\r\n\r\n return cells", "def makeState(*args,**kwargs):\n \n cells = []\n\n for item in args:\n #print item\n cells.append(item)\n \n newState = State(cells)\n #newState.printBoard()\n return newState", "def inner_apply(self, inputs, states, cells, mask=None):\n def slice_last(x, no):\n return x[:, no*self.dim: (no+1)*self.dim]\n\n activation = tensor.dot(states, self.W_state) + inputs\n in_gate = self.gate_activation.apply(\n slice_last(activation, 0) + cells * self.W_cell_to_in)\n forget_gate = self.gate_activation.apply(\n slice_last(activation, 1) + cells * self.W_cell_to_forget)\n next_cells = (\n forget_gate * cells +\n in_gate * self.activation.apply(slice_last(activation, 2)))\n out_gate = self.gate_activation.apply(\n slice_last(activation, 3) + next_cells * self.W_cell_to_out)\n next_states = out_gate * self.activation.apply(next_cells)\n\n if mask:\n next_states = (mask[:, None] * next_states +\n (1 - mask[:, None]) * states)\n next_cells = (mask[:, None] * next_cells +\n (1 - mask[:, None]) * cells)\n\n return next_states, next_cells, in_gate, forget_gate, out_gate", "def _optimizer_state_init(opt_states):\n prefix_list = [\"moments\", \"accum\", \"moment1\", \"moment2\", \"lamb_m\", \"lamb_v\", \"mean_grad\",\n \"mean_square\", \"prev\"]\n for opt_param in opt_states:\n prefix = opt_param.name[:opt_param.name.find(\".\")]\n if opt_param.has_init and (prefix in prefix_list or opt_param.name == \"global_step\"):\n opt_param.init_data()", "def initialize_grid(self):\r\n for i in range(self.height):\r\n for j in range(self.width):\r\n self.grid[i][j] = 0\r\n \r\n # fill up unvisited cells\r\n for r in range(self.height):\r\n for c in range(self.width):\r\n if r % 2 == 0 and c % 2 == 0:\r\n self.unvisited.append((r,c))\r\n\r\n self.visited = []\r\n self.path = dict()\r\n self.generated = False", "def 
generate_pure_initial_state(state0, dimensions, states):\n\n cluster_state = 1\n\n for i, s in enumerate(states):\n d = dimensions[i]\n n = int(round((d - 1) / 2 - s))\n\n state = np.zeros(d)\n state[n] = 1\n cluster_state = np.kron(cluster_state, state)\n\n with_central_spin = np.kron(cluster_state, state0)\n\n return with_central_spin", "def bulkbuildstates():\n import sys\n state_codes = (x.lower() for x in sys.argv[1:])\n for state_code in state_codes:\n State = importlib.import_module('states.%s' % state_code).State\n #_temp = __import__('spam.ham', globals(), locals(), ['eggs', 'sausage'], -1)\n some_state = State()\n some_state.bulkbuild()" ]
[ "0.6383152", "0.63666826", "0.6025529", "0.59512055", "0.5893001", "0.57383454", "0.5733948", "0.57162946", "0.5703301", "0.56930435", "0.56870705", "0.5666645", "0.56311667", "0.55474085", "0.55270493", "0.55179924", "0.54676026", "0.54474306", "0.54134625", "0.5404968", "0.53878593", "0.5370858", "0.5370234", "0.5349005", "0.5331898", "0.5320796", "0.52913564", "0.52820355", "0.5276298", "0.52718914" ]
0.86237913
0
Unroll the cell for num_steps steps.
def unroll(cell, num_steps, inputs, init_states=None, reset_cells=True, return_sequence=True, reverse_mode=False): recurrent_axis = inputs.axes.recurrent_axis() recurrent_axis_idx = len(cell.feature_axes) batch_axis = inputs.axes.batch_axis() out_axes = cell.feature_axes + batch_axis if init_states is not None: states = {k: ng.cast_role(v, out_axes) for (k, v) in init_states.items()} else: states = init_states stepped_inputs = get_steps(inputs, recurrent_axis, backward=reverse_mode) stepped_outputs = [] for t in range(num_steps): with ng.metadata(step=str(t)): output, states = cell(stepped_inputs[t], states) stepped_outputs.append(output) if reverse_mode: if return_sequence: stepped_outputs.reverse() if return_sequence: outputs = ng.stack(stepped_outputs, recurrent_axis, pos=recurrent_axis_idx) else: outputs = stepped_outputs[-1] if not reset_cells: update_inits = ng.doall([ng.assign(initial, states[name]) for (name, initial) in init_states.items()]) outputs = ng.sequential([update_inits, outputs]) return outputs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):\n pass", "def fold_unroll(cell, fold, length, inputs, begin_state=None, layout='NTC'):\n cell.reset()\n\n inputs, _, F, batch_size = _format_sequence(length, inputs, layout, False)\n begin_state = _get_begin_state(cell, F, begin_state, inputs, batch_size)\n\n states = begin_state\n outputs = []\n for i in range(length):\n output, states = fold.record(0, cell, inputs[i], states).split(2)\n outputs.append(output)\n states = states.split(2)\n\n return outputs, states", "def reduce(self, steps=1):\n self.h -= steps", "def _roll(self):\n order = np.array(self.order)\n nsteps = np.array(self.nsteps)\n order[nsteps > 1] = np.roll(order[nsteps > 1], 1)\n self.order = order.tolist()", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def increment_steps(self):\n self.num_steps += 1", "def _roll(self):\n self.order = np.roll(self.order, 1)", "def _random_unroll(env, seed=1234, num_steps=10, difficulty=5,\n random_choice_before_reset=False):\n np.random.seed(seed)\n action_spec = env.action_spec()\n if random_choice_before_reset:\n np.random.choice([8], p=[1.])\n timestep = env.reset(difficulty=difficulty)\n trajectory = [timestep]\n actions = [None]\n for _ in range(num_steps):\n if timestep.last():\n if random_choice_before_reset:\n np.random.choice([8], p=[1.])\n timestep = env.reset(difficulty=difficulty)\n action = _make_random_action(action_spec, timestep.observation)\n timestep = env.step(action)\n trajectory.append(timestep)\n actions.append(action)\n return trajectory, actions", "def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):\n return tf.contrib.legacy_seq2seq.rnn_decoder(\n decoder_inputs=decoder_inputs,\n initial_state=initial_state,\n cell=cell,\n loop_function=self.get_input)", "def flip_l_r(self, times: int):\n for i in range(0, times):\n new_rows = []\n for row in self.tile_rows:\n new_rows.append(row[::-1])\n\n self.tile_rows = new_rows", "def unshift(self, num_chunks):\n for _ in xrange(num_chunks):\n self.probability /= self.graph.ftp(self[-2], self[-1])\n num_leaves = len(self[-1].leaves)\n del self.leaves[-num_leaves:]\n del self[-1]", "def test_unroll_batch(self):\r\n # mini-batch of size 6 is multiple of 2 and 3. 
Should work.\r\n self.validate((6, 2, 3, 3), (3, 2, 2, 2), 'valid',\r\n unroll_batch=2, verify_grad=False)\r\n self.validate((6, 2, 3, 3), (3, 2, 2, 2), 'valid',\r\n unroll_batch=3, verify_grad=False)", "def steps(self, step_count):\n self.dir.value(0 if step_count > 0 else 1)\n for i in range(abs(step_count)):\n self.stp.value(1)\n sleep_us(self.step_time)\n self.stp.value(0)\n sleep_us(self.step_time)\n self.current_position += step_count", "def flip_t_b(self, times: int):\n for i in range(0, times):\n self.tile_rows = self.tile_rows[::-1]", "def unroll(data, sequence_length=20):\n result = []\n for index in range(len(data) - sequence_length):\n result.append(data[index: index + sequence_length])\n return np.asarray(result)", "def merge_steps(self, steps):\n raise NotImplementedError", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def _step_snell(self) -> None:\n self.snell.step()", "def test_unroll_special(self):\r\n self.validate((6, 2, 3, 3), (3, 2, 2, 2), 'valid', unroll_batch=1)", "def turn_steps(self, steps, delay_ms=1):\n if steps < 0:\n direction = -1\n else:\n direction = 1\n for _ in range(abs(int(steps))):\n self.current_step += direction\n element = STEP_ELEMENTS[self.current_step % N_STEP_ELEMENTS ]\n self.set_bits(element)\n time.sleep_ms(delay_ms)", "def render(self):\n step = 1\n while step < self.number_steps and self.update():\n step += 1", "def corun(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.config_template = (yield self.step()) or self.config_template", "def multiroll(self, number):\n result = []\n for num in range(1, number):\n result.append(self.roll())\n return result", "def shuffle(self, steps):\n from random import sample\n\n for s in range(steps):\n direction = sample('LRUD', 1)[0]\n if direction in 'LR':\n stepsize = str(sample(range(self.cdim), 1)[0])\n else:\n stepsize = str(sample(range(self.rdim), 1)[0])\n\n self.shift(direction + stepsize)\n\n return '\\n'.join([''.join([node.value for node in row]) for row in self.rows])", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 :\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, 
columnNbr] = 0\n\n return (grid)", "def roll(x, shift, dim):\n if isinstance(shift, (tuple, list)):\n assert len(shift) == len(dim)\n for s, d in zip(shift, dim):\n x = roll(x, s, d)\n return x\n shift = shift % x.size(dim)\n if shift == 0:\n return x\n left = x.narrow(dim, 0, x.size(dim) - shift)\n right = x.narrow(dim, x.size(dim) - shift, shift)\n return torch.cat((right, left), dim=dim)", "def roll(x, shift, dim):\n if isinstance(shift, (tuple, list)):\n assert len(shift) == len(dim)\n for s, d in zip(shift, dim):\n x = roll(x, s, d)\n return x\n shift = shift % x.size(dim)\n if shift == 0:\n return x\n left = x.narrow(dim, 0, x.size(dim) - shift)\n right = x.narrow(dim, x.size(dim) - shift, shift)\n return torch.cat((right, left), dim=dim)", "def roll(x, shift, dim):\n if isinstance(shift, (tuple, list)):\n assert len(shift) == len(dim)\n for s, d in zip(shift, dim):\n x = roll(x, s, d)\n return x\n shift = shift % x.size(dim)\n if shift == 0:\n return x\n left = x.narrow(dim, 0, x.size(dim) - shift)\n right = x.narrow(dim, x.size(dim) - shift, shift)\n return torch.cat((right, left), dim=dim)", "def roll(x, shift, dim):\n if isinstance(shift, (tuple, list)):\n assert len(shift) == len(dim)\n for s, d in zip(shift, dim):\n x = roll(x, s, d)\n return x\n shift = shift % x.size(dim)\n if shift == 0:\n return x\n left = x.narrow(dim, 0, x.size(dim) - shift)\n right = x.narrow(dim, x.size(dim) - shift, shift)\n return torch.cat((right, left), dim=dim)" ]
[ "0.6567133", "0.647948", "0.6255634", "0.62448305", "0.6042708", "0.5770145", "0.5750804", "0.56965303", "0.5601399", "0.53311074", "0.5281608", "0.5258098", "0.5168075", "0.5160226", "0.5158495", "0.5141035", "0.5117448", "0.5101255", "0.50683206", "0.5047122", "0.50363326", "0.502999", "0.5012735", "0.5006767", "0.5005074", "0.49802867", "0.4945723", "0.4945723", "0.4945723", "0.4945723" ]
0.687197
0
Create a new mention finder to find a given list of mention types. entityTypes = list of mention types (e.g. group, outcome) to find
def __init__(self, entityTypes, tokenClassifier): Finder.__init__(self, entityTypes) self.tokenClassifier = tokenClassifier if self.tokenClassifier != None: self.finderType = 'mention.'+self.tokenClassifier.classifierType else: self.finderType = 'mention'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entities(self, params=None, **kwargs):\n entities = entity_map()\n\n # Sort entities into type => <set of aliases>.\n type_to_aliases = {}\n for alias in entities:\n entity = entities[alias]\n\n if isinstance(entity, Facility):\n type_name = 'Facilities'\n elif isinstance(entity, Ship):\n type_name = 'Ships'\n elif isinstance(entity, Defense):\n type_name = 'Defense'\n elif isinstance(entity, Technology):\n type_name = 'Technology'\n\n if type_name not in type_to_aliases:\n type_to_aliases[type_name] = set()\n type_to_aliases[type_name].add(alias)\n\n nick = self.irc.source.split('!')[0]\n self.irc.reply('Sending list of entities to %s.' % nick)\n\n for type_name in type_to_aliases:\n aliases = sorted(list(type_to_aliases[type_name]))\n self.irc.privmsg(nick, '%s: %s' % (type_name, ', '.join(aliases)))", "def entity_mentions(adm):\n for entity in adm['attributes']['entities']['items']:\n for mention in entity['mentions']:\n # Augment mentions with the entity type of the entity they refer to\n mention['type'] = entity.get('type')\n yield mention", "def create_entities(self, entity_type):\n data = self.read_file(entity_type)\n base_url = data['url']\n for entity in data['entities']:\n url = base_url + entity['url']\n for data in entity['entities']:\n r = requests.post(url, json.dumps(data))\n print(r.text)", "def materials_search_ents(self, entities, elements, cutoff=None):\n\n method = \"POST\"\n sub_url = \"/search/material_search\"\n payload = {\n \"entities\": entities,\n \"elements\": elements,\n \"cutoff\": cutoff\n }\n return self._make_request(sub_url, payload=payload, method=method)", "async def test_intent_entities(self, dm):\n context = create_request(\n \"domain\",\n \"intent\",\n [{\"type\": \"entity_1\"}, {\"type\": \"entity_2\"}, {\"type\": \"entity_3\"}],\n )\n result = await dm.apply_handler(context, create_responder(context))\n assert result.dialogue_state == \"intent_entities\"", "def mentions(self, users_list, mentions_list, feature_size=None, relative_freq=True):\n # Collapsing mentions of users into a single list\n all_mentions = [x for m in mentions_list for x in m if x]\n mention_counts = sorted_count(all_mentions)\n\n mentions_vector = [m for m,_ in mention_counts]\n\n # zip users, mentions\n users_mentions_zip = list(zip(users_list, mentions_list))\n # findng mention feature vector for each user\n mention_features = {}\n for user in tqdm(set(users_list), desc=\"mention_features\", leave=LEAVE_BAR):\n user_mentions = [m for u,mns in users_mentions_zip for m in mns if u==user]\n mention_features[user] = np.array( [ user_mentions.count(m) for m in mentions_vector ] )\n if relative_freq and np.sum(mention_features[user])!=0:\n mention_features[user] = mention_features[user]/np.sum(mention_features[user])\n \n return mention_features", "def get_embeddings(self, entities, embedding_type='entity'):\n if not self.is_fitted:\n msg = 'Model has not been fitted.'\n logger.error(msg)\n raise RuntimeError(msg)\n\n if embedding_type is 'entity':\n emb_list = self.trained_model_params[0]\n lookup_dict = self.ent_to_idx\n elif embedding_type is 'relation':\n emb_list = self.trained_model_params[1]\n lookup_dict = self.rel_to_idx\n else:\n msg = 'Invalid entity type: {}'.format(embedding_type)\n logger.error(msg)\n raise ValueError(msg)\n\n idxs = np.vectorize(lookup_dict.get)(entities)\n return emb_list[idxs]", "def handle_entities( entities, model, compartment = \"default\", arguments = DEFAULT_ARGUMENTS):\n for trigger in entities:\n try:\n if 
STANDOFF_ENTITY_TO_SBO_MAPPING.get( trigger.type_lower):\n add_species( trigger, model, compartment = compartment, arguments = arguments);\n except:\n logging.getLogger( \"st2sbml\").error( \"{0} entity {1} could not be added to model\".format( get_path( arguments), trigger))", "def scan_entity_mentions(entity):\n shoulds = []\n for term in entity.regex_terms:\n shoulds.append(text_query_string(term))\n\n query = {\n 'query': {\n 'bool': {'should': shoulds, \"minimum_should_match\": 1}\n },\n 'sort': [{'document_id': 'desc'}],\n '_source': ['document_id', 'text']\n }\n for res in scan(get_es(), query=query, index=get_es_index(),\n doc_type=[TYPE_RECORD]):\n text = res.get('_source').get('text')\n texts = text if isinstance(text, list) else [text]\n for text in texts:\n yield (res.get('_source').get('document_id'), text)", "def add_entities(doc):\n\n # Calls function to tokenize the document, stores as list of strings\n tokens = tokenize(doc)\n\n # Calls function to find named entities in the tokens, stores as list of strings\n chunks = chunk(tokens)\n\n return chunks", "def match_entities(self, message: Message):\n extracted_entities = []\n tokens = message.get(\"tokens\")\n for token in tokens:\n for entity_type in self.ents.keys():\n fuzzy_matches = self.ents[entity_type].get(token.text)\n for match in fuzzy_matches:\n if match[0] < self.min_confidence: continue # skip low-confidence entities\n entity = {\n \"start\": token.start,\n \"end\": token.end,\n \"value\": match[1],\n \"confidence\": match[0],\n \"entity\": entity_type,\n }\n extracted_entities.append(entity) \n return extracted_entities", "def get_named_entities(\n self,\n identity: Optional[str] = None,\n type: Optional[str] = None,\n subtype: Optional[str] = None,\n ) -> List[NamedEntity]:\n found: List[NamedEntity] = []\n for named_entity in [\n e for h in self.headlines for s in h.sentences for e in s.named_entities\n ]:\n if identity and (identity != named_entity.identity):\n continue\n if type and (type != named_entity.type):\n continue\n if subtype and (subtype != named_entity.subtype):\n continue\n found.append(named_entity)\n return found", "def recognize_entities( # type: ignore\n self,\n documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]]\n **kwargs # type: Any\n ):\n # type: (...) 
-> List[Union[RecognizeEntitiesResult, DocumentError]]\n language_arg = kwargs.pop(\"language\", None)\n language = language_arg if language_arg is not None else self._default_language\n docs = _validate_input(documents, \"language\", language)\n model_version = kwargs.pop(\"model_version\", None)\n show_stats = kwargs.pop(\"show_stats\", False)\n string_index_type = _check_string_index_type_arg(\n kwargs.pop(\"string_index_type\", None),\n self._api_version,\n string_index_type_default=self._string_index_type_default\n )\n if string_index_type:\n kwargs.update({\"string_index_type\": string_index_type})\n disable_service_logs = kwargs.pop(\"disable_service_logs\", None)\n if disable_service_logs is not None:\n kwargs['logging_opt_out'] = disable_service_logs\n\n try:\n return self._client.entities_recognition_general(\n documents=docs,\n model_version=model_version,\n show_stats=show_stats,\n cls=kwargs.pop(\"cls\", entities_result),\n **kwargs\n )\n except HttpResponseError as error:\n process_http_response_error(error)", "def generate_entity_instances(entities: str | list[str], allow_anonymous: bool = True, **kwargs: Any) -> list[tuple[str, BaseEntity]]:\r\n\r\n from wikibaseintegrator.entities.baseentity import BaseEntity\r\n\r\n if isinstance(entities, str):\r\n entities = [entities]\r\n\r\n assert isinstance(entities, list)\r\n\r\n params = {\r\n 'action': 'wbgetentities',\r\n 'ids': '|'.join(entities),\r\n 'format': 'json'\r\n }\r\n\r\n reply = mediawiki_api_call_helper(data=params, allow_anonymous=allow_anonymous, **kwargs)\r\n\r\n entity_instances = []\r\n for qid, v in reply['entities'].items():\r\n from wikibaseintegrator import WikibaseIntegrator\r\n wbi = WikibaseIntegrator()\r\n f = [x for x in BaseEntity.__subclasses__() if x.ETYPE == v['type']][0]\r\n ii = f(api=wbi).from_json(v)\r\n entity_instances.append((qid, ii))\r\n\r\n return entity_instances", "def masked_mentions(adm, masks):\n # Keep track of separate indices per masked entity type\n index = {k: 0 for k in MASKS}\n for entity in adm['attributes']['entities']['items']:\n if entity['type'] in masks:\n index[entity['type']] += 1\n for mention in entity['mentions']:\n # add the appropriate mask to the mention\n mention['mask'] = masks[entity['type']].format(\n index[entity['type']]\n )\n yield mention", "def _query_similar_entities(self, entities: List[str]):\n similar_entities = []\n for e in entities:\n # Don't return the artists given in the\n # parms (for the case where there are\n # multiple artists and they are related\n # to each other).\n similar_entities += [\n ent\n for ent\n in self.kb_api.get_related_entities(e)\n if ent not in entities\n ]\n\n return similar_entities", "def _return_entities_indexes(self, seqs: List[List[str]], type_select: str) -> List[Tuple[str, int, int]]:\n\n entities = []\n sequences = [label for seq in seqs for label in seq + ['O']]\n sentences = [word for sentence in self.sentences for word in sentence + ['']]\n prev_top = 'O'\n prev_type = ''\n focus_idx = 0\n\n for i, (label, words) in enumerate(zip(sequences + ['O'], sentences + [''])):\n top = label[0]\n type_ = label.split('-')[-1]\n\n if self._is_end_of_label(prev_top, top, prev_type, type_) \\\n and type_select in [prev_type, ''] \\\n and self._check_add_entities(''.join(sentences[focus_idx: i]), type_select):\n entities.append((prev_type, focus_idx, i - 1))\n\n focus_idx = i if self._is_begin_of_label(prev_top, top, prev_type, type_) else focus_idx\n prev_top = top\n prev_type = type_\n\n return entities", "def 
resolve_entities(root, info, ids: list[int], **kwargs):\n return Entity.objects.filter(id__in=ids)", "def entity_extractor(service_type: Text):\n\n def register_extractor(fun: ExtractEntitesFunc) -> ExtractEntitesFunc:\n @wraps(fun)\n async def get_and_upload(ctx: MetricsContext, project_id: str, svc_def: GCPService) -> Iterable[Entity]:\n try:\n entities = await fun(ctx, project_id, svc_def)\n except Exception as e:\n ctx.log(f\"Failed to finish entity extractor task, reason is {type(e).__name__} {e}\")\n return []\n\n return entities\n\n entities_extractors[service_type] = get_and_upload\n return get_and_upload\n\n return register_extractor", "def add_ents(self, ents: Iterable['Entity']) -> None:\n ents = list(ents)\n self.entities.extend(ents)\n for item in ents:\n self.by_class[item['classname'].casefold()].add(item)\n self.by_target[item['targetname', ''].casefold() or None].add(item)\n if 'nodeid' in item:\n try:\n node_id = int(item['nodeid'])\n except (TypeError, ValueError):\n pass\n else:\n item['nodeid'] = str(self.node_id.get_id(node_id))", "def __init__(self, entityType, sentenceFilter, useDetected=True):\n Finder.__init__(self, [entityType])\n self.finderType = 'clusterer'\n self.sentenceFilter = sentenceFilter\n self.useDetected = useDetected", "def search_functional_identifiers(self, criterion_type, criteria_list):\n check_type(value=criterion_type, allowed_types=str, var_name=\"criterion_type\", raise_exception=True)\n check_type(value=criteria_list, allowed_types=list, var_name=\"criteria_list\", raise_exception=True)\n\n my_filter = dict()\n my_filter[criterion_type] = criteria_list\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.POST,\n template=TEMPLATES['search_functional_identifier_list'],\n data=my_filter,\n files=None)\n check_http_code(response)\n\n return response.json", "def get_embeddings(self, entities, type='entity'):\n return None", "def get_mentions(self, column, list_of_types, total=False, average=False):\n for mbti_type in list_of_types:\n self.df[mbti_type + '_mentions'] = [sum([x.casefold().count(mbti_type.casefold()) for x in post]) for post in self.df[column]]\n if total == True:\n mention_cols = [col for col in self.df.columns if 'mentions' in col]\n self.df['total_mentions'] = self.df.filter(mention_cols).sum(axis=1)\n if average == True:\n self.df['avg_mentions_per_post'] = self.df['total_mentions'] / self.df['count_posts']", "def make_entities():\n entities = [\n Creature((5, 5), 10, [], '*'),\n Creature((9, 5), 1, [], '@'),\n Creature((5, 9), 1, [], '@'),\n Potion((1, 2), 8, '#'),\n Weapon((2, 4), 5, '/'),\n Weapon((7, 1), 6, '(')\n ]\n return entities", "def findentity(string):\r\n for x in entitylist:\r\n if x in string:\r\n print(f\"(Doc.{i})--Entity = {x.title()}\")\r\n break", "def extract_entities(self) :\n entities = []\n googleEntityList = self.googleLanguageModel.analyze_entities() \n watsonEntityList = self.watsonLanguageModel['entities']\n\n for entity in googleEntityList.entities[:self.entitySizeLimit]:\n if len(entity.metadata) > 0:\n entities.append({ 'name' : entity.name, 'metadata' : entity.metadata})\n \n for entity in watsonEntityList[:self.entitySizeLimit]: \n entities.append({ 'name': entity['text'], 'metadata': entity.get('disambiguation', {})}) \n\n return entities", "def get_entities(tags):\n pass", "def find_or_create(self, entity_type, data=None, **kwargs):\n \n if data and kwargs:\n raise ValueError('specify data or kwargs')\n data = data or kwargs\n \n filters = []\n for 
k, v in data.iteritems():\n filters.append((k, 'is', v))\n entity = self.shotgun.find_one(entity_type, filters, data.keys())\n if entity:\n return entity\n data = data.copy()\n data.pop('id', None)\n return self.create(entity_type, data, data.keys())", "def create_instances_from_mention_link(\n\tmention, all_documents, tfidf_candidates, tokenizer, max_seq_length,\n\trng, is_training=True):\n\n\t# Account for [CLS], [SEP], [SEP]\n\tmax_num_tokens = max_seq_length - 3\n\n\tmention_length = int(max_num_tokens/2) \n\tcand_entity_length = max_num_tokens - mention_length\n\n\tcontext_document_id = mention['context_document_id']\n\tlabel_document_id = mention['label_document_id']\n\tstart_index = mention['start_index']\n\tend_index = mention['end_index']\n\n\tcontext_document = all_documents[context_document_id]['text']\n\tcontext_tokens = context_document.split()\n\textracted_mention = context_tokens[start_index: end_index+1]\n\textracted_mention = ' '.join(extracted_mention)\n\tcontext_tokens.insert(start_index, MS)\n\tcontext_tokens.insert(end_index + 2, ME)\n\tstart_index += 1\n\tend_index += 1\n\tassert extracted_mention == mention['text']\n\tmention_text_tokenized = tokenizer.tokenize(mention['text'])\n\n\tmention_context, mention_start, mention_end = get_context_tokens(\n\t\tcontext_tokens, start_index, end_index, mention_length, tokenizer)\n\n\tmention_id = mention['mention_id']\n\tassert mention_id in tfidf_candidates\n\n\tcand_document_ids = tfidf_candidates[mention_id]\n\tif not cand_document_ids:\n\t\treturn None\n\n\tif not is_training:\n\t\tcand_document_ids = cand_document_ids[:num_cands]\n\n\tif not is_training and label_document_id not in cand_document_ids:\n\t\treturn None\n\n\n\tcand_document_ids = [cand for cand in cand_document_ids if cand != label_document_id]\n\tassert label_document_id not in cand_document_ids\n\n\twhile len(cand_document_ids) < num_cands:\n\t\tcand_document_ids.extend(cand_document_ids)\n\n\tcand_document_ids.insert(0, label_document_id)\n\n\tcand_document_ids = cand_document_ids[:num_cands]\n\tassert len(cand_document_ids) == num_cands\n\n\tlabel_id = None\n\tfor i, document in enumerate(cand_document_ids):\n\t\tif document == label_document_id:\n\t\t\tassert label_id == None\n\t\t\tlabel_id = i\n\n\tassert label_id == 0\t \n\n\n\tinstance_tokens = []\n\tinstance_input_ids = []\n\tinstance_segment_ids = []\n\tinstance_input_mask = []\n\tinstance_mention_id = []\n\n\tfor cand_document_id in cand_document_ids:\n\t\ttokens_a = mention_context\n\t\tcand_document_title = all_documents[cand_document_id]['title']\n\t\tcand_document_text = all_documents[cand_document_id]['text'][len(cand_document_title):].strip()\n\t\tcand_document = cand_document_title + ' ' + ENT + \" \" + cand_document_text \n\t\t# cand_document = cand_document_title + ' ' + cand_document_text \n\t\tcand_document_truncate = ' '.join(cand_document.split()[:cand_entity_length])\n\t\tcand_document = tokenizer.tokenize(cand_document_truncate)\n\t\ttokens_b = cand_document[:cand_entity_length]\n\n\t\ttokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\t\tsegment_ids = [0]*(len(tokens_a) + 2) + [1]*(len(tokens_b) + 1)\n\t\tinput_mask = [1]*len(input_ids)\n\t\tmention_id = [0]*len(input_ids)\n\n\t\t# Update these indices to take [CLS] into account\n\t\tnew_mention_start = mention_start + 1\n\t\tnew_mention_end = mention_end + 1\n\n\t\tassert tokens[new_mention_start: new_mention_end+1] == mention_text_tokenized\n\t\tfor t in 
range(new_mention_start, new_mention_end+1):\n\t\t\tmention_id[t] = 1\n\n\t\tassert len(input_ids) <= max_seq_length\n\n\t\ttokens = tokens + ['<pad>'] * (max_seq_length - len(tokens))\n\t\tinstance_tokens.extend(tokens)\n\t\tinstance_input_ids.extend(pad_sequence(input_ids, max_seq_length))\n\t\tinstance_segment_ids.extend(pad_sequence(segment_ids, max_seq_length))\n\t\tinstance_input_mask.extend(pad_sequence(input_mask, max_seq_length))\n\t\tinstance_mention_id.extend(pad_sequence(mention_id, max_seq_length))\n\n\n\tinstance = TrainingInstance(\n\t\ttokens=instance_tokens,\n\t\tinput_ids=instance_input_ids,\n\t\tinput_mask=instance_input_mask,\n\t\tsegment_ids=instance_segment_ids,\n\t\tlabel_id=label_id,\n\t\tmention_ids=instance_mention_id,\n\t\tmention_guid=mention['mention_id'],\n\t\tcand_guids=cand_document_ids)\n\n\treturn instance" ]
[ "0.548936", "0.53434336", "0.5073382", "0.50559545", "0.5006649", "0.5000927", "0.49018753", "0.4899283", "0.48503044", "0.48402494", "0.4830563", "0.4823236", "0.48019728", "0.4800545", "0.47958317", "0.47294486", "0.46860445", "0.46614227", "0.4642066", "0.4610926", "0.46101096", "0.4580064", "0.45723", "0.45559043", "0.455588", "0.4479205", "0.4469262", "0.44692227", "0.44660538", "0.44432595" ]
0.6929863
0
compute classifier features for each token in each abstract in a given list of abstracts.
def computeFeatures(self, absList): raise NotImplementedError("Need to implement computeFeatures()")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokenize_abstracts(abstracts):\n\n tokenized_abstracts_list = []\n for abstract in abstracts:\n tokens = nltk.word_tokenize(abstract)\n tokenized_abstracts_list.append(tokens)\n return tokenized_abstracts_list", "def computeFeatures(self, absList, mode=''):\n raise NotImplementedError(\"Need to implement computeFeatures()\")", "def featurize(self, tokens):\n features = []\n \n nrc_hashtag_emotion_features = self.nrc_hashtag_emotion(tokens)\n nrc_affect_intensity_features = self.nrc_affect_intensity(tokens)\n nrc_hashtag_sentiment_lexicon_unigrams_features = self.nrc_hashtag_sentiment_lexicon_unigrams(tokens)\n nrc_hashtag_sentiment_lexicon_bigrams_features = self.nrc_hashtag_sentiment_lexicon_bigrams(tokens)\n sentiment140_unigrams_features = self.sentiment140_unigrams(tokens)\n sentiment140_bigrams_features = self.sentiment140_bigrams(tokens)\n senti_wordnet_features = self.senti_wordnet(tokens)\n bing_lui_sentiment_lexicons_features = self.bing_lui_sentiment_lexicons(tokens)\n nrc_expanded_lexicon_features = self.nrc_10_expanded(tokens)\n negating_word_list_features = self.negating_words_list(tokens)\n total_number_of_words_features = self.get_total_number_of_words(tokens)\n mpqa_subjectivity_lexicon_features = self.mpqa_subjectivity_lexicon(tokens)\n afinn_sentiment_features = self.afinn_sentiment_scores(tokens)\n # senti_strength_features = self.get_sentistrength(\" \".join(tokens))\n\n features.extend(nrc_hashtag_emotion_features.values()) # 10 features\n features.extend(nrc_affect_intensity_features.values()) # 10 features\n features.extend(nrc_hashtag_sentiment_lexicon_unigrams_features.values()) # 4 features\n features.extend(nrc_hashtag_sentiment_lexicon_bigrams_features.values()) # 4 features\n features.extend(sentiment140_unigrams_features.values()) # 4 features \n features.extend(sentiment140_bigrams_features.values()) # 4 features\n features.extend(senti_wordnet_features.values()) # 4 features\n features.extend(bing_lui_sentiment_lexicons_features.values()) # 2 features\n features.extend(nrc_expanded_lexicon_features.values()) # 10 features\n features.extend(negating_word_list_features.values()) # 1 feature\n features.extend(total_number_of_words_features.values()) # 1 feature\n features.extend(mpqa_subjectivity_lexicon_features.values()) # 2 features\n features.extend(afinn_sentiment_features.values()) # 2 features\n # features.extend(senti_strength_features.values()) # 2 features\n\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {} # label\n for (i, label) in enumerate(label_list): # ['0', '1']\n label_map[label] = i\n\n features = [] # feature\n for (ex_index, example) in enumerate(examples):\n text_a_id = int(example.text_a_id)\n text_b_id = int(example.text_b_id)\n\n text_a_fields = example.text_a.split(\" _eop_ \")\n \n tokens_a = []\n text_a_subtype = []\n for text_a_field_idx, text_a_field in enumerate(text_a_fields):\n text_a_field_token = tokenizer.tokenize(text_a_field)\n tokens_a.extend(text_a_field_token)\n text_a_subtype.extend([text_a_field_idx]*len(text_a_field_token))\n assert len(tokens_a) == len(text_a_subtype)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b) # text_b tokenize\n\n if tokens_b: # if has b\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) # truncate\n 
else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because # (?)\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n subtype_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n subtype_ids.append(0)\n for token_idx, token in enumerate(tokens_a):\n tokens.append(token)\n segment_ids.append(0)\n subtype_ids.append(text_a_subtype[token_idx])\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n subtype_ids.append(1)\n\n if tokens_b:\n for token_idx, token in enumerate(tokens_b):\n tokens.append(token)\n segment_ids.append(1)\n subtype_ids.append(2)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n subtype_ids.append(2)\n\n input_sents = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_sents) # mask\n\n # Zero-pad up to the sequence length.\n while len(input_sents) < max_seq_length:\n input_sents.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n subtype_ids.append(0)\n\n assert len(input_sents) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(subtype_ids) == max_seq_length\n\n label_id = label_map[example.label]\n\n if ex_index%2000 == 0:\n print('convert_{}_examples_to_features'.format(ex_index))\n\n features.append(\n InputFeatures( # object\n text_a_id=text_a_id,\n text_b_id=text_b_id,\n input_sents=input_sents,\n input_mask=input_mask,\n segment_ids=segment_ids,\n subtype_ids=subtype_ids,\n label_id=label_id))\n\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length 
- 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n target_indices = find_target_indices(tokens_a, tokens)\n if target_indices is None:\n target_indices = (1, 1 + len(tokens_a))\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n target_indices=target_indices))\n return features", "def getfeaturesandlabels(lst, exptype=False, semantic=True, predict=True):\n if 'PGATE' in lst[0][0]:\n print \"Get features from {} expressions.\".format('predicted' if predict else 'gold')\n else:\n print \"Get features from gold expressions. (No PGATE in token)\"\n predict = False\n \n stats = {'holders_not_in_candidates': [],\n 'position': {},\n 'expt_not_in_candidates': []}\n if not exptype:\n exptypelist = EXPTYPES\n features = {}\n labels = {}\n pos = {}\n ev = evaluate()\n for expt in EXPTYPES:\n features[expt] = []\n labels[expt] = []\n pos[expt] = []\n features[expt+'implicit'] = []\n labels[expt+'implicit'] = []\n pos[expt+'implicit'] = []\n features[expt+'w'] = []\n labels[expt+'w'] = []\n pos[expt+'w'] = []\n for sent_i, sent in enumerate(lst):\n if DEBUG: print \"---\", sent_i\n if sent_i % 1000 == 0: print \"setning\", sent_i\n daughterlists_sent(sent)\n ex = getexpressions_sent(sent)\n pex = getexpressions_sent(sent, predict=predict)\n tagholdercandidates_sent(sent, predict=predict)\n candidates = getholdercandidates_list_sent(sent)\n holder_dct = getholders_sent_new(sent)\n holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct, test=predict)\n count_gold(holder_exp_pairs) \n if True: # syntactic_path\n paths = getpaths_sent(getgraph_sent(sent))\n else:\n paths = False\n if predict:\n\n holder_exp_pairs_sys = []\n\n for c, p in enumerate(extolst(pex, gatekey='PGATE')):\n # first located e' that corresponded to e\n argmaxcxe = 0 # at least some overlap\n if args.argmaxcxe:\n argmaxcxe = int(args.argmaxcxe)\n current_pair = None\n for exp_pair_i, exp_pair in enumerate(holder_exp_pairs):\n #argmax c(x,e) regardless of exp type j&m 7.1.1\n if DEBUG:\n print exp_pair\n cxe = ev.spancoverage(exp_pair[0], p['token_id']) \n if DEBUG:\n print cxe\n if cxe > argmaxcxe:\n argmaxcxe = cxe\n current_pair = exp_pair\n if current_pair:\n holder_exp_pairs_sys.append((p['token_id'], current_pair[1], current_pair[2], current_pair[3]))\n else:\n counters['falsely_detected_exp'] += 1\n counters['falsely_detected_exp' + p['expt']] += 1\n 
\n if predict:\n holder_exp_pairs_use = holder_exp_pairs_sys\n else:\n holder_exp_pairs_use = holder_exp_pairs\n holder_exp_pairs_use = count_sys(holder_exp_pairs_use, save=True)\n for exp_pair in holder_exp_pairs_use:\n expt = exp_pair[2]\n cand_exists = True\n holder_set = True\n # Categorise \n if isinstance(exp_pair[1], str):\n #if predict:\n holder_set = False\n elif isinstance(exp_pair[1], set):\n # om holder ikke er hc\n #print candidates\n if expt in candidates:\n if not exp_pair[1].intersection(candidates[expt]):\n counters['holder_not_in_candidate_head'] += 1\n cand_exists = False\n for cand in candidates[expt]:\n if exp_pair[1].intersection(get_subtree(sent, cand, transitive=True)):\n cand_exists = True\n if not cand_exists:\n counters['holder_not_in_candidates'] += 1\n counters['holder_not_in_candidates' + exp_pair[2]] += 1\n stats['holders_not_in_candidates'].append({'candidates': candidates[expt],\n 'exp_pair': exp_pair})\n else:\n cand_exists = False\n counters['ignore_count'] += 1\n counters['holder not in candidates - special case'] += 1\n #if cand_exists:\n # For prediction:\n elif isinstance(exp_pair[1], OrderedDict):\n if expt in candidates:\n holdermax = argmaxcxh(exp_pair[1], candidates[expt])\n if not holdermax[0]:\n cand_exists = False\n counters['ignore_count'] += 1\n else:\n cand_exists = False\n counters['expt_not_in_candidates - new'] += 1\n stats['expt_not_in_candidates'].append({'sent': sent_i,\n 'exp_pair': exp_pair})\n else:\n raise Exception('exp_pair[1] of unknown type: {}'.format(exp_pair[1]))\n\n if not predict or cand_exists:\n # we don't need to count false predicted holders, the p. sum is already\n # made, but we need these for training\n \n # ext-classifiers (w/imp)\n # labels\n if exp_pair[1] == 'w':\n labels[expt + 'w'].append(True)\n labels[expt + 'implicit'].append(False)\n elif exp_pair[1] == 'implicit':\n labels[expt + 'w'].append(False)\n labels[expt + 'implicit'].append(True)\n else:\n labels[expt + 'w'].append(False)\n labels[expt + 'implicit'].append(False)\n\n # Features\n featuresdict = {}\n ex_head = getex_head(exp_pair[0], sent)\n featuresdict['ex_head_word'] = sent[ex_head-1]['form']\n featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']\n featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']\n tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)\n if tmp:\n featuresdict['dom_ex_type'] = tmp\n featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])\n featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']\n features[expt + 'w'].append(featuresdict)\n #features[expt + 'implicit'].append(featuresdict)\n pos[expt + 'w'].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_gold': exp_pair[1],\n 'holder_sys': 'w'})\n pos[expt + 'implicit'].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_gold': exp_pair[1],\n 'holder_sys': 'implicit'})\n\n if cand_exists:\n # internals\n if expt in candidates:\n featuresandlabeladded = False\n for cand in candidates[expt]:\n if args.restrict == 'sameexp' and cand in exp_pair[0]: #get_subtree(sent, cand, transitive=True)):\n pass\n else:\n featuresdict = {}\n if holder_set:\n featuresandlabeladded = True\n\n # labels\n if isinstance(exp_pair[1], OrderedDict):\n label = cand_in_ghodct(cand, exp_pair[1])\n if isinstance(exp_pair[1], set):\n label = cand in exp_pair[1]\n elif isinstance(exp_pair[1], str):\n label = cand == exp_pair[1]\n labels[expt].append(label)\n\n # positions\n pos[expt].append({'sent': sent_i,\n 'exp': exp_pair[0],\n 'holder_sys': get_subtree(sent, 
cand, transitive=True),\n 'holder_gold': exp_pair[1],\n 'coref_gold': exp_pair[3],\n 'exptype' : expt\n }) \n\n # features\n ex_head = getex_head(exp_pair[0], sent)\n featuresdict['synt_path'] = syntactic_path(cand, ex_head,\n sent, paths=paths)\n if semantic:\n tmp = shallow_sem_relation(cand-1, ex_head-1, sent)\n if tmp:\n featuresdict['shal_sem_rel'] = tmp\n featuresdict['ex_head_word'] = sent[ex_head-1]['form']\n featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']\n featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']\n featuresdict['cand_head_word'] = sent[cand-1]['form']\n featuresdict['cand_head_pos'] = sent[cand-1]['pos']\n tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)\n if tmp:\n featuresdict['dom_ex_type'] = tmp\n featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])\n if cand > 1:\n featuresdict['context_r_word'] = sent[cand-2]['form']\n featuresdict['context_r_pos'] = sent[cand-2]['pos']\n if cand < len(sent):\n featuresdict['context_l_word'] = sent[cand]['form']\n featuresdict['context_l_pos'] = sent[cand]['pos']\n featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']\n \n features[expt].append(featuresdict)\n else:\n counters[\"expt_not_in_candidates\"] += 1\n counters[\"expt_not_in_candidates\" + expt] += 1\n\n stats['positions'] = pos\n return features, labels, stats", "def tagged_abstracts(tokenized_abstracts_list):\n\n tagged_abstracts_list = []\n for tokenized_abstract in tokenized_abstracts_list:\n tagged = nltk.pos_tag(tokenized_abstract)\n tagged_abstracts_list.append(tagged)\n return tagged_abstracts_list", "def handle_classification_tags(\n actapi: act.api.Act, content: Text, classification_tags: List[Text]\n) -> List[act.api.fact.Fact]:\n\n feeds_facts: List[act.api.fact.Fact] = []\n\n for tag in classification_tags:\n feeds_facts.append(\n actapi.fact(\"classifiedAs\")\n .source(\"content\", content)\n .destination(\"tool\", tag)\n )\n\n return feeds_facts", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing \n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n \n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def extractFeatures(self, datum):\n abstract", "def convert_examples_to_features(tokens_set, labels_set, max_seq_length, tokenizer):\r\n\r\n #label_map = {label: i for i, label in enumerate(label_list, 1)}\r\n\r\n input_ids, input_masks, segment_ids, labels = [], [], [], []\r\n for index in tqdm_notebook(range(len(tokens_set)),desc=\"Converting examples to features\"):\r\n textlist = tokens_set[index] #example.text_a.split(' ')\r\n labellist = labels_set[index]\r\n input_id, input_mask, segment_id,label = convert_single_example(\r\n textlist, labellist,max_seq_length,tokenizer\r\n )\r\n input_ids.append(input_id)\r\n input_masks.append(input_mask)\r\n segment_ids.append(segment_id)\r\n labels.append(label)\r\n return (\r\n np.array(input_ids),\r\n np.array(input_masks),\r\n np.array(segment_ids),\r\n np.array(labels)\r\n )", "def make_training_data(feature_funcs, annotations):\n extractor = FeatureExtractor(feature_funcs)\n \n training_instances = []\n \n for sent_str, anns in annotations:\n tree = parser.raw_parse(sent_str).next()\n tree = convert_brackets(tree)\n # print tree\n # some preprocessing, align the positions and \n # also use the sentence string given the parse tree\n anns = align_annotation_with_sentence(sent_str, ' '.join(tree.leaves()), anns)\n sent_str = ' '.join(tree.leaves())\n for ann in anns:\n frame_name = ann.frame_name\n start, end = ann.target.start, ann.target.end\n frame = Frame(start, end, frame_name)\n frame_node = find_node_by_positions(tree, start, end)\n\n # TODO: bug here\n if frame_node is None: \n sys.stderr.write(\"Warning: %r does not correspond to any tree node in sentence \\\"%s\\\"\\nSkip it\\n \" %(frame, sent_str))\n continue\n \n for node, (node_start_pos, node_end_pos) in collect_nodes(tree):\n node_pos = NodePosition(node_start_pos, node_end_pos)\n context = Context(sent_str, tree, frame, node_pos)\n\n feature_values = extractor.extract(node, context)\n \n # try to see the it has some semantic role\n found_matching_node = False\n for fe in ann.FE:\n other_node = find_node_by_positions(tree, fe.start, fe.end)\n if node == other_node:\n training_instances.append((feature_values, fe.name))\n found_matching_node = True\n break\n\n # semantic role => NULL\n if not found_matching_node:\n training_instances.append((feature_values, 'NULL'))\n\n return training_instances", "def classify(words, all_tags):\n answer = []\n for word in words:\n label, score = clf_base.predict({word:1},weights,list(all_tags))\n answer.append(label)\n return answer", "def parallel_tokenizer(df):\n pool = mp.Pool(processes=4)\n df['tokenized_abs'] = pool.map(_tokenize_abstract, df['Abstract'])\n pool.terminate()\n return df", "def 
compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def convert_examples_to_features(self, examples_paths, label_list, max_seq_length, tokenizer, set_type):\n \n if all([os.path.exists(path.replace('examples', 'features')) for path in examples_paths]):\n features_paths = examples_paths\n \n else:\n\n def f(example):\n labels_ids = torch.FloatTensor(example.label).unsqueeze(0).to(torch.int64)\n input_ids = torch.FloatTensor(example.text_a).unsqueeze(0).to(torch.int64)\n #attention_mask = torch.ones(input_ids.size()).to(torch.int64)\n attention_mask = torch.FloatTensor(example.text_b).unsqueeze(0).to(torch.int64)\n token_type_ids = torch.zeros(input_ids.size()).to(torch.int64)\n output_mask = (labels_ids != -100)\n return InputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label_ids=labels_ids,\n output_mask=output_mask)\n\n for index_split, examples_split in enumerate(examples_paths):\n split = self.load_object(examples_split)\n print(f\"Computing split {index_split+1} / {self.n_splits}... Split size: {len(split)}\")\n features = Parallel(n_jobs=-1)(delayed(f)(example) for example in tqdm(split))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_features_split-{index_split}.pkl'), features)\n\n features_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_features_split-{index_split}.pkl') for index_split in range(self.n_splits)]\n \n return features_paths", "def classify(self, features):\n\n # TODO: finish this.\n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for i in range(feat_shape[0]):\n vote = np.zeros((self.num_trees))\n for j in range(self.num_trees):\n #print self.trees[j].classify(feat)\n vote[j] = self.trees[j].classify(features[i,self.attr_track[j]].reshape(1,-1))[0]\n counts = np.bincount(vote.astype(int))\n class_labels.append(np.argmax(counts))\n return class_labels", "def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n# result = []\n\n# for punct_val in punct_vals:\n# tokens = [tokenize(doc, punct_val) for doc in docs]\n# for f in [comb for i in range(len(feature_fns)) for comb in combinations(feature_fns, i+1)]:\n# feature = list(f)\n\n# for min_freq in min_freqs:\n# clf = LogisticRegression()\n# X, vocab = vectorize(tokens, feature, min_freq)\n# accuracy = cross_validation_accuracy(clf, X, labels, 5)\n# result.append(dict(punct= punct_val, features= feature, min_freq= min_freq, accuracy = accuracy))\n\n# return sorted(result, key=lambda x:(-x['accuracy'],-x['min_freq']))\n clf = LogisticRegression()\n result = []\n output = []\n for l in range(1, len(feature_fns)+1):\n for c in combinations(feature_fns,l):\n result.append(c)\n\n for p in punct_vals:\n list_tok = [tokenize(d,p) for d in docs]\n for fl in result:\n for mf in min_freqs:\n dict_output = {}\n matrix,vocab = vectorize(list_tok, fl, mf)\n average_value = cross_validation_accuracy(clf, matrix, labels, 5)\n dict_output['features'] = fl\n dict_output['punct'] = p\n dict_output['accuracy'] = average_value\n dict_output['min_freq'] = mf\n output.append(dict_output)\n\n return sorted(output, key=lambda x: (-x['accuracy'], -x['min_freq']))", "def featurize(tokens, feature_fns):\n answer = []\n for func in feature_fns:\n feats = defaultdict(lambda: 0)\n func(tokens,feats)\n answer.extend(feats.items())\n return sorted(answer, key= lambda 
x: x[0])", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n\tlabel_map = {label : i for i, label in enumerate(label_list,1)}\n\t\n\tfeatures = []\n\tfor (ex_index,example) in enumerate(examples):\n\n\t\ttext_a, entity_a, entity_b = example.text_a.split('[RE]')\n\n\t\ttokens_a = tokenizer.tokenize(text_a)\n\t\ttokens_b = None\n\t\ttokens_ea = tokenizer.tokenize(entity_a)\n\t\ttokens_eb = tokenizer.tokenize(entity_b)\n\n\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\tif (len(tokens_a) + len(tokens_ea) + len(tokens_eb)) > (max_seq_length - 4) :\n\t\t\ttokens_a = tokens_a[0:(max_seq_length - 4 - len(tokens_ea) - len(tokens_eb))]\n\n\t\ttokens = []\n\t\tsegment_ids = []\n\t\ttokens.append(\"[CLS]\")\n\t\tsegment_ids.append(0)\n\t\tfor token in tokens_a:\n\t\t\ttokens.append(token)\n\t\t\tsegment_ids.append(0)\n\t\ttokens.append(\"[SEP]\")\n\t\tsegment_ids.append(0)\n\t\tfor token in tokens_ea:\n\t\t\ttokens.append(token)\n\t\t\tsegment_ids.append(0)\n\n\t\ttokens.append(\"[SEP]\")\n\t\tsegment_ids.append(0)\n\n\t\tfor token in tokens_eb:\n\t\t\ttokens.append(token)\n\t\t\tsegment_ids.append(0)\n\n\t\ttokens.append(\"[SEP]\")\n\t\tsegment_ids.append(0)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\twhile len(input_ids) < max_seq_length:\n\t\t\tinput_ids.append(0)\n\t\t\tinput_mask.append(0)\n\t\t\tsegment_ids.append(0)\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\n\t\tlabel_id = label_map[example.label]\n\t\tif ex_index < 2:\n\t\t\tlogger.info(\"*** Example ***\")\n\t\t\tlogger.info(\"guid: %s\" % (example.guid))\n\t\t\tlogger.info(\"tokens: %s\" % \" \".join(\n\t\t\t\t\t[str(x) for x in tokens]))\n\t\t\tlogger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n\t\t\tlogger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n\t\t\tlogger.info(\n\t\t\t\t\t\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\t\t\tlogger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n\t\tfeatures.append(InputFeatures(\n\t\t\tinput_ids=input_ids,\n\t\t\tinput_mask=input_mask,\n\t\t\tsegment_ids=segment_ids,\n\t\t\tlabel_id=label_id))\n\treturn features", "def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels", "def _extract(texts: list[str], tokens: list[list[str]], sentences: list[list[str]], /,\n avg_words=True, avg_sentences=True, pos_distribution=True,\n foreign_words_ratio=True, lexicon=True, punctuation_distribution=True,\n n_jobs=1) -> pd.DataFrame:\n\n def process(function, objects: list, feature_name: str):\n result_ = np.vstack(Parallel(n_jobs)(delayed(function)(objects_) for objects_ in objects))\n\n # Build a list of the column names to create a features DataFrame\n n_columns = result_.shape[1]\n columns_name = [feature_name + f'_{i}' for i in range(1, n_columns + 1)]\n\n return pd.DataFrame(result_, columns=columns_name)\n\n results = []\n # Average length of words\n if avg_words:\n results.append(process(funcs.avg_length, 
tokens, AVG_WORDS))\n # Average length of sentences\n if avg_sentences:\n results.append(process(funcs.avg_length, sentences, AVG_SENTENCES))\n # POS distribution\n if pos_distribution:\n results.append(process(funcs.pos_distribution, tokens, POS_DISTRIBUTION))\n # Lexicon size\n if lexicon:\n results.append(process(funcs.lexicon, tokens, LEXICON_SIZE))\n # Foreign words ratio\n if foreign_words_ratio:\n results.append(process(funcs.foreign_words_ratio, tokens, FOREIGN_RATIO))\n # Punctuations distribution\n if punctuation_distribution:\n results.append(process(funcs.punctuations_distribution, texts, PUNCTUATIONS_DISTRIBUTION))\n\n if not results:\n raise ValueError(\"At least one feature must be chosen\")\n\n return pd.concat(results, axis=1)", "def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list, 1)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n textlist = example.text_a.split(' ')\n labellist = example.label\n tokens = []\n labels = []\n valid = []\n label_mask = []\n for i, word in enumerate(textlist):\n token = tokenizer.tokenize(word)\n tokens.extend(token)\n label_1 = labellist[i]\n for m in range(len(token)):\n if m == 0:\n labels.append(label_1)\n valid.append(1)\n label_mask.append(True)\n else:\n valid.append(0)\n if len(tokens) >= max_seq_length - 1:\n tokens = tokens[0:(max_seq_length - 2)]\n labels = labels[0:(max_seq_length - 2)]\n valid = valid[0:(max_seq_length - 2)]\n label_mask = label_mask[0:(max_seq_length - 2)]\n ntokens = []\n segment_ids = []\n label_ids = []\n ntokens.append(\"[CLS]\")\n segment_ids.append(0)\n valid.insert(0, 1)\n label_mask.insert(0, True)\n label_ids.append(label_map[\"[CLS]\"])\n for i, token in enumerate(tokens):\n ntokens.append(token)\n segment_ids.append(0)\n if len(labels) > i:\n label_ids.append(label_map[labels[i]])\n ntokens.append(\"[SEP]\")\n segment_ids.append(0)\n valid.append(1)\n label_mask.append(True)\n label_ids.append(label_map[\"[SEP]\"])\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\n input_mask = [1] * len(input_ids)\n label_mask = [True] * len(label_ids)\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(0)\n valid.append(1)\n label_mask.append(False)\n while len(label_ids) < max_seq_length:\n label_ids.append(0)\n label_mask.append(False)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(valid) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n 
features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_ids,\n valid_ids=valid,\n label_mask=label_mask))\n return features", "def convert_examples_to_features(examples, tokenizer, max_seq_length):\n \n features = []\n num_tokens_article = []\n count = 0\n masking_token_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]\n # padding_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)\n for example_index, example in enumerate(examples):\n article_tokens = tokenizer.tokenize(example['article'])\n ques_tokens = tokenizer.tokenize(example['question'].replace(\"@placeholder\", \"_\"))\n masked_index = ques_tokens.index('_')\n ques_tokens[masked_index] = '[MASK]'\n \n# options = example['options']\n options = tokenizer.convert_tokens_to_ids(example['options'])\n# print(options, '\\n')\n\n choices_features = []\n _truncate_seq_pair(article_tokens, ques_tokens, max_seq_length - 3)\n\n# tokens = [\"[CLS]\"] + ques_tokens + [\"[SEP]\"] + article_tokens + [\"[SEP]\"]\n tokens = ques_tokens + [\"[SEP]\"] + article_tokens + [\"[SEP]\"]\n\n# segment_ids = [0] * (len(ques_tokens) + 2) + [1] * (len(article_tokens) + 1)\n segment_ids = [0] * (len(ques_tokens) + 1) + [1] * (len(article_tokens) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\t\n candidates = example['options']\n candidates_ids = tokenizer.convert_tokens_to_ids(candidates)\n# print(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(example['options'][example['label']]))[0])\n\n lm_labels = [-1 if t_id != masking_token_id else tokenizer.convert_tokens_to_ids(tokenizer.tokenize(example['options'][example['label']]))[0] for t_id in input_ids]\n# lm_labels = [t_id if t_id != 0 else -1 for t_id in input_ids]\n# for i, t_id in enumerate(lm_labels) :\n# if(t_id == masking_token_id):\n# lm_labels[i] = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(example['options'][example['label']]))[0]\n\n# print(lm_labels, '\\n')\n # tokenizer.convert_tokens_to_ids(tokenizer.tokenize(example['options'][example['label']]))[0]\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(lm_labels) == max_seq_length\n\n choices_features.append((tokens, input_ids, input_mask, segment_ids))\n\n features.append(\n InputFeatures(\n choices_features = choices_features,\n lm_labels = lm_labels,\n options = options,\n label = example['label']\n )\n )\n \n return features", "def convert_examples_to_features(examples, label_list, max_seq_len,\n tokenizer, output_mode,\n cls_token='[CLS]',\n cls_token_segment_id=1,\n sep_token='[SEP]',\n pad_token=0,\n pad_token_segment_id=0,\n sequence_a_segment_id=0,\n sequence_b_segment_id=1,\n mask_padding_with_zero=True,\n use_entity_indicator=True):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n #print(example.text_a)\n tokens_a = tokenizer.tokenize(example.text_a)\n # print(tokens_a)\n if use_entity_indicator:\n e11_p = tokens_a.index(\"[E11]\")+1 # the start position of entity1\n e12_p = tokens_a.index(\"[E12]\")+2 # the end position ofentity1\n e21_p = 
tokens_a.index(\"[E21]\")+1 # the start position ofentity2\n e22_p = tokens_a.index(\"[E22]\")+2 # the end position of entity2\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\". \" -4\" for RoBERTa.\n special_tokens_count = 3\n _truncate_seq_pair(tokens_a, tokens_b,\n max_seq_len - special_tokens_count)\n else:\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = 2\n if len(tokens_a) > max_seq_len - special_tokens_count:\n tokens_a = tokens_a[:(max_seq_len - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = tokens_a + [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [sep_token]\n segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)\n\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_len - len(input_ids)\n input_ids = input_ids + ([pad_token] * padding_length)\n input_mask = input_mask + \\\n ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids = segment_ids + \\\n ([pad_token_segment_id] * padding_length)\n if use_entity_indicator:\n e1_mask = [0 for i in range(len(input_mask))]\n\n e2_mask = [0 for i in range(len(input_mask))]\n #print(len(e1_mask),e11_p,e12_p)\n for i in range(e11_p, e12_p):\n e1_mask[i] = 1\n #print(e21_p,e22_p,len(e2_mask))\n for i in range(e21_p, e22_p):\n e2_mask[i] = 1\n\n assert len(input_ids) == max_seq_len\n assert len(input_mask) == max_seq_len\n assert len(segment_ids) == max_seq_len\n\n if output_mode == \"classification\":\n # label_id = label_map[example.label]\n label_id = int(example.label)\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n # if ex_index < 5:\n # logger.info(\"*** Example ***\")\n # logger.info(\"guid: %s\" % (example.guid))\n # logger.info(\"tokens: %s\" % \" \".join(\n # [str(x) for x in tokens]))\n # logger.info(\"input_ids: %s\" %\n # \" \".join([str(x) for x in input_ids]))\n # logger.info(\"input_mask: %s\" %\n # \" \".join([str(x) for x in input_mask]))\n # if use_entity_indicator:\n # logger.info(\"e11_p: %s\" % e11_p)\n # logger.info(\"e12_p: %s\" % e12_p)\n # logger.info(\"e21_p: %s\" % e21_p)\n # logger.info(\"e22_p: %s\" % e22_p)\n # logger.info(\"e1_mask: %s\" %\n # \" \".join([str(x) for x in e1_mask]))\n # logger.info(\"e2_mask: %s\" %\n # \" \".join([str(x) for x in e2_mask]))\n # logger.info(\"segment_ids: %s\" %\n # \" \".join([str(x) for x in segment_ids]))\n # logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n e11_p=e11_p,\n e12_p=e12_p,\n e21_p=e21_p,\n e22_p=e22_p,\n e1_mask=e1_mask,\n e2_mask=e2_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n ID=example.ID))\n\n return features", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def newsgroup_featurize(data_list):\n # TODO: Implement featurization of input.\n all_text = data_list[\"train\"][\"input\"] + data_list[\"test\"][\"input\"] + data_list[\"dev\"][\"input\"]\n word_dict = word_count(all_text)\n bow_noun_features = bow_noun(word_dict) # 11,925 features\n train_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"train\"][\"input\"]])\n dev_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"dev\"][\"input\"]])\n test_input = np.array([text_to_bow_noun_vector(text, bow_noun_features) for text in data_list[\"test\"][\"input\"]])\n return train_input, dev_input, test_input", "def 
convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer, output_mode = \"classification\" ,\n cls_token_at_end=False,\n cls_token='[CLS]',\n cls_token_segment_id=1,\n sep_token='[SEP]',\n sep_token_extra=False,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n sequence_a_segment_id=0, \n sequence_b_segment_id=1,\n mask_padding_with_zero=True):\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n tokens_a = tokenizer.tokenize(example.text_a)\n #bowonko\n '''\n Example class\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n '''\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\". \" -4\" for RoBERTa.\n special_tokens_count = 4 if sep_token_extra else 3\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)\n else:\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = 3 if sep_token_extra else 2\n if len(tokens_a) > max_seq_length - special_tokens_count:\n tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned. -?? bowonko 2019.11.05 무슨 말이지?\n tokens = tokens_a + [sep_token]\n if sep_token_extra:\n # roberta uses an extra separator b/w pairs of sentences\n tokens += [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [sep_token]\n segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)\n\n if cls_token_at_end:\n tokens = tokens + [cls_token]\n segment_ids = segment_ids + [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n\n #token > index로 변환 2019 11 05\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if output_mode == \"classification\":\n label_id = label_map[example.label]\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n tokens_a_longer_max_seq_length = 0\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n\n len_tokens_a = len(tokens_a)\n len_tokens_b = 0\n\n\n\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n len_tokens_b = len(tokens_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n if (len_tokens_a + len_tokens_b) > (max_seq_length - 2):\n tokens_a_longer_max_seq_length += 1\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids)==max_seq_length\n assert len(input_mask)==max_seq_length\n assert len(segment_ids)==max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 1 and example.guid is not None and example.guid.startswith('train'):\n logger.info(\"\\n\\n*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n logger.info(\"\\n\\n\")\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n logger.info(\":: Sentences longer than max_sequence_length: %d\" % (tokens_a_longer_max_seq_length))\n logger.info(\":: Num sentences: %d\" % (len(examples)))\n return features" ]
[ "0.624462", "0.62424415", "0.6225621", "0.5921949", "0.5816261", "0.5813086", "0.5795551", "0.57948124", "0.5740147", "0.57276946", "0.5717184", "0.57068926", "0.56946427", "0.56593144", "0.5658686", "0.5652911", "0.56022424", "0.56008846", "0.5594525", "0.55912507", "0.55830866", "0.5571676", "0.5561331", "0.5551855", "0.554622", "0.5544036", "0.55378443", "0.5530637", "0.55268353", "0.5515939" ]
0.62984097
0
compute RPF stats for detected mentions in a list of abstracts. write results to output stream. write final RPF stats to statOut. write TP/FP/FN to errorOut
def computeStats(self, absList, statOut=None, errorOut=None):
    stats = EntityStats(self.entityTypes)
    for abs in absList:
        errorOut.write('---'+abs.id+'---\n')
        # identify ALL annotated mentions, even in sentences we are not focused on
        # for sentence in abs.allSentences():
        #     for mType in self.entityTypes:
        #         aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)
        #
        # for sentence in abs.sentences:
        #     for mType in self.entityTypes:
        #         self.compareAnnotatedAndDetected(sentence, mType, \
        #             stats.irstats[mType], errorOut)
        for sentence in abs.allSentences():
            for mType in self.entityTypes:
                if sentence in abs.sentences:
                    self.compareAnnotatedAndDetected(sentence, mType, \
                        stats.irstats[mType], errorOut)
                else:
                    aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)
    stats.printStats()
    if statOut != None:
        stats.saveStats(statOut, keyPrefix='MF - ')
    return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computeStats(self, absList, statOut, errorOut):\n \n nMentions = 0\n pSum = 0\n rSum = 0\n for abstract in absList:\n # build hash of annotated clusters/chains keyed by ID\n errorOut.write('\\n---- '+abstract.id+' ----\\n')\n trueChainLengths = {}\n entityList = abstract.annotatedEntities.getList(self.entityTypes[0])\n errorOut.write('True chains:\\n')\n for entityTemplate in entityList:\n if len(entityTemplate.getAnnotatedId()) > 0:\n trueChain = entityTemplate.getMentionChain()\n trueChainLengths[entityTemplate.getAnnotatedId(checkEntireCluster=False)] = len(trueChain)\n for m in trueChain:\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n\n errorOut.write('----\\n')\n else:\n print abstract.id, entityTemplate.name, 'is missing an ID'\n \n # compute Recall and precision for each detected chain/cluster\n entityList = abstract.entities.getList(self.entityTypes[0])\n errorOut.write('\\nHypothesis chains:\\n')\n for entityTemplate in entityList:\n detectedChain = entityTemplate.getMentionChain()\n \n rootMention = entityTemplate.rootMention()\n errorOut.write('[Canonical name: '+rootMention.getCanonicalName()+']\\n')\n \n for m in detectedChain:\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('----\\n')\n\n nMentionsInChain = len(detectedChain)\n for mTemplate in detectedChain:\n nMentions += 1\n if len(mTemplate.getAnnotatedId(checkEntireCluster=False)) == 0:\n # mention is a false positive, it does not belong to any chain\n pSum += 1.0/nMentionsInChain\n rSum += 1\n else:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) not in trueChainLengths:\n print abstract.id, 'template with id =',mTemplate.getAnnotatedId(checkEntireCluster=False), 'not in a true chain'\n break\n nMentionsInTrueChain = trueChainLengths[mTemplate.getAnnotatedId(checkEntireCluster=False)]\n nCorrectInDetectedChain = 0\n annotatedMatches = set([])\n # count the number of mentions in the detected chain that\n # should be in the same chain as this mention\n for m in detectedChain:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) == m.getAnnotatedId(checkEntireCluster=False) \\\n and m.mention.matchedMention not in annotatedMatches:\n nCorrectInDetectedChain += 1\n annotatedMatches.add(m.mention.matchedMention)\n# else:\n# print abstract.id, 'Two mentions do not belong in same chain',\n# print mTemplate, m.getAnnotatedId()\n \n if nCorrectInDetectedChain > nMentionsInTrueChain:\n print abstract.id, 'id=',mTemplate.getAnnotatedId(checkEntireCluster=False), \n print 'detected chain=', nCorrectInDetectedChain,\n print 'true chain=', nMentionsInTrueChain\n nCorrectInDetectedChain = nMentionsInTrueChain\n \n# if nCorrectInDetectedChain != nMentionsInChain:\n# print abstract.id, 'id=',mTemplate.getAnnotatedId(), \n# print 'detected chain=', nCorrectInDetectedChain,\n# print 'true chain=', nMentionsInTrueChain\n \n pSum += float(nCorrectInDetectedChain) / nMentionsInChain\n rSum += float(nCorrectInDetectedChain) / nMentionsInTrueChain\n \n if nMentions == 0:\n print 'No mentions???'\n return \n \n precision = pSum/nMentions\n recall = rSum/nMentions \n fscore = 2*(recall*precision)/(recall + precision)\n \n 
sys.stdout.write('Recall\\tPrecision\\tF-score\\n')\n sys.stdout.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n# statOut.write(self.entityTypesString+'\\n')\n# statOut.write('Recall\\tPrecision\\tF-score\\n')\n# statOut.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n statOut.addStats('MC - '+self.entityTypesString, [['R', recall], ['P', precision], ['F',fscore]])", "def write_results(location, classes, results):\n check_out(location)\n # these are tuples consisting of list of values, mean of the values and std the values\n [accuracies, precisions, recalls, f1scores] = results\n\n file_name = []\n for class_ in classes:\n if class_.endswith('.hdf5'):\n # file_name.append(re.findall('\\d/(.*).hdf5$', class_)[0])\n file_name.append(re.findall('([%.\\w-]*).hdf5$', class_)[0])\n\n with open(os.path.join(location, '-'.join(file_name) + '.results'), 'w') as out_file:\n for class_id, class_ in enumerate(classes):\n out_file.write(file_name[class_id].split('_')[0] + '\\n')\n # out_file.write(infos)\n # out_file.write(' '.join([str(header_item) for header_item in headers[class_id]]))\n out_file.write('\\n\\n')\n\n out_file.write(\"Mean accuracy is {:.2f} with a std of ({:.2f}).\\n\".format(accuracies[1], accuracies[2]))\n out_file.write('\\n'.join([str(accuracy) for accuracy in accuracies[0]]) + '\\n')\n\n if len(classes) == 2:\n out_file.write(\"\\nMean precision is {:.2f} with a std of ({:.2f}).\\n\".format(precisions[1], precisions[2]))\n out_file.write('\\n'.join([str(precision) for precision in precisions[0]]) + '\\n')\n\n out_file.write(\"\\nMean recall is {:.2f} with a std of ({:.2f}).\\n\".format(recalls[1], recalls[2]))\n out_file.write('\\n'.join([str(recall) for recall in recalls[0]]) + '\\n')\n\n out_file.write(\"\\nMean f1 score is {:.2f} with a std of ({:.2f}).\\n\".format(f1scores[1], f1scores[2]))\n out_file.write('\\n'.join([str(f1score) for f1score in f1scores[0]]) + '\\n')", "def writeStats(inDir, outFname):\n ofh = open(outFname, \"w\")\n ofh.write(\"meta\\tkallistoProcReads\\tkallistoAlnReads\\tkallistoEstFragLen\\n\")\n\n inFnames = glob.glob(join(inDir, \"log\", \"*.log\"))\n print(\"Parsing %d logfiles and writing to %s\" % (len(inFnames), outFname))\n for inFname in inFnames:\n cellId = basename(inFname).split(\".\")[0].split(\"_\")[0]\n # [quant] processed 1,836,518 reads, 636,766 reads pseudoaligned\n # [quant] estimated average fragment length: 251.99\n for line in open(inFname):\n if line.startswith(\"[quant] processed \"):\n words = line.split()\n readCount = words[2].replace(\",\",\"\")\n alignCount = words[4].replace(\",\",\"\")\n if line.startswith(\"[quant] estimated average fragment length:\"):\n fragLen = line.split()[5]\n row = [cellId, readCount, alignCount, fragLen]\n ofh.write(\"\\t\".join(row)+\"\\n\")\n ofh.close()", "def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in 
types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")", "def integrateStats(self, data):\n\n typeTags = ('AA', 'UU', 'UUE', 'UUD', 'UX', 'UnM', 'UrM',\n 'nMnM', 'rMrM', 'nMrM', 'nMA', 'rMA', 'XX',\n 'fr', 'rf', 'ff', 'rr')\n\n for pDict in data:\n for i in xrange(len(self.orderedStreams)):\n if self.orderedStreams[i].name == pDict['name']:\n stream = self.orderedStreams[i]\n # insert size stats\n for sizePair in pDict['stats']['sizes']:\n stream.stats['sizes'].append(sizePair)\n # Rname stats\n for statsKey in ('rnames', 'RNsingle', 'RNpairs'):\n for key,value in pDict['stats'][statsKey].items():\n try:\n stream.stats[statsKey][key] += value\n except KeyError:\n stream.stats[statsKey][key] = value\n # LL and ss match stats\n for tag in typeTags:\n try:\n stream.stats[tag] += pDict['stats'][tag]\n except KeyError:\n stream.stats[tag] = pDict['stats'][tag]\n # General counts and output files (var: fns)\n stream.count += 1\n fns = []\n for filename in pDict['files']:\n if stream.op(OP_SAM):\n fns.append('%s.%s' % (filename, OP_SAM))\n elif stream.op(OP_SAMPP):\n fns.append('%s.pp.%s' % (filename, OP_SAM))\n if stream.op(OP_FASTQPP):\n if stream.op(OP_SH):\n fns.append('%s.pp.sh.%s' % (filename, OP_FASTQ))\n else:\n fns.append('%s.pp.1.%s' % (filename, OP_FASTQ))\n fns.append('%s.pp.2.%s' % (filename, OP_FASTQ))\n elif stream.op(OP_FASTQ):\n if stream.op(OP_SH):\n fns.append('%s.sh.%s' % (filename, OP_FASTQ))\n else:\n fns.append('%s.1.%s' % (filename, OP_FASTQ))\n fns.append('%s.2.%s' % (filename, OP_FASTQ))\n for fname in fns:\n stream.fileswritten.add(fname)\n stream.outputfilenames.add(fname)\n break\n\n # Now add the so-called global stats from each dict - we only need\n # update the globalstats list in the first stream object\n for pDict in data:\n stream = self.orderedStreams[0]\n for tag in typeTags:\n try:\n stream.globalstats[tag] += pDict['globalstats'][tag]\n except KeyError:\n stream.globalstats[tag] = pDict['globalstats'][tag]\n\n # Update the heartbeat object with the number of record pairs\n # written. 
This will trigger a console update automatically when\n # needed.\n self.heartbeat.count = stream.globalstats['AA']\n self.heartbeat.update()", "def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()", "def read_annotation_output(self, algorithm, count_parameters=False, parameter_out_dir=None):\n print ' read output'\n\n if count_parameters:\n assert parameter_out_dir is not None\n pcounter = ParameterCounter(self.germline_seqs) if count_parameters else None\n true_pcounter = ParameterCounter(self.germline_seqs) if (count_parameters and not self.args.is_data) else None\n perfplotter = PerformancePlotter(self.germline_seqs, 'hmm') if self.args.plot_performance else None\n\n n_seqs_processed, n_events_processed = 0, 0\n hmminfo = {}\n with opener('r')(self.hmm_outfname) as hmm_csv_outfile:\n reader = csv.DictReader(hmm_csv_outfile)\n boundary_error_queries = []\n for line in reader:\n utils.process_input_line(line,\n splitargs=('unique_ids', 'seqs'),\n int_columns=('nth_best', 'v_5p_del', 'd_5p_del', 'cdr3_length', 'j_5p_del', 'j_3p_del', 'd_3p_del', 'v_3p_del'),\n float_columns=('logprob'))\n ids = line['unique_ids']\n same_event = utils.from_same_event(self.args.is_data, self.reco_info, ids)\n if same_event is None:\n same_event = -1\n id_str = ''.join(['%20s ' % i for i in ids])\n\n # check for errors\n if line['nth_best'] == 0: # if this is the first line for this set of ids (i.e. the best viterbi path or only forward score)\n if line['errors'] is not None and 'boundary' in line['errors'].split(':'):\n boundary_error_queries.append(':'.join([uid for uid in ids]))\n else:\n assert len(line['errors']) == 0\n\n utils.add_cdr3_info(self.germline_seqs, self.cyst_positions, self.tryp_positions, line)\n if self.args.debug:\n if line['nth_best'] == 0: # if this is the first line (i.e. 
the best viterbi path) for this query (or query pair), print the true event\n print '%s %d' % (id_str, same_event)\n self.print_hmm_output(line, print_true=(line['nth_best']==0)) #, perfplotter=perfplotter)\n if line['nth_best'] == 0 and (line['cdr3_length'] != -1 or not self.args.skip_unproductive): # if it's productive, or if we're not skipping unproductive rearrangements\n if pcounter is not None:\n pcounter.increment_reco_params(line)\n if true_pcounter is not None:\n true_pcounter.increment_reco_params(self.reco_info[ids[0]]) # NOTE doesn't matter which id you pass it, since they all have the same reco parameters\n n_events_processed += 1\n for iseq in range(len(ids)):\n uid = ids[iseq]\n hmminfo[uid] = dict(line) # make a copy of the info, into which we'll insert the sequence-specific stuff\n hmminfo[uid]['seq'] = line['seqs'][iseq]\n hmminfo[uid]['unique_id'] = uid\n utils.add_match_info(self.germline_seqs, hmminfo[uid], self.cyst_positions, self.tryp_positions, debug=(self.args.debug > 0))\n if pcounter is not None:\n pcounter.increment_mutation_params(hmminfo[uid])\n if true_pcounter is not None:\n true_pcounter.increment_mutation_params(self.reco_info[uid]) # NOTE doesn't matter which id you pass it, since they all have the same reco parameters\n if perfplotter is not None:\n perfplotter.evaluate(self.reco_info[uid], hmminfo[uid], None if self.args.dont_pad_sequences else self.sw_info[uid]['padded'])\n n_seqs_processed += 1\n\n if pcounter is not None:\n pcounter.write(parameter_out_dir)\n if self.args.plotdir is not None:\n pcounter.plot(self.args.plotdir + '/hmm', subset_by_gene=True, cyst_positions=self.cyst_positions, tryp_positions=self.tryp_positions)\n if true_pcounter is not None:\n true_pcounter.write(parameter_out_dir + '/true')\n if self.args.plotdir is not None:\n true_pcounter.plot(self.args.plotdir + '/hmm/true', subset_by_gene=True, cyst_positions=self.cyst_positions, tryp_positions=self.tryp_positions)\n if perfplotter is not None:\n assert self.args.plotdir is not None\n perfplotter.plot(self.args.plotdir + '/hmm/performance')\n\n print ' processed %d sequences (%d events)' % (n_seqs_processed, n_events_processed)\n if len(boundary_error_queries) > 0:\n print ' %d boundary errors (%s)' % (len(boundary_error_queries), ', '.join(boundary_error_queries))\n\n if self.args.outfname is not None:\n outpath = self.args.outfname\n if self.args.outfname[0] != '/': # if full output path wasn't specified on the command line\n outpath = os.getcwd() + '/' + outpath\n shutil.copyfile(self.hmm_outfname, outpath)\n with open(outpath) as outfile:\n reader = csv.DictReader(outfile)\n outfo = []\n for line in reader:\n outfo.append(line)\n outfo[-1]['naive_seq'] = utils.get_full_naive_seq(self.germline_seqs, line)\n with open(outpath, 'w') as outfile:\n writer = csv.DictWriter(outfile, outfo[0].keys())\n writer.writeheader()\n for line in outfo:\n writer.writerow(line)\n\n if self.args.annotation_clustering == 'vollmers':\n if self.args.outfname is not None:\n outfile = open(self.args.outfname, 'w') # NOTE overwrites annotation info that's already been written to <self.args.outfname>\n headers = ['n_clusters', 'threshold', 'clusters'] #, 'true_clusters']\n if not self.args.is_data:\n headers += ['adj_mi', ] #, 'n_true_clusters']\n writer = csv.DictWriter(outfile, headers)\n writer.writeheader()\n\n for thresh in self.args.annotation_clustering_thresholds:\n adj_mi, partition = annotationclustering.vollmers(hmminfo, threshold=thresh, reco_info=self.reco_info)\n n_clusters = 
len(partition)\n if self.args.outfname is not None:\n row = {'n_clusters' : n_clusters, 'threshold' : thresh, 'clusters' : utils.get_str_from_partition(partition)}\n if not self.args.is_data:\n row['adj_mi'] = adj_mi\n # row['n_true_clusters'] = len(utils.get_true_partition(self.reco_info))\n # true_partition = [cl for cl in utils.get_true_partition(self.reco_info).values()]\n # row['true_clusters'] = utils.get_str_from_partition(true_partition)\n writer.writerow(row)\n if self.args.outfname is not None:\n outfile.close()\n\n if not self.args.no_clean:\n os.remove(self.hmm_outfname)", "def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table", "def write_results(self, all_features, n_samples, n_neighbours, layers, act_func, dropout_rate, learning_rate,\n iterations, optimum_idx, optimum_loss, start_test_cost, end_test_cost, accuracy_train,\n accuracy_test, accuracy_val, ROC_AUC_train, ROC_AUC_test, ROC_AUC_val, run_time):\n\n # Get the file name where the statistical results will be written to\n working_dir = os.path.dirname(os.path.abspath(__file__))\n file_name = working_dir + \"/output_ANN/results_ANN.txt\"\n\n # Extend the file with the results in the corresponding data types\n with open(file_name, 'a') as output:\n output.write(\n \"\\n{:s}\\t{:d}\\t{:d}\\t{:s}\\t{:s}\\t{:.2f}\\t{:f}\\t{:d}\\t{:d}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.2f}\\t\"\n \"{:.2f}\\t{:.2f}\\t{:.2f}\\t{:.2f}\\t{:.2f}\\t{:.0f}\".format(\n all_features, n_samples, n_neighbours, layers, act_func, dropout_rate, learning_rate,\n iterations, optimum_idx, optimum_loss, start_test_cost, end_test_cost, accuracy_train,\n accuracy_test, accuracy_val, ROC_AUC_train, ROC_AUC_test, ROC_AUC_val, run_time))", "def stat(**kwargs):\n print(\"output stats\")", "def stat_parser():\n from tools import file_importer, file_outporter\n from math import log\n \n print(\"this is stat parser\")\n \n relPath = \"bob/processed/24h_bobdata_ed2.csv\"\n outPathUp = \"bob/processed/24h_bobprots_up_full.csv\"\n outPathDown = \"bob/processed/24h_bobprots_down_full.csv\"\n inpF = file_importer(relPath)\n outFUp = file_outporter(outPathUp)\n outFDown = file_outporter(outPathDown)\n \n \n skipFlag = True\n \n for inpLine in inpF:\n if skipFlag:\n 
skipFlag = False\n outFDown.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n outFUp.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n curLine.append(inpI.strip(\"\\\"\\n\"))\n try: \n curLine[-1] = float(curLine[-1])\n except ValueError:\n curLine[-1] = 1 \n if curLine[-1] < 0.05 and int(curLine[3]) > 1: # check if protein has at least 2 unique peptides and has a significant p value\n curLine[4:10] = [int(x) for x in curLine[4:10]]\n enrScore = log((sum(curLine[4:7]) / 3.0)/(sum(curLine[7:10]) / 3.0),2) # calculate log2 enrichment score\n # print int(sum(curLine[4:7]) / 3.0), int(sum(curLine[7:10]) / 3.0)\n if sum(curLine[4:7]) / 3.0 > sum(curLine[7:10]) / 3.0: # if the mean of the KO intensities is higher than the wt \n for outI in curLine:\n outFDown.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFDown.write(\",\")\n if outI is curLine[-2]:\n outFDown.write(str(enrScore)+ \",\")\n else:\n outFDown.write(\"\\n\")\n # outFDown.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n else:\n # outFUp.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n for outI in curLine:\n outFUp.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFUp.write(\",\")\n if outI is curLine[-2]:\n outFUp.write(str(enrScore)+ \",\")\n else:\n outFUp.write(\"\\n\")\n \n inpF.close()\n outFUp.close()\n outFDown.close()\n print(\"stat_parser completed\")", "def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):\r\n suffix = '' # for *s after the p-value\r\n try:\r\n x = prob.shape\r\n prob = prob[0]\r\n except:\r\n pass\r\n if prob < 0.001: suffix = ' ***'\r\n elif prob < 0.01: suffix = ' **'\r\n elif prob < 0.05: suffix = ' *'\r\n title = [['Name','N','Mean','SD','Min','Max']]\r\n lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],\r\n [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]\r\n if type(fname)<>StringType or len(fname)==0:\r\n print\r\n print statname\r\n print\r\n pstats.printcc(lofl)\r\n print\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix\r\n print\r\n else:\r\n file = open(fname,writemode)\r\n file.write('\\n'+statname+'\\n\\n')\r\n file.close()\r\n writecc(lofl,fname,'a')\r\n file = open(fname,'a')\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n file.write(pstats.list2string(['\\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\\n\\n']))\r\n file.close()\r\n return None", "def _Analyze(role, output):\n with open(\n os.path.join(self.output_directory, os.path.basename(output)),\n 'r') as fp:\n output = json.loads(fp.read())\n metadata = {\n 'event': 'mpstat',\n 'role': role,\n }\n samples.extend(\n _MpstatResults(\n metadata,\n output,\n self.interval,\n per_interval_samples=self.per_interval_samples,\n ))", "def output(query,lda,features):\n roles = get_mostcommon(path,5000)\n all_roles = len(roles)\n irrelevant = irrelevant_features(features)\n #with open(\"guesses.txt\", \"w\") as text_file:\n # text_file.write('role:')\n # text_file.write('\\t')\n # text_file.write(\"guess: \")\n # text_file.write('\\t')\n # text_file.write(\"smatch: \")\n # 
text_file.write('\\n')\n for query in roles:\n #text_file.write(str(query))\n #text_file.write('\\t')\n guess = guess_topic(ilda,query,features, irrelevant)\n #smatch = try_normaliser(query)\n #if guess != smatch:\n # diff += 1\n print(query)\n # print(guess, '\\t' , smatch )\n print(guess)\n print()\n #text_file.write(str(guess))\n #text_file.write('\\t')\n #text_file.write(str(smatch))\n #print('guess: ', str(guess), '\\n')\n #print('smatch: ', str(smatch))\n #text_file.write('\\t')\n #text_file.write(str(smatch))\n #text_file.write('\\n')\n #text_file.write('\\n')", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def analyze(collector):\n mod_list = [\"Oxidation\", \"Deamidated\", \"Methyl\", \"Acetyl\", \"Phospho\"]\n fieldnames = (\n [\"approach\", \"count_type\", \"validation_engine\", \"unmodified\", \"multimodified\"]\n + mod_list\n + [\"total\"]\n )\n\n csv_writer = csv.DictWriter(open(\"ungrouped_results.csv\", \"w\"), fieldnames)\n csv_writer.writeheader()\n uc = ursgal.UController()\n uc.params[\"validation_score_field\"] = \"PEP\"\n uc.params[\"bigger_scores_better\"] = False\n\n # Count the number of identified peptides and PSMs for the different modifications\n # Spectra with multiple PSMs are sanitized, i.e. only the PSM with best PEP score is counted\n # and only if the best hit has a PEP that is at least two orders of\n # magnitude smaller than the others\n for validation_engine, result_file in collector.items():\n counter_dict = {\"psm\": ddict(set), \"pep\": ddict(set)}\n grouped_psms = uc._group_psms(\n result_file, validation_score_field=\"PEP\", bigger_scores_better=False\n )\n for spec_title, grouped_psm_list in grouped_psms.items():\n best_score, best_line_dict = grouped_psm_list[0]\n if len(grouped_psm_list) > 1:\n second_best_score, second_best_line_dict = grouped_psm_list[1]\n best_peptide_and_mod = (\n best_line_dict[\"Sequence\"] + best_line_dict[\"Modifications\"]\n )\n second_best_peptide_and_mod = (\n second_best_line_dict[\"Sequence\"]\n + second_best_line_dict[\"Modifications\"]\n )\n\n if best_peptide_and_mod == second_best_peptide_and_mod:\n line_dict = best_line_dict\n elif best_line_dict[\"Sequence\"] == second_best_line_dict[\"Sequence\"]:\n if best_score == second_best_score:\n line_dict = best_line_dict\n else:\n if (-1 * math.log10(best_score)) - (\n -1 * math.log10(second_best_score)\n ) >= 2:\n line_dict = best_line_dict\n else:\n continue\n else:\n if (-1 * math.log10(best_score)) - (\n -1 * math.log10(second_best_score)\n ) >= 2:\n line_dict = best_line_dict\n else:\n continue\n else:\n line_dict = best_line_dict\n\n count = 0\n for mod in mod_list:\n if mod in line_dict[\"Modifications\"]:\n count += 1\n key_2_add = \"\"\n if count == 0:\n key_2_add = \"unmodified\"\n elif count >= 2:\n key_2_add = \"multimodified\"\n elif count == 1:\n for mod in mod_list:\n if mod in line_dict[\"Modifications\"]:\n key_2_add = mod\n break\n # for peptide identification comparison\n counter_dict[\"pep\"][key_2_add].add(\n line_dict[\"Sequence\"] + line_dict[\"Modifications\"]\n )\n # for PSM comparison\n counter_dict[\"psm\"][key_2_add].add(\n line_dict[\"Spectrum Title\"]\n + line_dict[\"Sequence\"]\n + line_dict[\"Modifications\"]\n )\n for counter_key, count_dict in counter_dict.items():\n 
dict_2_write = {\n \"approach\": \"ungrouped\",\n \"count_type\": counter_key,\n \"validation_engine\": validation_engine,\n }\n total_number = 0\n for key, obj_set in count_dict.items():\n dict_2_write[key] = len(obj_set)\n total_number += len(obj_set)\n dict_2_write[\"total\"] = total_number\n csv_writer.writerow(dict_2_write)\n return", "def write(self, output):\n\n keys = self._motifs.keys()\n keys.sort()\n\n thre = self._motifs['threshold'] # threshold used for motif detection\n align = self._motifs[\"align\"] # alignment score used to detect motifs\n\n with open(output, \"w\") as o:\n\n # Precise time of the computation and print the whole results file\n print >> o, \"Launched:{} GMT Threshold used: {} AlignThreshold: {}\\n\" \\\n .format(strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime()), thre, align)\n for k in keys:\n if k != \"threshold\" and k != \"size\" and k != \"align\":\n smk = self._motifs[k] # motif number X, SMK stands for\n # Self._Motifs[K]\n # print the general information for this particular motif\n print >> o, \"\\n{} Start: {} Stop: {} \".format(k, smk[\"start\"], smk[\"stop\"]-1)\\\n + \"AvgPhylogeneticScore: {} AvgAlignScore: {} Size: {}\\n\"\\\n .format(smk[\"score\"], smk[\"align\"], smk[\"size\"])\n\n sub = smk.keys()\n sub.sort() # to have always the same order of sequences\n # print sequences\n for s in sub:\n if s != \"start\" and s != \"stop\" and s != \"score\" \\\n and s != \"align\" and s != \"size\":\n print >> o, \"{0:20}\\t{1:20}\\t{2}\".format(s, smk[s][\"start\"], smk[s][\"seq\"].upper())", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def results_aggregator(self, names):\n\t\tfor name in names:\n\t\t\tresult = self.main(name)\n\t\t\tself.results.append(result)\n\t\t\tprint(\"'%s' has been written to the file.\" % result[0])\n\t\t\t\"\"\"result is formatted name, number, rating, review count\"\"\"", "def do_analyse(args):\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n\n score = 'score'\n\n # Read in the results, and add a boolean target column.\n df = pd.read_csv(args.results, 
index_col=0)\n df['target'] = df['verify_speaker'] == df['enrol_speaker']\n\n # Calculate ideal 0.01% threshold over the multi-session data.\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n nontarget_count = nontarget_df[score].count()\n th_calc = nontarget_df.iloc[int(nontarget_count * (1 / 10000))][score]\n\n # Now filter the data so that we only consider mono-session enrolment and verification.\n df = df.loc[df['verify_room'] == df['enrol_room']]\n target_df = df.loc[df['target'] == True].sort_values(score, ascending=False)\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n target_count = target_df[score].count()\n nontarget_count = nontarget_df[score].count()\n\n # Calculate FA/FR for the user-defined threshold.\n th_user = args.th_user\n fr_user = target_df.loc[target_df[score] < th_user][score].count()\n fa_user = nontarget_df.loc[nontarget_df[score] > th_user][score].count()\n frr_user = fr_user / target_count\n far_user = fa_user / nontarget_count\n label_user = 'User Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_user, fr_user, frr_user * 100,\n fa_user, far_user * 100)\n\n # Calculate the FA/FR for the ideal threshold calculated from the multi-session data.\n fr_calc = target_df.loc[target_df[score] < th_calc][score].count()\n fa_calc = nontarget_df.loc[nontarget_df[score] > th_calc][score].count()\n frr_calc = fr_calc / target_count\n far_calc = fa_calc / nontarget_count\n label_calc = 'Calc Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_calc, fr_calc, frr_calc * 100,\n fa_calc, far_calc * 100)\n\n # Print the stats.\n print('\\nTarget Stats:')\n print(target_df[score].describe())\n print('\\nNon-Target Stats:')\n print(nontarget_df[score].describe())\n print('\\nThresholds:')\n print(label_user)\n print(label_calc)\n\n # Paint the graphs.\n paint_graph(score, 'verify_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'verify_speaker', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_speaker', df, th_user, label_user, th_calc, label_calc)", "def process_pr_results(self, results_files, custom_report):\n \n\n \n output_file = open(os.path.join(self.path, 'raw_results.txt'), 'w')\n \n #Keep track of the last read line before a newline; this will be the best value from an optimization run\n last_line = ''\n #Match a string of the format ( 0.0995749 0.101685 0.108192 0.091224 ) 0.091224 0 100\n #Contains parameter values, the best optimization value, the cpu time, and some other values, e.g. particle numbers that Copasi likes to add. 
These could be removed, but they seem useful.\n output_string = r'.*\\(\\s(?P<params>.+)\\s\\)\\s+(?P<best_value>\\S+)\\s+(?P<cpu_time>\\S+)\\s+(?P<function_evals>\\S+)\\.*'\n output_re = re.compile(output_string)\n \n best_value = None\n best_line = None\n \n #Copy the contents of the first file to results.txt\n for line in open(os.path.join(self.path, results_files[0]), 'r'):\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if best_value != None:\n if current_value < best_value:\n best_value = current_value\n best_line = line\n elif best_value == None:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n \n #And for all other files, copy everything but the last line\n for filename in results_files[1:]:\n firstLine = True\n for line in open(os.path.join(self.path, filename), 'r'):\n if not firstLine:\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if current_value < best_value:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n firstLine = False\n \n \n output_file.close()\n \n #Write the best value to results.txt\n output_file = open(os.path.join(self.path, 'results.txt'), 'w')\n \n output_file.write('Best value\\tCPU time\\tFunction evals\\t')\n \n for parameter in self.get_parameter_estimation_parameters():\n\n output_file.write(parameter[0].encode('utf8'))\n output_file.write('\\t')\n output_file.write('\\n')\n\n best_line_dict = output_re.match(best_line).groupdict()\n\n output_file.write(best_line_dict['best_value'])\n output_file.write('\\t')\n output_file.write(best_line_dict['cpu_time'])\n output_file.write('\\t')\n output_file.write(best_line_dict['function_evals'])\n output_file.write('\\t')\n \n for parameter in best_line_dict['params'].split('\\t'):\n output_file.write(parameter)\n output_file.write('\\t')\n output_file.close()\n \n if best_value != None:\n return True\n else:\n return False", "def process_results(percentage_ids, alignment_lengths, percentage_ids_other,\n alignment_lengths_other, best_hits):\n\n len_percentage_ids = len(percentage_ids)\n len_alignment_lengths = len(alignment_lengths)\n results = []\n\n for i, j in [(i,j) for i in range(len_percentage_ids) for j in range(len_alignment_lengths)]:\n filename = \"p1_%d-a1_%d_p2_%d-a2_%d\" % (percentage_ids[i],\n alignment_lengths[j], percentage_ids_other[i], alignment_lengths_other[j])\n results.append({ 'filename': filename, 'db_interest': 0, 'db_other': 0,\n 'perfect_interest': 0, 'equal': 0, 'summary': ['#SeqId\\tFirst\\t'\n 'Second'], 'db_seqs_counts': {'a': {}, 'b': {} } })\n\n for seq_name, values in best_hits.items():\n seq_name = seq_name.split(' ')[0].strip()\n for i, vals in enumerate(values):\n if not vals:\n continue\n\n # Validating duplicated results in the databases\n #@@@ Do this step in a different script early in the pipeline\n if vals['a']['subject_id'] not in results[i]['db_seqs_counts']['a']:\n results[i]['db_seqs_counts']['a'][vals['a']['subject_id']]=0\n if vals['a']['subject_id'] == results[i]['db_seqs_counts']['b']:\n raise Warning, \"%s is in both databases\" % vals['a']['subject_id']\n if vals['b']['subject_id'] not in results[i]['db_seqs_counts']['b']:\n 
results[i]['db_seqs_counts']['b'][vals['b']['subject_id']]=0\n if vals['b']['subject_id'] == results[i]['db_seqs_counts']['a']:\n raise Warning, \"%s is in both databases\" % vals['b']['subject_id']\n\n # Comparing bit_scores to create outputs\n if vals['a']['bit_score']==vals['b']['bit_score']:\n results[i]['equal'] += 1\n results[i]['summary'].append('%s\\t%s\\t%s' % (seq_name, vals['a']['subject_id'], vals['b']['subject_id']))\n results[i]['db_seqs_counts']['a'][vals['a']['subject_id']] += 1\n results[i]['db_seqs_counts']['b'][vals['b']['subject_id']] += 1\n elif vals['a']['bit_score']>vals['b']['bit_score']:\n if not vals['b']['subject_id']:\n results[i]['perfect_interest'] += 1 \n results[i]['summary'].append('%s\\t%s\\t' % (seq_name, vals['a']['subject_id']))\n results[i]['db_seqs_counts']['a'][vals['a']['subject_id']] += 1\n else:\n results[i]['db_other'] += 1\n results[i]['summary'].append('%s\\n\\t%s' % (seq_name, ''))\n results[i]['db_seqs_counts']['b'][vals['b']['subject_id']] += 1\n\n return results", "def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg", "def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg", "def ANOVA_stats(subject_list, data_dir, h5_type, model_types):\n\t\n\tall_subjs = []\n\tall_models = []\n\tall_corrs = []\n\tcorrs = dict()\n\n\tfor model in model_types: # 3 total models we are comparing\n\t\tfor s in subject_list:\n\t\t\t# Load the STRF file for each individual model for the subject of interest\n\t\t\t# (phnfeat only, env only, or pitch only)\n\t\t\tstrf_file = '%s/%s/%s_STRF_by_%s_%s.hf5'%(data_dir, s, s, model, h5_type) # The STRF for this subject and this model type (env, phnfeat, or pitch)\n\t\t\twith h5py.File(strf_file,'r') as hf:\n\t\t\t\tcorrs[s] = hf['corrs_%s' %(h5_type.lower())][:] # Load the corrs\n\t\t\tfor ch in np.arange(64):\n\t\t\t\t# We have to do this so we have the subjects and models\n\t\t\t\t# columns that match the correlations vector\n\t\t\t\tall_subjs.append(s)\n\t\t\t\tall_models.append(model)\n\t\t\t\tall_corrs.append(corrs[s][ch])\n\tdata= {'corrs': np.array(all_corrs).ravel(), 'subject': all_subjs, 'STRF_type': all_models}\n\tdf = pd.DataFrame.from_dict(data)\n\tdf\n\t\n\t# Run a Friedman ANOVA (non-parametric equivalent of the repeated measures ANOVA)\n\t# with STRF performance as yhour dependent variable, STRF type (env, phnfeat, pitch) \n\t# as your within subjects measure, and subject as your subject. 
Look at p-unc for\n\t# the p value\n\tdata = df.groupby(['subject', 'STRF_type']).mean().reset_index()\n\t#print(data)\n\tpg.friedman(data=df, dv='corrs', within='STRF_type', subject='subject')\n\t\n\t# if p<0.05, run post-hoc sign-rank tests\n\n\t#extract just the corr values from the dataframe - will be used for post-hoc sign-rank tests\n\tpitch_x = data['corrs'][np.where(data['STRF_type']=='pitch')[0]]\n\tphnfeat_x = data['corrs'][np.where(data['STRF_type']=='phnfeat')[0]]\n\tenvs_x = data['corrs'][np.where(data['STRF_type']=='envs')[0]]\n\ttotalmodel_x = data['corrs'][np.where(data['STRF_type']=='pitchenvsphnfeat')[0]]\n\n\n\t#run wilcoxon signrank test - compare total model with individual features\n\tprint(pg.wilcoxon(totalmodel_x, phnfeat_x, tail='two-sided')) \n\tprint(pg.wilcoxon(totalmodel_x, envs_x, tail='two-sided')) \n\tprint(pg.wilcoxon(totalmodel_x, pitch_x, tail='two-sided'))\n\n\t#run wilcoxon signrank test - compare individual feature models with each other \n\tprint(pg.wilcoxon(phnfeat_x,pitch_x, tail='two-sided'))\n\tprint(pg.wilcoxon(envs_x, pitch_x, tail='two-sided'))\n\tprint(pg.wilcoxon(phnfeat_x, envs_x, tail='two-sided'))", "def annotate_rank_summary_with_pscore_RAxML(filename, delimiter=','):\n\tmotif_dir = os.path.dirname(filename)\n\tin_csv = csv.DictReader(open(filename), delimiter=delimiter) # cmfinder rank summaries are comma-separated\n\twith open(filename + '.RAxML_added', 'w') as out_f:\n\t\tif in_csv.fieldnames is None:\n\t\t\tprint >> sys.stderr, \"file {0} is odd. IGNORE now\".format(filename)\n\t\t\treturn\t\n\t\tpscore_fieldnames = [('pscore_RAxML','pscoreout_RAxML'), \\\n\t\t\t\t ('pscore_RAxML_fixTreeEstBranch','fixTreeEstBranch.pscoreout_RAxML')]\n\t\tnew_fieldnames = in_csv.fieldnames + [x[0] for x in pscore_fieldnames]\n\t\tout_csv = csv.DictWriter(out_f, new_fieldnames, delimiter=delimiter)\n\t\t# need to write out the field names\n\t\t#out_csv.writeheader()# lol this function only in 2.7 and i have 2.6 Orz \n\t\tout_f.write(delimiter.join(new_fieldnames) + '\\n')\n\t\tfor obj in in_csv:\n\t\t\tmotif_full_path = os.path.join(motif_dir, obj['motif'])\n\t\t\tprint >> sys.stderr, motif_full_path\n\t\t\tfor field, suffix in pscore_fieldnames:\n\t\t\t\tpscore_RAxML = os.popen(\"grep \\\"Total pair posterior\\\" {0}.{1}\".format(motif_full_path, suffix)).read().strip()\n\t\t\t\ttry:\n\t\t\t\t\tobj[field] = float( pscore_RAxML[len('Total pair posterior '):] )\n\t\t\t\texcept ValueError:\n\t\t\t\t\tif len(os.popen(\"grep \\\"Fail to open\\\" {0}.{1}\".format(motif_full_path, suffix)).read().strip()) > 0:\n\t\t\t\t\t\tobj[field] = 'NA'\n\t\t\tout_csv.writerow(obj)", "def write_results(gold, pred, ratings, text):\n\n f = open(\"results.txt\", \"w\")\n for g, p, r, t in zip(gold, pred, ratings, text):\n f.write(\"%d\\t%d\\t%d\\t%s\\n\" % (g,p,r,t))\n\n f.close()", "def main(titles, paragraphs, printDetailedStats = True):\n avg_precision = 0 # all the precision values will be added to this value. 
In the end, this value will be divided by\n # the number of comparisons analysed to report the average precision\n avg_recall = 0\n avg_fmeasure = 0\n\n if paragraphs:\n manual_directory = path.MANUAL_PARAGRAPHS\n auto_directory = path.OUTDIR_PARAGRAPHS\n else:\n manual_directory = path.MANUAL_SENTENCES\n auto_directory = path.OUTDIR_SENTENCES\n\n for title in titles:\n with open(manual_directory + title) as m:\n with open(auto_directory + title) as a:\n tPositive, fPositive, fNegative, precision, recall, fmeasure = \\\n compare(m.readlines(), a.readlines(), paragraphs)\n avg_precision += precision\n avg_recall += recall\n avg_fmeasure += fmeasure\n if printDetailedStats: # then report precision and recall for every individual article\n print(\"comparing \" + title)\n print(\"tp=\" + str(tPositive) + \" fn=\" + str(fNegative) + \" fp=\" + str(fPositive))\n print(\"precision=\" + str(round(precision, 5)) + \"\\t\\t recall=\" + str(\n round(recall, 5)) + \"\\t\\t fmeasure=\" + str(round(fmeasure, 5)) + \" \\n\\n\")\n print(\"AVERAGE_PRECISION=\" + str(round(avg_precision/len(titles), 5)) + \"\\t\\t AVERAGE_RECALL=\"\n + str(round(avg_recall/len(titles), 5)) + \"\\t\\t AVERAGE_F_MEASURE=\"\n + str(round(avg_fmeasure/len(titles), 5)))\n return avg_fmeasure/len(titles)", "def all_raw_stats(r1_stats, r2_stats, name):\n raw_output= name + \"_raw_read_counts.txt\"\n raw_R1_reads, raw_R1_bases, Q20_R1_bp, Q20_R1_percent, Q30_R1_bp, Q30_R1_percent = get_raw_stats(r1_stats)\n raw_R2_reads, raw_R2_bases, Q20_R2_bp, Q20_R2_percent, Q30_R2_bp, Q30_R2_percent = get_raw_stats(r2_stats)\n write_raw_stats(raw_R1_reads, raw_R1_bases, Q20_R1_bp, Q20_R1_percent, Q30_R1_bp, Q30_R1_percent, raw_R2_reads, raw_R2_bases, Q20_R2_bp, Q20_R2_percent, Q30_R2_bp, Q30_R2_percent, raw_output, name)", "def _snps_failed_report(write: bool=False, miss_threshold: float=0.2,\n maf_threshold: float=0.00001, hwe_threshold: float=1e-6,\n lmissfile: str=\"plink.lmiss\", maffile: str=\"MAF_check.frq\",\n hwefile: str=\"plink.hwe\"):\n snps = {}\n ids_list = []\n lmiss = pd.read_csv(lmiss_file, delimiter=\" \", skipinitialspace=True)\n\n missing_snps = lmiss.loc[lmiss['F_MISS'] > miss_threshold]\n snps['missing_snps'] = missing_snps['SNP'].tolist()\n ids_list.append(missing_snps['SNP'].tolist())\n # print(\"total missing snps failed: \", len(missing_snps['SNP'].tolist()))\n\n # MAF\n maf = pd.read_csv(maf_file, delimiter=\" \", skipinitialspace=True)\n rare = maf.loc[maf['MAF'] < maf_threshold]\n snps['maf'] = rare['SNP'].tolist()\n ids_list.append(rare['SNP'].tolist())\n # print(\"total maf snps failed: \", len(rare['SNP'].tolist()))\n\n # HWE departures\n hardy = pd.read_csv(hwe_file, delimiter=\" \", skipinitialspace=True)\n hwe_failed = hardy.loc[hardy['P'] < hwe_threshold]\n snps['hwe'] = hwe_failed['SNP'].tolist()\n ids_list.append(hwe_failed['SNP'].tolist())\n # print(\"total hwe snps failed: \", len(hwe_failed['SNP'].tolist()))\n\n # graph everything\n tests = ['SNP Missingness', 'Minor Allele Frequency', 'Outlying HWE']\n fail_counts = [len(missing_snps['SNP'].tolist()), len(rare['SNP'].tolist()), len(hwe_failed['SNP'].tolist())]\n total_fails = set(x for l in ids_list for x in l)\n # print(\"total fails: \", len(total_fails))\n\n fig = plt.figure(figsize=(8,6))\n plt.tight_layout()\n plt.bar(x=tests, height=fail_counts)\n plt.title(\"SNPs failing QC checks (total: {}/{})\".format(len(total_fails), lmiss.shape[0]))\n plt.xlabel(\"QC Test\")\n plt.ylabel(\"Number of SNPs\")\n plt.tick_params(axis='x', rotation=90)\n\n if 
write:\n write_fail_file(snps, \"failed_snps_ids\")\n\n return fig" ]
[ "0.65849566", "0.5925648", "0.5819624", "0.57653034", "0.5762071", "0.5727758", "0.5691433", "0.56473553", "0.5570363", "0.55676913", "0.55645865", "0.55539656", "0.55180854", "0.5451508", "0.5381246", "0.5380846", "0.53807884", "0.53778684", "0.5371544", "0.5353347", "0.5350788", "0.5330331", "0.53193647", "0.53193647", "0.53076583", "0.5284841", "0.5273059", "0.5267607", "0.52668613", "0.5262589" ]
0.6283277
1
write a message to a given output stream
def write(self, out, message): if out != None: out.write(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, msg, *_):\n if self.out is not None:\n self.out.write(msg)\n self.out.flush()", "def _write_stream(self, stream_id, content):\n # We have to be careful about how we encode the bytes. It's better to assume it is utf-8 and just\n # serialize it that way.\n encoded_content = six.text_type(content).encode(\"utf-8\")\n # When we send over a chunk of bytes to the client, we prefix it with a code that identifies which\n # stream it should go to (stdout or stderr) and how many bytes we are sending. To encode this information\n # into a single integer, we just shift the len of the bytes over by one and set the lower bit to 0 if it is\n # stdout, or 1 if it is stderr.\n code = len(encoded_content) * 2 + stream_id\n\n self.__channel_lock.acquire()\n try:\n if self.__channel_lock is not None:\n # 2->TODO struct.pack|unpack in python < 2.7.7 does not allow unicode format string.\n self.__channel.write(\n compat.struct_pack_unicode(\"i\", code) + encoded_content\n )\n elif stream_id == RedirectorServer.STDOUT_STREAM_ID:\n self.__sys.stdout.write(content)\n else:\n self.__sys.stderr.write(content)\n finally:\n self.__channel_lock.release()", "def write(self, message):\r\n os.write(self.wfid, message.encode('utf-8'))", "def write (self, message, dest):\n raise NotImplementedError( 'Needs implementation' )", "def emit_message(message, stream=None):\n if stream is None:\n stream = sys.stderr\n stream.write(\"%(message)s\\n\" % vars())\n stream.flush()", "def write(self, msg):\n cmd = self.__compose(msg)\n self.sock.send(cmd)", "def write(self, output_buffer):\n self.__writer_func(self.__stream_id, output_buffer)", "def _output(self, message, verbosity, exact, stream):\n if exact:\n if self.config.verbosity == verbosity:\n stream.write(message + \"\\n\")\n else:\n if self.config.verbosity >= verbosity:\n stream.write(message + \"\\n\")", "def _writeOutput(self, msg, outputFile):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n f.write (msg)\n f.close()", "def write(self, msg):\n # FIXME: do we have to check the size of msg and split output? 
\n return self.inout.send(Mtcpfns.pack_msg(msg))", "def write(content, port=sys.stdout):\n require_type(is_output(port), 'the parameter of write must be an output file')\n if port is sys.stdout:\n display(content)\n return\n port.write(tostr(content))", "def writeOutput(self, output):", "def writeMessage(self,message):\n pass", "def write(self, out):", "def write(message):\n __terminalState.osSupport.print(message)", "def write_out(self, message, verbosity_level=1):\n if self.verbosity and self.verbosity >= verbosity_level:\n sys.stdout.write(smart_str(message))\n sys.stdout.flush()", "def write_out(self, message, verbosity_level=1):\n if self.verbosity and self.verbosity >= verbosity_level:\n sys.stdout.write(smart_str(message))\n sys.stdout.flush()", "def write(self, msg):\n\n sys.stderr.write(msg)", "def _write_message(self, message):\n raw_data = message.serialize()\n debug(\"writing outgoing message of type \" + message.__class__.__name__)\n self.request.sendall(raw_data)", "def write(self, data):\n\t\tself.outputbuffer.write(data)", "def write(self, data):\n _check_init()\n self._check_open()\n\n self._output.Write(data)", "def write(self, value):\r\n self.__output__.write(value)", "async def write(self, msg: Union[str, bytes]) -> None:\n if isinstance(msg, bytes):\n self.writer.write(msg)\n else:\n self.writer.write(msg.encode(\"utf-8\"))\n self.writer.write(b\"\\n\")\n await self.writer.drain()", "def write(self, msg):\n if self._writer is None:\n raise NotImplementedError(\"Function not available while socket is closed.\")\n self._writer.write(msg)\n self._writer.flush()", "def StdOut(self, message):\n sys.stdout.write('{0:s}\\n'.format(message))\n sys.stdout.flush()", "def _write(message: Optional[str] = None) -> None:\n if message is not None:\n stdout(\"%s\\n\" % message)\n else:\n stdout(\"\\n\")", "def write(self, *args):\n\n self._write(self._out, *args)", "def write(self, msg):\n sys.stderr.write(msg)", "def writeMessage(message):\n stderr.write(message + '\\n')\n stderr.flush()\n return", "def write(self, msg):\n self.sock.send(msg.encode())" ]
[ "0.7389709", "0.69831085", "0.68361956", "0.67861736", "0.677018", "0.66936415", "0.6660411", "0.6635271", "0.66243285", "0.6623509", "0.66060543", "0.6497261", "0.64774483", "0.64748174", "0.63748026", "0.6370138", "0.6370138", "0.63609517", "0.6360882", "0.6360357", "0.63523597", "0.6297278", "0.6227231", "0.6226178", "0.62184095", "0.6215318", "0.6212212", "0.6187694", "0.6168072", "0.6154782" ]
0.7655083
0
Compute lists of detected and annotated mentions and compare them. count number of true positives, false positives, and false negatives.
def compareAnnotatedAndDetected(self, sentence, mType, irStats, errorOut=None): aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True) dList = sentence.getDetectedMentions(mType, recomputeMentions=True) if len(aList) == 0 and len(dList) == 0: return self.compareMentionLists(dList, aList, mType, irStats, errorOut)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compareMentionLists(self, dList, aList, mType, irStats, errorOut=None):\n # build lists of overlapping mentions for annotated and detected mentions in this sentence\n potentialMatches = {}\n for aMention in aList:\n potentialMatches[aMention] = []\n for dMention in dList:\n potentialMatches[dMention] = []\n for aMention in aList:\n if dMention.countOverlapTokens(aMention) > 0:\n potentialMatches[dMention].append(aMention)\n potentialMatches[aMention].append(dMention)\n \n # check matches for each detected template\n for dMention in dList:\n aMentionList = potentialMatches[dMention]\n if len(aMentionList) == 1 and dMention.matchAnnotated(aMentionList[0]):\n # there is only one annotated mention that matches this detected one\n # this is either a TP or a DUPLICATE\n annotatedMention = aMentionList[0]\n if len(potentialMatches[annotatedMention]) == 1:\n # this detected mention matches only ONE annotated one, count as TP\n # OTHERWISE, deal with it when we process annotated mentions\n dMention.matchedMention = annotatedMention\n annotatedMention.matchedMention = dMention\n# self.write(errorOut, '+TP: '+dMention.text+' == '+annotatedMention.text+' ('+mType+')\\n')\n self.write(errorOut, '+TP: %s == %s %s (%s)\\n'%(dMention.text, annotatedMention.text, annotatedMention, mType))\n\n irStats.incTP() \n else:\n # this detected mention overlaps multiple annotated mentions. \n # OR it does not match any annotated mention. either way, discard it.\n # count it as a FP\n self.write(errorOut, '-FP: '+dMention.text+' ('+mType+')\\n')\n irStats.incFP()\n for aMention in aMentionList:\n potentialMatches[aMention].remove(dMention)\n self.write(errorOut, 'DETECTED MENTION OVERLAPS '+aMention.text+'\\n')\n potentialMatches[dMention] = []\n\n # check matches for each annotated mention \n for annotatedMention in aList:\n dMatches = potentialMatches[annotatedMention]\n if len(dMatches) == 0:\n # annotated mention was unmatched, count as FN\n irStats.incFN()\n self.write(errorOut, '-FN: '+annotatedMention.text+' ('+mType+')\\n')\n elif len(dMatches) > 1:\n # annotated mention overlapped multiple detected ones\n # check each one to see if it counts as a match\n # If more than one does, count the best match as a TP\n # and the rest as duplicates.\n bestMatches = []\n for dMention in dMatches:\n if dMention.matchAnnotated(annotatedMention):\n overlap = dMention.countOverlapTokens(annotatedMention)\n bestMatches.append([overlap, dMention])\n dMention.matchedMention = annotatedMention\n else:\n # detected mention did not sufficiently match, count as FP\n self.write(errorOut, '-FP: '+dMention.text+' ('+mType+')\\n')\n irStats.incFP()\n\n if len(bestMatches) > 0:\n # count best match\n bestMatches.sort()\n dMention = bestMatches[-1][1]\n dMention.matchedMention = annotatedMention\n annotatedMention.matchedMention = dMention \n self.write(errorOut, '+TP: '+dMention.text+' == '+annotatedMention.text+' ('+mType+')\\n') \n irStats.incTP() \n # count duplicates\n for i in range(0, len(bestMatches)-1):\n irStats.incDuplicates()\n dMention = bestMatches[i][1] \n self.write(errorOut, 'ANNOTATED MENTION ALSO MATCHES ')\n self.write(errorOut, dMention.text+'\\n')\n dMention.matchedMention = annotatedMention\n else:\n # there are no valid matches\n irStats.incFN()\n self.write(errorOut, '-FN: '+annotatedMention.text+' ('+mType+')\\n')", "def hits_and_misses(guess_seq, true_seq):\n num_hits = sum(guess_token == true_token\n for guess_token, true_token\n in zip(guess_seq, true_seq))\n num_misses = sum((Counter(guess_seq) & 
Counter(true_seq)).values()) - num_hits\n return num_hits, num_misses", "def check_adr_lexicon(annotations_dict, adr_lexicon_dict):\n\n adrs_matching_labels = 0\n adrs_not_found_in_lexicon = 0\n indications_matching_labels = 0\n indications_not_found_in_lexicon = 0\n for i, (k, v) in enumerate(annotations_dict.items()):\n for index, annotation in enumerate(v):\n # tweet = tweets_dict[k]\n annotatedText = annotation['annotatedText']\n\n is_adr_lexicon = is_in_adr_lexicon(annotatedText, adr_lexicon_dict)\n if is_adr_lexicon:\n # print(\"ADR lexicon contains this text {}\".format(annotatedText))\n # detected_adrs += 1\n if annotation['semanticType'] == \"ADR\":\n adrs_matching_labels += 1\n else:\n indications_matching_labels += 1\n else:\n if annotation['semanticType'] == \"ADR\":\n adrs_not_found_in_lexicon += 1\n else:\n indications_not_found_in_lexicon += 1\n\n print(\"Number of ADR mentions present in the ADR Lexicon: {}\".format(adrs_matching_labels))\n print(\"Number of Indication mentions present in the ADR Lexicon: {}\".format(indications_matching_labels))\n print(\"Number of ADR mentions not present in the ADR Lexicon: {}\".format(adrs_not_found_in_lexicon))\n print(\"Number of Indication mentions not present in the ADR Lexicon: {}\".format(indications_not_found_in_lexicon))", "def computeStats(self, absList, statOut=None, errorOut=None):\n \n stats = EntityStats(self.entityTypes)\n for abs in absList:\n errorOut.write('---'+abs.id+'---\\n') \n \n # identify ALL annotated mentions, even in sentences we are not focused on\n# for sentence in abs.allSentences():\n# for mType in self.entityTypes:\n# aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)\n# \n# for sentence in abs.sentences:\n# for mType in self.entityTypes:\n# self.compareAnnotatedAndDetected(sentence, mType, \\\n# stats.irstats[mType], errorOut)\n\n\n for sentence in abs.allSentences():\n for mType in self.entityTypes:\n if sentence in abs.sentences:\n self.compareAnnotatedAndDetected(sentence, mType, \\\n stats.irstats[mType], errorOut)\n else: \n aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)\n \n\n stats.printStats()\n if statOut != None:\n stats.saveStats(statOut, keyPrefix='MF - ')\n \n return stats", "def countOverlapTokens(self, annotatedMention):\n if self.tokens[0].sentence != annotatedMention.tokens[0].sentence:\n return 0 # mentions in different sentences, no overlap \n \n if self.end < annotatedMention.start or self.start > annotatedMention.end:\n return 0 # mention ends before or starts after annotated one\n \n if self.start == annotatedMention.start and self.end == annotatedMention.end:\n return len(self.tokens) # exact match for annotated mention\n \n # There is some overlap. 
Does it consist of anything substantial?\n importantTokens = 0\n for token in self.tokens:\n if token.index >= annotatedMention.start \\\n and token.index <= annotatedMention.end \\\n and token.isSymbol() == False and token.isStopWord() == False:\n importantTokens += 1\n \n return importantTokens", "def get_mention_counts(articles, skip_nils=True):\n gold_forms=[]\n gold_links=[]\n for example_article in articles:\n for entity in example_article.entity_mentions:\n mention=entity.mention\n meaning=entity.gold_link\n if not skip_nils or meaning!='--NME--':\n gold_forms.append(mention)\n gold_links.append(meaning)\n cnt_instances=Counter(gold_links)\n cnt_forms=Counter(gold_forms)\n return cnt_instances, cnt_forms", "def analyze(self, word_count_thresh):", "def simple_disambiguation(images, senses, labels, image_column, verb_types):\n accuracy = {'motion': [0, 0], 'non_motion': [0, 0]}\n for _, image_row in enumerate(images.itertuples()):\n i_t = np.array(getattr(image_row, image_column))\n image_id = image_row.Index\n verbs = labels.query('image == @image_id')['lemma'].to_frame()\n\n for _, verb_row in enumerate(verbs.itertuples()):\n verb = verb_row.lemma\n filtered_senses = senses.query('lemma == @verb')\n # Cosine similarity between image i_t and every other sense s_t\n dot_prod = filtered_senses['e_combined'].apply(\n lambda s_t: -1 if np.all(i_t == None) else np.dot(i_t, s_t))\n s_hat = dot_prod.values.argmax()\n if np.max(dot_prod) == -1: # the image can't be represented\n continue\n pred_sense_id = filtered_senses.iloc[s_hat]['sense_num']\n sense_id = labels.query('image == @image_id and lemma == @verb')['sense_chosen'].iloc[0]\n\n # Accuracy statistics\n if verb in verb_types['motion']:\n if sense_id == pred_sense_id:\n accuracy['motion'][1] += 1\n else:\n accuracy['motion'][0] += 1\n elif verb in verb_types['non_motion']:\n if sense_id == pred_sense_id:\n accuracy['non_motion'][1] += 1\n else:\n accuracy['non_motion'][0] += 1\n else:\n raise ValueError('Unknown verb type')\n\n print('%s representation, sense accuracy:' % image_column)\n print('Motion verbs: %s' % ((accuracy['motion'][1] / (accuracy['motion'][0] + accuracy['motion'][1])) * 100))\n print('Non-motion verbs: %s' % ((accuracy['non_motion'][1] / (accuracy['non_motion'][0] + accuracy['non_motion'][1])) * 100))\n print('-')", "def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n for w in l1:\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n if w in l2:\n count += 1\n found_idfs.append(val)\n else:\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n\n\n return count / self.normalize_factor, avg_found, avg_unfound", "def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n word_count_dict = self.list_to_word_count_dict(l1)\n for w in l2:\n was_found = False\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n for i,w2 in enumerate(l1):\n if (w2 == w and word_count_dict[(w2,i)] == 0):\n word_count_dict[(w2,i)] = 1\n count += 1\n found_idfs.append(val)\n was_found = True\n break\n if (was_found):\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n # full idf features\n unfound_vec = 
list(sorted(unfound_idfs, reverse=True))\n found_vec = list(sorted(found_idfs, reverse=True))\n unfound_vec = self.pad_or_cut_vec(unfound_vec, self.LENGTH_MAX)\n found_vec = self.pad_or_cut_vec(found_vec, self.LENGTH_MAX)\n\n return count , avg_found, avg_unfound, found_vec, unfound_vec", "def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score", "def classify(texts: List[str], params: Any) -> List[str]:\n\n alpha = 1\n token_probs_pos = params[\"token_probs_pos\"]\n token_probs_neg = params[\"token_probs_neg\"]\n all_words = params[\"all_words\"]\n M = len(all_words)\n cnt_pos_docs = params[\"cnt_pos_docs\"]\n cnt_neg_docs = params[\"cnt_neg_docs\"]\n\n sum_len_neg = params[\"sum_len_neg\"]\n sum_len_pos = params[\"sum_len_pos\"]\n pos_dict = params[\"pos_dict\"]\n neg_dict = params[\"neg_dict\"]\n\n\n test_texts = preprocessing(texts)\n test_tokenized_texts = text_to_tokens(test_texts)\n \n res = []\n log_pos_probablity = 0\n log_neg_probablity = 0\n i = 0\n for text in test_tokenized_texts:\n if (i % 5000 == 0):\n print(\"Classified\", i, \"texts\")\n i += 1\n log_pos_probablity = log(cnt_pos_docs)\n log_neg_probablity = log(cnt_neg_docs)\n for token in text:\n if (token_probs_pos[token] == 0):\n token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)\n else:\n log_pos_probablity += log(token_probs_pos[token])\n if (token_probs_neg[token] == 0):\n token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)\n else:\n log_neg_probablity += log(token_probs_neg[token])\n if (log_neg_probablity > log_pos_probablity):\n res.append(\"neg\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # neg_dict[token] += text[token]\n # sum_len_neg += text[token]\n # token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n\n else:\n res.append(\"pos\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # pos_dict[token] += text[token]\n # sum_len_pos += text[token]\n # token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n\n\n \n print('Predicted labels counts:')\n print(count_labels(res))\n return res", "def confusion_stats(set_true, set_test):\n true_pos = len(set_true.intersection(set_test))\n false_pos = len(set_test.difference(set_true))\n false_neg = len(set_true.difference(set_test))\n \n return true_pos, false_pos, false_neg", "def _detection_scores(inputs, gt_boxes, gt_labels, model):\n model = check_model('model', model, BlackModel)\n boxes_and_confi, pred_labels = model.predict(*inputs)\n det_scores = []\n correct_labels_num = []\n # repeat gt_boxes and gt_labels for all particles cloned from the same sample in PSOAttack/GeneticAttack\n if gt_boxes.shape[0] == 1 and boxes_and_confi.shape[0] > 1:\n gt_boxes = np.repeat(gt_boxes, boxes_and_confi.shape[0], axis=0)\n gt_labels = np.repeat(gt_labels, boxes_and_confi.shape[0], axis=0)\n iou_thres = 0.5\n for boxes, labels, gt_box, gt_label in zip(boxes_and_confi, pred_labels, gt_boxes, 
gt_labels):\n gt_box_num = gt_box.shape[0]\n score = 0\n box_num = boxes.shape[0]\n correct_label_flag = np.zeros(gt_label.shape)\n for i in range(box_num):\n pred_box = boxes[i]\n max_iou_confi = 0\n for j in range(gt_box_num):\n iou = calculate_iou(pred_box[:4], gt_box[j][:4])\n if labels[i] == gt_label[j] and iou > iou_thres and correct_label_flag[j] == 0:\n max_iou_confi = max(max_iou_confi, pred_box[-1] + iou)\n correct_label_flag[j] = 1\n score += max_iou_confi\n det_scores.append(score)\n correct_labels_num.append(np.sum(correct_label_flag))\n return np.array(det_scores), np.array(correct_labels_num)", "def information_content_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets_sentence_1:\n L = []\n for ss in synsets_sentence_2:\n try:\n L.append(synset.lin_similarity(ss, brown_ic))\n except:\n continue\n if L: \n best_score = max(L)\n score += best_score\n count += 1\n # Average the values\n if count > 0: score /= count\n return score", "def analyze_reviews(reviews):\n\n good_reviews=reviews[reviews['rs_review_movie_score']>=9]\n bad_reviews=reviews[reviews['rs_review_movie_score']<=2]\n\n print 'len(good_reviews)=%s' % len(good_reviews)\n print 'len(bad_reviews)=%s' % len(bad_reviews)\n\n m = re.compile('\\d')\n\n english_stop_words=stopwords.words('english')\n\n\n def tokenize(text):\n tokens=nltk.word_tokenize(text)\n # strip out trailing puncutation\n tokens = [ token[:-1] if token[-1] in ['.',',','!','?'] else token for token in tokens]\n\n # lower case\n tokens = [token.lower() for token in tokens]\n\n # Take only relativly long characters\n tokens = [token for token in tokens if len(token)>=3]\n\n # remove words with numbers/digits\n tokens = [token for token in tokens if m.search(token) is None]\n\n # Remove stop words: http://nltk.googlecode.com/svn/trunk/doc/book/ch02.html\n tokens = [token for token in tokens if token not in english_stop_words]\n return tokens\n\n good_tokens_list = []\n for i,review in good_reviews.iterrows():\n text=review['rs_review_text']\n good_tokens_list.append(tokenize(text))\n\n bad_tokens_list = []\n for i,review in bad_reviews.iterrows():\n text=review['rs_review_text']\n bad_tokens_list.append(tokenize(text))\n\n all_words=Counter()\n for tokens in good_tokens_list + bad_tokens_list:\n for token in tokens:\n all_words[token]+=1\n\n most_common=all_words.most_common(2000)\n 
most_common=zip(*most_common)[0]\n\n print 'most_common_words = ',most_common[-20:]\n\n def document_features(tokens):\n return {word:word in tokens for word in most_common}\n\n good_set=[(document_features(tokens), 'pos') for tokens in good_tokens_list]\n bad_set=[(document_features(tokens), 'neg') for tokens in bad_tokens_list]\n\n train_set = good_set + bad_set\n random.shuffle(train_set) # dunno if this is necessary\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n\n print 'accuracy',nltk.classify.accuracy(classifier, train_set)\n\n classifier.show_most_informative_features(300)\n\n return classifier", "def countOccurrences(self, wordsToCheck):\n count = 0\n for token in self.importantTokenList():\n w = token.text\n for wtc in wordsToCheck:\n if wtc == w:\n count = count + 1\n return count", "def analyze(self, text):\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent", "def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n start_time = time.time()\n\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) 
add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n print(time.time() - start_time)\n return sent_dict_list", "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def computeStats(self, absList, statOut, errorOut):\n \n nMentions = 0\n pSum = 0\n rSum = 0\n for abstract in absList:\n # build hash of annotated clusters/chains keyed by ID\n errorOut.write('\\n---- '+abstract.id+' ----\\n')\n trueChainLengths = {}\n entityList = abstract.annotatedEntities.getList(self.entityTypes[0])\n errorOut.write('True chains:\\n')\n for entityTemplate in entityList:\n if len(entityTemplate.getAnnotatedId()) > 0:\n trueChain = entityTemplate.getMentionChain()\n trueChainLengths[entityTemplate.getAnnotatedId(checkEntireCluster=False)] = len(trueChain)\n for m in trueChain:\n# errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n\n errorOut.write('----\\n')\n else:\n print abstract.id, entityTemplate.name, 'is missing an ID'\n \n # compute Recall and precision for each detected chain/cluster\n entityList = abstract.entities.getList(self.entityTypes[0])\n errorOut.write('\\nHypothesis chains:\\n')\n for entityTemplate in entityList:\n detectedChain = entityTemplate.getMentionChain()\n \n rootMention = entityTemplate.rootMention()\n errorOut.write('[Canonical name: '+rootMention.getCanonicalName()+']\\n')\n \n for m in detectedChain:\n errorOut.write('%s %s:%s, matchedMention=%s \\n'%(m.name, m.mention, m.getAnnotatedId(checkEntireCluster=False), m.mention.matchedMention))\n# 
errorOut.write(m.name+':'+m.getAnnotatedId(checkEntireCluster=False) +'\\n')\n errorOut.write('----\\n')\n\n nMentionsInChain = len(detectedChain)\n for mTemplate in detectedChain:\n nMentions += 1\n if len(mTemplate.getAnnotatedId(checkEntireCluster=False)) == 0:\n # mention is a false positive, it does not belong to any chain\n pSum += 1.0/nMentionsInChain\n rSum += 1\n else:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) not in trueChainLengths:\n print abstract.id, 'template with id =',mTemplate.getAnnotatedId(checkEntireCluster=False), 'not in a true chain'\n break\n nMentionsInTrueChain = trueChainLengths[mTemplate.getAnnotatedId(checkEntireCluster=False)]\n nCorrectInDetectedChain = 0\n annotatedMatches = set([])\n # count the number of mentions in the detected chain that\n # should be in the same chain as this mention\n for m in detectedChain:\n if mTemplate.getAnnotatedId(checkEntireCluster=False) == m.getAnnotatedId(checkEntireCluster=False) \\\n and m.mention.matchedMention not in annotatedMatches:\n nCorrectInDetectedChain += 1\n annotatedMatches.add(m.mention.matchedMention)\n# else:\n# print abstract.id, 'Two mentions do not belong in same chain',\n# print mTemplate, m.getAnnotatedId()\n \n if nCorrectInDetectedChain > nMentionsInTrueChain:\n print abstract.id, 'id=',mTemplate.getAnnotatedId(checkEntireCluster=False), \n print 'detected chain=', nCorrectInDetectedChain,\n print 'true chain=', nMentionsInTrueChain\n nCorrectInDetectedChain = nMentionsInTrueChain\n \n# if nCorrectInDetectedChain != nMentionsInChain:\n# print abstract.id, 'id=',mTemplate.getAnnotatedId(), \n# print 'detected chain=', nCorrectInDetectedChain,\n# print 'true chain=', nMentionsInTrueChain\n \n pSum += float(nCorrectInDetectedChain) / nMentionsInChain\n rSum += float(nCorrectInDetectedChain) / nMentionsInTrueChain\n \n if nMentions == 0:\n print 'No mentions???'\n return \n \n precision = pSum/nMentions\n recall = rSum/nMentions \n fscore = 2*(recall*precision)/(recall + precision)\n \n sys.stdout.write('Recall\\tPrecision\\tF-score\\n')\n sys.stdout.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n# statOut.write(self.entityTypesString+'\\n')\n# statOut.write('Recall\\tPrecision\\tF-score\\n')\n# statOut.write('%.3f\\t ' % recall + '%.3f\\t ' % precision + '%.3f' % fscore+'\\n')\n statOut.addStats('MC - '+self.entityTypesString, [['R', recall], ['P', precision], ['F',fscore]])", "def analyze_embeddings(emb):\n dic = {\"Hi\": 0, \"En\": 1, \"Ot\": 2}\n count = [0, 0, 0, 0]\n count_zero = [0, 0, 0, 0]\n for i, j in zip(emb, corpus_trans):\n for k, l in zip(i, j):\n count[dic[l[1]]] += 1\n if sum(k) == 0:\n count_zero[dic[l[1]]] += 1\n count[-1] = sum(count)\n count_zero[-1] - sum(count_zero)\n print(\"hi, en, ot, total\")\n print(\"count: \", count)\n print(\"zero count: \", count_zero)", "def painting_matching_ml(imgs, db_imgs, method_list, text_masks, author_text, gt_text, metrics, weights, splits=30, max_rank=10):\n descriptor_extractors = [get_descriptor_extractor(method_name) for method_name in method_list]\n tmp_img_format = []\n tmp_mask_format = []\n tmp_text_format = []\n for i in range(len(imgs)):\n for j in range(len(imgs[i])):\n tmp_img_format.append(imgs[i][j])\n tmp_mask_format.append(text_masks[i][j])\n tmp_text_format.append(author_text[i][j])\n\n #db_imgs = [img[0] for img in db_imgs]\n db_img_splits = [i*len(db_imgs)//splits for i in range(splits-1)]\n \n scores = []\n query_descriptors = extract_descriptors(tmp_img_format, descriptor_extractors, 
method_list, tmp_text_format, tmp_mask_format) \n #np.array([extract_descriptors(img, matching_methods, mask) for img, mask in zip(tmp_img_format, tmp_mask_format)])\n print(\"Starting db extraction + matching\")\n for split in tqdm(range(splits-2)):\n db_descriptors = extract_descriptors(db_imgs[db_img_splits[split]:db_img_splits[split+1]], descriptor_extractors, method_list, gt_text[db_img_splits[split]:db_img_splits[split+1]], None) #np.array([mrhm(db_img) for db_img in db_imgs[db_img_splits[split]:db_img_splits[split+1]]])\n scores.append(compare_descriptors(query_descriptors, db_descriptors, metrics, method_list, weights))\n # compare_descriptors(query_descriptors, db_descriptors, descriptor_comp_methods, descriptor_names, weights)\n db_descriptors = extract_descriptors(db_imgs[db_img_splits[-1]:], descriptor_extractors, method_list, gt_text[db_img_splits[-1]:], None)\n scores.append(compare_descriptors(query_descriptors, db_descriptors, metrics, method_list, weights))\n \n # concatenate all the results\n scores = np.concatenate(scores, 1)\n \n top_k_matches = np.argpartition(scores, list(range(max_rank)))[:, :max_rank]\n return top_k_matches", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def __wiki_counts(self):\n\n num_lines = 0\n num_valid_hyperlinks = 0\n disambiguation_ent_errors = 0\n\n print(\"Calculating Wikipedia mention/entity occurrences\")\n\n last_processed_id = -1\n exist_id_found = False\n\n wiki_anchor_files = os.listdir(\n os.path.join(self.base_url, self.wiki_version, \"basic_data/anchor_files/\")\n )\n for wiki_anchor in wiki_anchor_files:\n wiki_file = os.path.join(\n self.base_url,\n self.wiki_version,\n 
\"basic_data/anchor_files/\",\n wiki_anchor,\n )\n\n with open(wiki_file, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n num_lines += 1\n\n if num_lines % 5000000 == 0:\n print(\n \"Processed {} lines, valid hyperlinks {}\".format(\n num_lines, num_valid_hyperlinks\n )\n )\n if '<doc id=\"' in line:\n id = int(line[line.find(\"id\") + 4 : line.find(\"url\") - 2])\n if id <= last_processed_id:\n exist_id_found = True\n continue\n else:\n exist_id_found = False\n last_processed_id = id\n else:\n if not exist_id_found:\n (\n list_hyp,\n disambiguation_ent_error,\n print_values,\n ) = self.__extract_text_and_hyp(line)\n\n disambiguation_ent_errors += disambiguation_ent_error\n\n for el in list_hyp:\n mention = el[\"mention\"]\n ent_wiki_id = el[\"ent_wikiid\"]\n\n num_valid_hyperlinks += 1\n if mention not in self.wiki_freq:\n self.wiki_freq[mention] = {}\n\n if (\n ent_wiki_id\n in self.wikipedia.wiki_id_name_map[\"ent_id_to_name\"]\n ):\n if mention not in self.mention_freq:\n self.mention_freq[mention] = 0\n self.mention_freq[mention] += 1\n\n ent_name = self.wikipedia.wiki_id_name_map[\n \"ent_id_to_name\"\n ][ent_wiki_id].replace(\" \", \"_\")\n if ent_name not in self.wiki_freq[mention]:\n self.wiki_freq[mention][ent_name] = 0\n self.wiki_freq[mention][ent_name] += 1\n\n print(\n \"Done computing Wikipedia counts. Num valid hyperlinks = {}\".format(\n num_valid_hyperlinks\n )\n )", "def mentions(self, users_list, mentions_list, feature_size=None, relative_freq=True):\n # Collapsing mentions of users into a single list\n all_mentions = [x for m in mentions_list for x in m if x]\n mention_counts = sorted_count(all_mentions)\n\n mentions_vector = [m for m,_ in mention_counts]\n\n # zip users, mentions\n users_mentions_zip = list(zip(users_list, mentions_list))\n # findng mention feature vector for each user\n mention_features = {}\n for user in tqdm(set(users_list), desc=\"mention_features\", leave=LEAVE_BAR):\n user_mentions = [m for u,mns in users_mentions_zip for m in mns if u==user]\n mention_features[user] = np.array( [ user_mentions.count(m) for m in mentions_vector ] )\n if relative_freq and np.sum(mention_features[user])!=0:\n mention_features[user] = mention_features[user]/np.sum(mention_features[user])\n \n return mention_features", "def measureAll(authors_texts,sectorialized_agents):\n authors_texts=P.text.aux.textFromAuthors(authors_texts,self.topm_dict[\"sectorialized_agents\"])\n authors_measures={}\n # análise de cada mensagem e de cada autor\n for author in authors_texts:\n authors_measures[author]={}\n texts=authors_texts[author]\n authors_measures[author][\"raw_strings\"]=P.text.raw.analyseAll(texts)\n authors_measures[author][\"pos\"]= P.text.pos.analyseAll(authors_analysis[author][\"raw_analysis\"])\n authors_measures[author][ \"wordnet\" ]=P.text.wordnet.analyseAll(authors_analysis[author][\"pos_analysis\"])\n authors_measures[author][\"tfIdf\"]=P.text.tfIdf.analyseAll(texts) # tfIdf de cada texto e do autor, numeric: mean e std das distancias\n # análise de cada setor e da estrutura toda\n# sectors_texts=P.text.aux.textFromSectors(authors_text,sectorialized_agents)\n sectors_measures={}\n for sector in sectorialized_agents:\n sectors_measures[sector][\"raw_strings\"]=P.text.raw.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"pos\"]= P.text.pos.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"wordnet\"]= 
P.text.wordnet.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores\n sectors_measures[sector][\"tfIdf\"]= P.text.tfIdf.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n\n# texts=[sectors_texts[i] for i in (\"peripherals\",\"intermediaries\",\"hubs\")]\n# sectors_analysis[\"raw_strings\"]=P.text.raw.analyseAll(texts)\n# sectors_analysis[\"pos\"]= P.text.pos.analyseAll(sectors_analysis[\"raw_analysis\"])\n# sectors_analysis[ \"wordnet\" ]=P.text.wordnet.analyseAll(sectors_analysis[\"pos_analysis\"])\n# sectors_analysis[\"tfIdf\"]=P.text.tfIdf.tfIdf(texts)\n\n overall_measures[\"raw_strings\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"pos\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"wordnet\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n # tfIdf measurespor texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores\n overall_measures[\"tfIdf\"]=P.text.tfIdf.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n\n del authors_texts,sectorialized_agents,author, sector\n return locals()", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def analyze(self, tokens):\n score = 0\n recognized = 0\n \n if isinstance(tokens, list):\n tokens = Counter(tokens)\n\n for token, count in tokens.iteritems():\n if self.tokens.get(token):\n recognized += count\n score += count * self.tokens[token]\n \n if recognized > 0:\n return float(score) / float(recognized)\n else:\n return 0" ]
[ "0.67164296", "0.64145726", "0.613983", "0.6108635", "0.60373306", "0.5994101", "0.5844405", "0.5815301", "0.58053136", "0.56646466", "0.5660439", "0.5660156", "0.5652913", "0.56374556", "0.5599953", "0.55947095", "0.5588759", "0.5588722", "0.5547049", "0.55327773", "0.5528737", "0.5521225", "0.5510449", "0.54809165", "0.5474272", "0.5472964", "0.5464792", "0.5435412", "0.54348123", "0.54327935" ]
0.7307794
0
compare list of annotated mentions with list of detected mentions. count number of true positives, false positives, and false negatives.
def compareMentionLists(self, dList, aList, mType, irStats, errorOut=None): # build lists of overlapping mentions for annotated and detected mentions in this sentence potentialMatches = {} for aMention in aList: potentialMatches[aMention] = [] for dMention in dList: potentialMatches[dMention] = [] for aMention in aList: if dMention.countOverlapTokens(aMention) > 0: potentialMatches[dMention].append(aMention) potentialMatches[aMention].append(dMention) # check matches for each detected template for dMention in dList: aMentionList = potentialMatches[dMention] if len(aMentionList) == 1 and dMention.matchAnnotated(aMentionList[0]): # there is only one annotated mention that matches this detected one # this is either a TP or a DUPLICATE annotatedMention = aMentionList[0] if len(potentialMatches[annotatedMention]) == 1: # this detected mention matches only ONE annotated one, count as TP # OTHERWISE, deal with it when we process annotated mentions dMention.matchedMention = annotatedMention annotatedMention.matchedMention = dMention # self.write(errorOut, '+TP: '+dMention.text+' == '+annotatedMention.text+' ('+mType+')\n') self.write(errorOut, '+TP: %s == %s %s (%s)\n'%(dMention.text, annotatedMention.text, annotatedMention, mType)) irStats.incTP() else: # this detected mention overlaps multiple annotated mentions. # OR it does not match any annotated mention. either way, discard it. # count it as a FP self.write(errorOut, '-FP: '+dMention.text+' ('+mType+')\n') irStats.incFP() for aMention in aMentionList: potentialMatches[aMention].remove(dMention) self.write(errorOut, 'DETECTED MENTION OVERLAPS '+aMention.text+'\n') potentialMatches[dMention] = [] # check matches for each annotated mention for annotatedMention in aList: dMatches = potentialMatches[annotatedMention] if len(dMatches) == 0: # annotated mention was unmatched, count as FN irStats.incFN() self.write(errorOut, '-FN: '+annotatedMention.text+' ('+mType+')\n') elif len(dMatches) > 1: # annotated mention overlapped multiple detected ones # check each one to see if it counts as a match # If more than one does, count the best match as a TP # and the rest as duplicates. bestMatches = [] for dMention in dMatches: if dMention.matchAnnotated(annotatedMention): overlap = dMention.countOverlapTokens(annotatedMention) bestMatches.append([overlap, dMention]) dMention.matchedMention = annotatedMention else: # detected mention did not sufficiently match, count as FP self.write(errorOut, '-FP: '+dMention.text+' ('+mType+')\n') irStats.incFP() if len(bestMatches) > 0: # count best match bestMatches.sort() dMention = bestMatches[-1][1] dMention.matchedMention = annotatedMention annotatedMention.matchedMention = dMention self.write(errorOut, '+TP: '+dMention.text+' == '+annotatedMention.text+' ('+mType+')\n') irStats.incTP() # count duplicates for i in range(0, len(bestMatches)-1): irStats.incDuplicates() dMention = bestMatches[i][1] self.write(errorOut, 'ANNOTATED MENTION ALSO MATCHES ') self.write(errorOut, dMention.text+'\n') dMention.matchedMention = annotatedMention else: # there are no valid matches irStats.incFN() self.write(errorOut, '-FN: '+annotatedMention.text+' ('+mType+')\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compareAnnotatedAndDetected(self, sentence, mType, irStats, errorOut=None):\n aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)\n dList = sentence.getDetectedMentions(mType, recomputeMentions=True) \n if len(aList) == 0 and len(dList) == 0:\n return\n self.compareMentionLists(dList, aList, mType, irStats, errorOut)", "def hits_and_misses(guess_seq, true_seq):\n num_hits = sum(guess_token == true_token\n for guess_token, true_token\n in zip(guess_seq, true_seq))\n num_misses = sum((Counter(guess_seq) & Counter(true_seq)).values()) - num_hits\n return num_hits, num_misses", "def countOverlapTokens(self, annotatedMention):\n if self.tokens[0].sentence != annotatedMention.tokens[0].sentence:\n return 0 # mentions in different sentences, no overlap \n \n if self.end < annotatedMention.start or self.start > annotatedMention.end:\n return 0 # mention ends before or starts after annotated one\n \n if self.start == annotatedMention.start and self.end == annotatedMention.end:\n return len(self.tokens) # exact match for annotated mention\n \n # There is some overlap. Does it consist of anything substantial?\n importantTokens = 0\n for token in self.tokens:\n if token.index >= annotatedMention.start \\\n and token.index <= annotatedMention.end \\\n and token.isSymbol() == False and token.isStopWord() == False:\n importantTokens += 1\n \n return importantTokens", "def count_similar_occurence(listOccurence1, listOccurence2):\n nbMatch=0\n for timestamp in listOccurence1:\n if(timestamp in listOccurence2):\n nbMatch+=1\n return nbMatch", "def computeStats(self, absList, statOut=None, errorOut=None):\n \n stats = EntityStats(self.entityTypes)\n for abs in absList:\n errorOut.write('---'+abs.id+'---\\n') \n \n # identify ALL annotated mentions, even in sentences we are not focused on\n# for sentence in abs.allSentences():\n# for mType in self.entityTypes:\n# aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)\n# \n# for sentence in abs.sentences:\n# for mType in self.entityTypes:\n# self.compareAnnotatedAndDetected(sentence, mType, \\\n# stats.irstats[mType], errorOut)\n\n\n for sentence in abs.allSentences():\n for mType in self.entityTypes:\n if sentence in abs.sentences:\n self.compareAnnotatedAndDetected(sentence, mType, \\\n stats.irstats[mType], errorOut)\n else: \n aList = sentence.getAnnotatedMentions(mType, recomputeMentions=True)\n \n\n stats.printStats()\n if statOut != None:\n stats.saveStats(statOut, keyPrefix='MF - ')\n \n return stats", "def get_mention_counts(articles, skip_nils=True):\n gold_forms=[]\n gold_links=[]\n for example_article in articles:\n for entity in example_article.entity_mentions:\n mention=entity.mention\n meaning=entity.gold_link\n if not skip_nils or meaning!='--NME--':\n gold_forms.append(mention)\n gold_links.append(meaning)\n cnt_instances=Counter(gold_links)\n cnt_forms=Counter(gold_forms)\n return cnt_instances, cnt_forms", "def countOccurrences(self, wordsToCheck):\n count = 0\n for token in self.importantTokenList():\n w = token.text\n for wtc in wordsToCheck:\n if wtc == w:\n count = count + 1\n return count", "def check_adr_lexicon(annotations_dict, adr_lexicon_dict):\n\n adrs_matching_labels = 0\n adrs_not_found_in_lexicon = 0\n indications_matching_labels = 0\n indications_not_found_in_lexicon = 0\n for i, (k, v) in enumerate(annotations_dict.items()):\n for index, annotation in enumerate(v):\n # tweet = tweets_dict[k]\n annotatedText = annotation['annotatedText']\n\n is_adr_lexicon = 
is_in_adr_lexicon(annotatedText, adr_lexicon_dict)\n if is_adr_lexicon:\n # print(\"ADR lexicon contains this text {}\".format(annotatedText))\n # detected_adrs += 1\n if annotation['semanticType'] == \"ADR\":\n adrs_matching_labels += 1\n else:\n indications_matching_labels += 1\n else:\n if annotation['semanticType'] == \"ADR\":\n adrs_not_found_in_lexicon += 1\n else:\n indications_not_found_in_lexicon += 1\n\n print(\"Number of ADR mentions present in the ADR Lexicon: {}\".format(adrs_matching_labels))\n print(\"Number of Indication mentions present in the ADR Lexicon: {}\".format(indications_matching_labels))\n print(\"Number of ADR mentions not present in the ADR Lexicon: {}\".format(adrs_not_found_in_lexicon))\n print(\"Number of Indication mentions not present in the ADR Lexicon: {}\".format(indications_not_found_in_lexicon))", "def mentions(self, users_list, mentions_list, feature_size=None, relative_freq=True):\n # Collapsing mentions of users into a single list\n all_mentions = [x for m in mentions_list for x in m if x]\n mention_counts = sorted_count(all_mentions)\n\n mentions_vector = [m for m,_ in mention_counts]\n\n # zip users, mentions\n users_mentions_zip = list(zip(users_list, mentions_list))\n # findng mention feature vector for each user\n mention_features = {}\n for user in tqdm(set(users_list), desc=\"mention_features\", leave=LEAVE_BAR):\n user_mentions = [m for u,mns in users_mentions_zip for m in mns if u==user]\n mention_features[user] = np.array( [ user_mentions.count(m) for m in mentions_vector ] )\n if relative_freq and np.sum(mention_features[user])!=0:\n mention_features[user] = mention_features[user]/np.sum(mention_features[user])\n \n return mention_features", "def confusion_stats(set_true, set_test):\n true_pos = len(set_true.intersection(set_test))\n false_pos = len(set_test.difference(set_true))\n false_neg = len(set_true.difference(set_test))\n \n return true_pos, false_pos, false_neg", "def count_annotations(graph):\n return Counter(_annotation_iter_helper(graph))", "def _n_matches(gold_tokens, pred_tokens):\n matcher = difflib.SequenceMatcher(None, gold_tokens, pred_tokens)\n return sum(match.size for match in matcher.get_matching_blocks())", "def compare(orflist1, orflist2):\n identicals = []\n threshold = 0 # in percentage\n while len(identicals) == 0: # if no identical ORF is found, we lower the threshold\n threshold += 1\n for orf1 in orflist1:\n for orf2 in orflist2:\n if orf1 == orf2 or orf1 in orf2 or orf2 in orf1:\n if len(orf1) > len(orf2):\n identicals.append(orf1)\n elif len(orf2) > len(orf1):\n identicals.append(orf2)\n else:\n identicals.append(orf1)\n else:\n same = 0\n diff = 0\n for i in range(0, min(len(orf1), len(orf2))):\n if orf1[i] == orf2[i]:\n same += 1\n else:\n diff += 1\n if diff / min(len(orf1), len(orf2)) * 100 > threshold:\n break\n percent = same / (same+diff) * 100\n if percent >= (100 - threshold):\n if len(orf1) > len(orf2):\n identicals.append(orf1)\n elif len(orf2) > len(orf1):\n identicals.append(orf2)\n else:\n identicals.append(orf1)\n print(\"Sequences identical at \" + str(100 - threshold) + \"%\")\n \n return identicals", "def partialSetMatchAnnotated(self, annotatedMention):\n aWords = annotatedMention.importantWords()\n dWords = self.importantWords()\n \n if dWords.intersection(aWords) == dWords:\n # this mention is a subset of the annotated mention\n if dWords == aWords:\n return True # exact match\n if len(annotatedMention.shortSets) > 0:\n # annotated mention has short sections, try to if one is 
included\n # in the detected mention\n for ss in annotatedMention.shortSets:\n if ss.intersection(dWords) == ss:\n # detected mention contains all of the words in a short section\n return True\n \n return False", "def pos_match(self, a, b, threshold=0.5):\r\n # pos_a = map(self.get_wordnet_pos, nltk.pos_tag(word_tokenize(a)))\r\n # pos_b = map(self.get_wordnet_pos, nltk.pos_tag(word_tokenize(b)))\r\n pos_a = [self.get_wordnet_pos(token) for token in nltk.pos_tag(word_tokenize(a))]\r\n pos_b = [self.get_wordnet_pos(token) for token in nltk.pos_tag(word_tokenize(b))]\r\n lemmae_a = [self.lemmatizer.lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_a \\\r\n if pos == wordnet.NOUN and token.lower().strip(string.punctuation) not in self.stopwords]\r\n lemmae_b = [self.lemmatizer.lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_b \\\r\n if pos == wordnet.NOUN and token.lower().strip(string.punctuation) not in self.stopwords]\r\n\r\n # Calculate Jaccard similarity\r\n ratio = len(set(lemmae_a).intersection(lemmae_b)) / float(len(set(lemmae_a).union(lemmae_b)))\r\n return ratio\r\n # if ratio >= threshold: return ratio\r\n # return (ratio >= threshold)\r", "def true_positives(links_true, links_pred):\n\n links_true = _get_multiindex(links_true)\n links_pred = _get_multiindex(links_pred)\n\n return len(links_true.intersection(links_pred))", "def _compareFeatureCounts(self, transAnnot, evidTrans):\n if self.allowExtension:\n if len(evidTrans.features) < len(transAnnot.features):\n return EvidenceSupport.feat_count_mismatch\n else:\n if len(evidTrans.features) != len(transAnnot.features):\n return EvidenceSupport.feat_count_mismatch\n return EvidenceSupport.good", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def num_matches(list1, list2):\n list1.sort()\n list2.sort()\n matches = i = j = 0\n lenLst1 = len(list1)\n lenLst2 = len(list2)\n while i < lenLst1 and j < lenLst2:\n if list1[i] < list2[j]:\n i+=1\n elif list1[i] > list2[j]:\n j+=1\n else: #they are the same\n matches+=1\n i+=1\n j+=1\n return matches", "def isOk(annots):\n if annots == []:\n return True\n for a in annots:\n for label in a.labels:\n if (label != 'hasOkCopies' and\n label != 'hasBadCopies' and\n not label.startswith('count_')):\n return False\n return True", "def num_matches(list1, list2):\r\n #sorted already?\r\n #list1.sort()\r\n #list2.sort()\r\n matches = i = j = 0\r\n lenLst1 = len(list1)\r\n lenLst2 = len(list2)\r\n while i < lenLst1 and j < lenLst2:\r\n if list1[i] < list2[j]:\r\n i+=1\r\n elif list1[i] > list2[j]:\r\n j+=1\r\n else: #they are the same\r\n matches+=1\r\n i+=1\r\n j+=1\r\n return matches", "def analyze_reviews(reviews):\n\n good_reviews=reviews[reviews['rs_review_movie_score']>=9]\n bad_reviews=reviews[reviews['rs_review_movie_score']<=2]\n\n print 'len(good_reviews)=%s' % len(good_reviews)\n print 'len(bad_reviews)=%s' % len(bad_reviews)\n\n m = re.compile('\\d')\n\n english_stop_words=stopwords.words('english')\n\n\n def tokenize(text):\n tokens=nltk.word_tokenize(text)\n # strip out trailing puncutation\n tokens = [ token[:-1] if token[-1] in ['.',',','!','?'] else token for token in tokens]\n\n # lower case\n tokens = [token.lower() for token in tokens]\n\n # Take only relativly long characters\n tokens = [token for token in tokens if len(token)>=3]\n\n # remove words with numbers/digits\n tokens = [token for token in 
tokens if m.search(token) is None]\n\n # Remove stop words: http://nltk.googlecode.com/svn/trunk/doc/book/ch02.html\n tokens = [token for token in tokens if token not in english_stop_words]\n return tokens\n\n good_tokens_list = []\n for i,review in good_reviews.iterrows():\n text=review['rs_review_text']\n good_tokens_list.append(tokenize(text))\n\n bad_tokens_list = []\n for i,review in bad_reviews.iterrows():\n text=review['rs_review_text']\n bad_tokens_list.append(tokenize(text))\n\n all_words=Counter()\n for tokens in good_tokens_list + bad_tokens_list:\n for token in tokens:\n all_words[token]+=1\n\n most_common=all_words.most_common(2000)\n most_common=zip(*most_common)[0]\n\n print 'most_common_words = ',most_common[-20:]\n\n def document_features(tokens):\n return {word:word in tokens for word in most_common}\n\n good_set=[(document_features(tokens), 'pos') for tokens in good_tokens_list]\n bad_set=[(document_features(tokens), 'neg') for tokens in bad_tokens_list]\n\n train_set = good_set + bad_set\n random.shuffle(train_set) # dunno if this is necessary\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n\n print 'accuracy',nltk.classify.accuracy(classifier, train_set)\n\n classifier.show_most_informative_features(300)\n\n return classifier", "def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n start_time = time.time()\n\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) 
add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n print(time.time() - start_time)\n return sent_dict_list", "def analyze(self, word_count_thresh):", "def _get_modified_precision_counts(self,\n predicted_tokens: torch.LongTensor,\n references_tokens: torch.LongTensor,\n ngram_size: int) -> Tuple[int, int]:\n clipped_matches = 0\n total_predicted = 0\n for batch_num in range(predicted_tokens.size(0)):\n\n # get n_gram counts for predicted_tokens\n predicted_row = predicted_tokens[batch_num, :]\n predicted_ngram_counts = self._count_ngrams(predicted_row, ngram_size)\n\n # get clipped n_gram counts for references_tokens\n references_row = references_tokens[batch_num, :]\n reference_ngram_counts_list = []\n for ref_num in range(references_row.size(0)):\n reference_ngram_counts_list.append(self._count_ngrams(references_row[ref_num, :], ngram_size))\n reference_ngram_counts = self._aggregate_ngram_counters(reference_ngram_counts_list)\n\n # get clipped_matches\n for ngram, count in predicted_ngram_counts.items():\n clipped_matches += min(count, reference_ngram_counts[ngram])\n total_predicted += count\n return clipped_matches, total_predicted", "def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n for w in l1:\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n if w in l2:\n count += 1\n found_idfs.append(val)\n else:\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n\n\n return count / self.normalize_factor, avg_found, avg_unfound", "def test_total_new_annotations():\n\told_num_anns = len(oset['annotations'])\n\tnew_num_anns = len(nset['annotations'])\n\tnum_NAs_found = 0\n\n\told_anns = oset['annotations']\n\tfor ann in old_anns:\n\t\tann_id = ann['id']\n\t\tcat_id = ann['category_id']\n\t\tcat = old_coco_obj.cats[cat_id]\n\t\tOL = cat['name']\n\t\tfor search_row in map_rows:\n\t\t\tif OL == search_row[0]:\n\t\t\t\trow = search_row 
\n\t\t\t\tNL = row[2]\n\n\t\t\t\t# now we have the particular row from the CSV whose old category corresponds to this annotation's category\n\t\t\t\tif NL == 'NA':\n\t\t\t\t\tnum_NAs_found += 1\n\n\tassert old_num_anns - num_NAs_found == new_num_anns", "def analyze(self, text):\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent", "def test_count_elongated_words(self):\n review = \"Hiiii how aare you todaaay?\"\n result = count_elongated_words(review)\n self.assertEqual(result, 2)", "def count_match_mismatch(data, index):\n\tsubjects = set()\n\tnew_data = []\n\tfor line in data:\n\t\tif line[\"mark_name\"] == \"match_3D7_\" + str(index):\n\t\t\tsubjects.add(line[\"subject\"])\n\t\t\tnew_data.append(line)\n\tmatch_map = {}\n\tmismatch_map = {}\t\n\tfor subject in subjects:\n\t\tmatch_map[subject] = 0\n\t\tmismatch_map[subject] = 0\t\t\n\t\tfor line in new_data:\n\t\t\tif line[\"subject\"] == subject and line[\"mark_value\"] == \"1\":\n\t\t\t\tmatch_map[subject] = 1\n\t\t\tif line[\"subject\"] == subject and line[\"mark_value\"] == \"0\":\n\t\t\t\tmismatch_map[subject] = 1\t\t\t\t\n\tmatch_count = 0\n\tmismatch_count = 0\t\n\tfor subject, value in match_map.items():\n\t\tmatch_count += value\n\tfor subject, value in mismatch_map.items():\n\t\tmismatch_count += value\t\t\n\treturn (match_count, mismatch_count)" ]
[ "0.7023873", "0.6436592", "0.63096875", "0.59876", "0.58981645", "0.5846465", "0.5844285", "0.58240235", "0.58189034", "0.5724267", "0.55928034", "0.5576252", "0.5550993", "0.5538654", "0.5529423", "0.5477881", "0.546384", "0.5460951", "0.54609233", "0.5443485", "0.5435216", "0.54270405", "0.54218525", "0.5420196", "0.5416428", "0.54139537", "0.54130465", "0.5410209", "0.54069316", "0.5405825" ]
0.6937542
1
Returns a plotted table on an axs. Based on statistics for central tendency, it can take one or more columns. Requires a matplotlib `axs` as input.
def table_central_tend(data, axs, f=2):

    # Central tendency
    v_mean = round(data.mean(), f)
    v_median = round(data.median(), f)

    # Use built-in TeX only, no dependency needed
    sample_mean_str = "mean, " + r' $\bar x$ '
    sample_median_str = "median"

    # Concatenate the statistics and symbols
    symbols = pd.DataFrame([sample_mean_str, sample_median_str])
    val = pd.DataFrame([v_mean, v_median])
    data = pd.concat([symbols, val], axis=1)

    # Plot onto matplotlib axs
    central_tend = axs.table(
        cellText=data.values,
        loc='center',
        cellLoc="center",
        colLoc='center',
        # xmin, ymin, width, height
        bbox=(0, 0, 1, 1),
        edges=""  # No line
    )

    title_color = '#9099A2'  # Dark grey
    axs.set_title(
        'Central Tendency',
        fontsize=12,
        color=title_color
    )

    table_settings(axs, central_tend)
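A hedged usage sketch for the helper above: it assumes pandas, numpy, and matplotlib are imported as usual, and that the companion `table_settings` helper (defined elsewhere in the same codebase) is importable; the sample column and figure size are made up for illustration.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Fabricated sample data purely for illustration.
rng = np.random.default_rng(0)
sample = pd.Series(rng.normal(loc=10, scale=2, size=500), name="value")

fig, axs = plt.subplots(figsize=(4, 2))
table_central_tend(sample, axs, f=2)  # renders mean/median as a borderless table
plt.show()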
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]", "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def get_table_ms(plot=True, ax=None):\n table = ascii.read(tgas_path, delimiter=';', data_start=3)\n\n # floatify:\n table['BTmag'] = table['BTmag'].astype(float)\n table['VTmag'] = table['VTmag'].astype(float)\n\n # Compute the galactic latitude of each star, add to table\n coords = SkyCoord(ra=table['RA_ICRS'] * u.deg,\n dec=table['DE_ICRS'] * u.deg, frame='icrs')\n galactic_coords = coords.transform_to('galactic')\n abs_galactic_latitude = abs(galactic_coords.b).degree\n table.add_column(Column(data=abs_galactic_latitude, name='b'))\n\n # Compute distance, CMD\n def color_cut(b_minus_v):\n return -9. + 4.9 * b_minus_v\n\n parallax_mas = table['Plx']\n Vmag = table['VTmag']\n bt_minus_vt = table['BTmag'] - table['VTmag']\n\n parallax_arcsec = parallax_mas / 1000\n dist_pc = 1. / parallax_arcsec\n\n # Add astrometric uncertainty column to table\n table.add_column(Column(data=sigma_fov(table['<Gmag>']), name='sigma_fov'))\n\n # Add a distance column to the table:\n table.add_column(Column(data=dist_pc * u.pc, name='distance'))\n\n # Add a Nfov column to the table:\n table.add_column(Column(data=Nprime_fov(abs_galactic_latitude),\n name='N_fov'))\n\n M_V = Vmag - 5 * (np.log10(dist_pc) + 1)\n\n b_minus_v_lower = 0.6 # 0.64 # (B-V)_sun = 0.65\n b_minus_v_upper = 2\n\n main_sequence = ((np.abs(M_V - color_cut(bt_minus_vt)) < 1.) 
&\n (bt_minus_vt > b_minus_v_lower) &\n (bt_minus_vt < b_minus_v_upper))\n\n main_sequence_table = table[main_sequence]\n\n # Now match the B-V color table from HIPPARCOS to the main sequence TGAS table\n hipparcos_table = ascii.read(hipparcos_path, delimiter=';', header_start=0,\n data_start=3)\n hipparcos_table.add_index(\"HIP\")\n\n main_sequence_table['HIP'][main_sequence_table['HIP'].mask] = 0\n\n main_sequence_color_table = join(main_sequence_table, hipparcos_table,\n keys='HIP')\n\n # Cut again by the color cuts, this time with the real Johnson B and V,\n # rather than Tycho magnitudes:\n main_sequence = ((main_sequence_color_table['B-V'].data.data < b_minus_v_upper) &\n (main_sequence_color_table['B-V'].data.data > b_minus_v_lower))\n\n main_sequence_color_table = main_sequence_color_table[main_sequence]\n\n # Add in stellar radii with color-radius relation from Boyajian 2012\n R_star = bv_to_radius(main_sequence_color_table['B-V'].data.data)\n main_sequence_color_table.add_column(Column(data=R_star, name='R_star'))\n\n # Add in a column of interferometric angular diameters from\n # Boyajian 2012 where available:\n boyajian = ascii.read(boyajian_path)\n ang_diams = np.zeros(len(main_sequence_color_table))\n\n for row in boyajian:\n ang_diams[row['HIP'] == main_sequence_color_table['HIP']] = row['D(UD)']\n\n main_sequence_color_table.add_column(Column(data=ang_diams,\n name='angular_diameter'))\n\n boyajian_radii = main_sequence_color_table['angular_diameter'] != 0\n half_angle = (main_sequence_color_table['angular_diameter'][boyajian_radii]\n * u.marcsec/2)\n distance_pc = (main_sequence_color_table['Plx_1'][\n boyajian_radii].data.data / 1000)**-1 * u.pc\n measured_radii = distance_pc * np.tan(half_angle)\n\n R_star[boyajian_radii] = measured_radii\n\n # In radius reference column, `1`==color-radius estimate;\n # `2`==interferometric measurement\n refs = np.ones(len(R_star))\n refs[boyajian_radii] = 2\n main_sequence_color_table.add_column(Column(data=refs, name='rstar_ref'))\n\n # Add column containing approximate stellar effective temperatures based\n # on B-V -> T_eff table from Eric Mamajek:\n # http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt\n mamajek = ascii.read(mamajek_path, format='commented_header')\n bv_to_teff = lambda bv: np.interp(bv, mamajek['B-V'], mamajek['Teff'])\n approx_teffs = bv_to_teff(main_sequence_color_table['B-V'])\n main_sequence_color_table.add_column(Column(data=approx_teffs, name='Teff'))\n\n if plot:\n if ax is None:\n ax = plt.gca()\n polygon_x = [0.6, 0.6, 2.0, 2.0, 0.6]\n polygon_y = [color_cut(0.6) - 1, color_cut(0.6) + 1,\n color_cut(2) + 1, color_cut(2) - 1,\n color_cut(0.6) - 1]\n\n H, xedges, yedges = np.histogram2d(bt_minus_vt[abs(bt_minus_vt) > 1e-3],\n M_V[abs(bt_minus_vt) > 1e-3],\n bins=1000)\n\n extent = [xedges.min(), xedges.max(), yedges.max(), yedges.min()]\n ax.imshow(np.log10(H.T), extent=extent, cmap=plt.cm.Greys, aspect=0.2)\n ax.plot(polygon_x, polygon_y, lw=2, color='r', ls='--')\n\n ax.set(xlim=[-0.5, 3], ylim=[2, -15],\n ylabel='$M_{VT}$', xlabel=\"BT - VT\")\n\n return main_sequence_color_table", "def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n 
cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')", "def descriptive_table(data, column_name, fig_size=(8, 8)):\n\n # Set up figure dimensions and sub components.\n sheet, axs = plt.subplots(4, 1, figsize=fig_size)\n\n # Heights ratio is based on the number of rows in each\n # table, this relates to the number of statistics each\n # sub table will show.\n gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])\n\n # Assign all subplots based on figure dimensions.\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n title_color = '#9099A2' # Dark grey\n plt.suptitle(\n 'Descriptive Statistics',\n fontsize=16,\n color=title_color,\n x=0.25\n )\n\n table_top(data, column_name, ax0)\n table_central_tend(data, ax1)\n table_disperssion(data, ax2)\n table_distribution(data, ax3)\n\n # Adjust the spacing so the title fits correctly.\n sheet.subplots_adjust(hspace=0.2, top=0.95)", "def table_settings(axs_num, table_name):\n\n table_props = table_name.properties()\n table_cells = table_props['child_artists'] # matplotlib setting\n # iterate through cells of a table to change properties\n for cell in table_cells:\n cell._text.set_fontsize(15)\n cell._text.set_color('#192231') # Light grey\n\n # Set axis tick labels off, i.e. empty [].\n axs_num.set_yticklabels([])\n axs_num.set_xticklabels([])\n\n # Seaborn settings\n sns.set_style(\"whitegrid\")\n sns.set_style({'axes.grid': False})\n sns.set_context(\n \"poster\",\n rc={'font.sans-serif': 'Gill Sans MT'}\n )\n\n sns.despine(offset=2, top=False, trim=False, left=True, bottom=True)\n\n # Leave one line on top to break up the table\n axs_num.spines['top'].set_color('#9099A2')\n\n # Set tick labels to white in case they still are showing,\n # perhaps redudent but this is not perfect.\n plt.setp(\n [axs_num.get_xticklines(), axs_num.get_yticklines()],\n color=\"white\"\n )", "def plot_results(outputs_table_totals, elec_benefits, gas_benefits):\n summer_months = [6, 7, 8, 9]\n shoulder_months = [3, 4, 5, 10]\n winter_months = [11, 12, 1, 2]\n peak_hours = [16, 17, 18, 19, 20]\n pct_hours_in_summer = 2928 / 8760\n pct_hours_in_shoulder = 2952 / 8760\n pct_hours_in_winter = 2880 / 8760\n\n trc_costs_record = outputs_table_totals[\"TRC Costs ($)\"]\n pac_costs_record = outputs_table_totals[\"PAC Costs ($)\"]\n trc_record = outputs_table_totals[\"TRC\"]\n pac_record = outputs_table_totals[\"PAC\"]\n lifecycle_net_mwh = outputs_table_totals[\"Electricity Lifecycle Net Savings (MWh)\"]\n lifecycle_net_therms = outputs_table_totals[\"Gas Lifecycle Net Savings (Therms)\"]\n lifecycle_net_ghg = outputs_table_totals[\"Total Lifecycle GHG Savings (Tons)\"]\n\n # Getting variables for plots\n elec_benefits_cols = (\n [\"hourly_savings\"] + ACC_COMPONENTS_ELECTRICITY + [\"av_csts_levelized\"]\n )\n\n elec_benefits_hour_month_year = (\n elec_benefits.groupby([\"hour_of_day\", \"year\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n\n total_benefits = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"total\"].sum()\n )\n\n summer_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n 
summer_peak_benefits = elec_benefits_hour_month_year[\"total\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n shoulder_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(shoulder_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n winter_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n total_savings = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"hourly_savings\"].sum()\n )\n summer_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n shoulder_savings = list(\n elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n summer_peak_savings = elec_benefits_hour_month_year[\"hourly_savings\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n winter_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n total_av_csts_avg = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\n \"av_csts_levelized\"\n ].mean()\n )\n summer_av_csts_avg = list(\n pct_hours_in_summer\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n summer_peak_av_csts_avg = elec_benefits_hour_month_year[\"av_csts_levelized\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].mean()\n shoulder_av_csts_avg = list(\n pct_hours_in_shoulder\n * elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n winter_av_csts_avg = list(\n pct_hours_in_winter\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n\n elec_benefits_sum_by_hod = (\n elec_benefits[elec_benefits_cols].groupby(elec_benefits[\"hour_of_day\"]).sum()\n )\n elec_benefits_hoy = (\n elec_benefits[elec_benefits_cols]\n .groupby(elec_benefits[\"hour_of_year\"])\n .sum()\n .cumsum()\n .reset_index()\n )\n sav_avcsts_288 = (\n elec_benefits.groupby([\"hour_of_day\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n sav_avcsts_288 = sav_avcsts_288[\n [\"hour_of_day\", \"month\", \"hourly_savings\", \"total\", \"marginal_ghg\"]\n ]\n ghgsav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"marginal_ghg\")\n sav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"hourly_savings\")\n avcsts = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"total\")\n\n # savings load shape plot\n fig0, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n 
plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_savings,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u25EF$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax2.plot(\n hod,\n shoulder_savings,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u2206$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax3.plot(\n hod,\n winter_savings,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A1$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"Savings (MWh/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_savings + shoulder_savings + winter_savings) < 0:\n ymax = 0\n else:\n ymax = max(summer_savings + shoulder_savings + winter_savings)\n if min(summer_savings + shoulder_savings + winter_savings) > 0:\n ymin = 0\n else:\n ymin = min(summer_savings + shoulder_savings + winter_savings)\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\"Seasonal Savings Load Shapes\", size=18, loc=\"left\").set_position(\n [0, 1.03]\n )\n\n # benefits_seasonal_shape_plot\n 
fig1, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_benefits,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u2B24$\",\n markersize=13,\n linestyle=\":\",\n )\n ax2.plot(\n hod,\n shoulder_benefits,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u25B2$\",\n markersize=13,\n linestyle=\":\",\n )\n ax3.plot(\n hod,\n winter_benefits,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A0$\",\n markersize=13,\n linestyle=\":\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"TRC Benefits ($/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax = 0\n else:\n ymax = max(summer_benefits + shoulder_benefits + winter_benefits)\n if min(summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin = 0\n else:\n ymin = min(summer_benefits + shoulder_benefits + winter_benefits)\n\n # Tick and label parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\n 
\"Seasonal TRC Benefits by Hour ($)\", size=18, loc=\"left\"\n ).set_position([0, 1.03])\n\n # sum_hourly_plot\n fig2 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig2.gca()\n colors = [\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels = []\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY[1:]):\n if x == 1:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n else:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n bottom=elec_benefits_sum_by_hod.iloc[:, 2 : x + 1].sum(axis=1),\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n\n # Set x and y limits based on min and max values\n ymax = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).max()\n if elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min() > 0:\n ymin = 0\n else:\n ymin = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min()\n\n ax.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Day\", size=17, labelpad=5)\n ax.set_ylabel(\"$ Avoided Costs\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Electric Avoided Costs by Component and Hour of Day\",\n size=17,\n loc=\"left\",\n )\n\n # Tick and lebel parameters\n ax.tick_params(bottom=True, top=False, left=True, right=False)\n ax.set_xticks(np.arange(0, 24, step=4))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 2) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )\n\n # avoided_cost_summary_plot\n fig3, (ax1, ax2, ax3) = plt.subplots(\n 3, 1, figsize=(6, 10), sharex=True, sharey=False\n )\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels = [\"Total\", \"Summer\", \"Shoulder\", \"Winter\"]\n\n ax1.plot(\n hod,\n total_benefits,\n c=\"royalblue\",\n marker=\"$\\u25EF$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax1.plot(hod, summer_benefits, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax1.plot(hod, shoulder_benefits, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax1.plot(hod, winter_benefits, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax2.plot(\n hod,\n total_savings,\n c=\"firebrick\",\n marker=\"$\\u2206$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax2.plot(hod, summer_savings, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax2.plot(hod, shoulder_savings, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax2.plot(hod, winter_savings, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax3.plot(\n hod,\n total_av_csts_avg,\n c=\"green\",\n marker=\"$\\u25A0$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax3.plot(hod, 
summer_av_csts_avg, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax3.plot(hod, shoulder_av_csts_avg, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax3.plot(hod, winter_av_csts_avg, c=\"teal\", linewidth=1, linestyle=\"-\")\n\n leg1 = ax1.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax3.set_xticks(np.arange(0, 24, step=4))\n ax3.set_xlabel(\"Hour of Day\", size=14, labelpad=5)\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n ax1.set_ylabel(\"TRC Benefits ($)\", size=14)\n ax2.set_ylabel(\"Savings (MWh)\", size=14)\n ax3.set_ylabel(\"Av. Cost ($/MWh)\", size=14)\n\n if max(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax1 = 0\n else:\n ymax1 = max(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if min(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin1 = 0\n else:\n ymin1 = min(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if max(total_savings + summer_savings + shoulder_savings + winter_savings) < 0:\n ymax2 = 0\n else:\n ymax2 = max(total_savings + summer_savings + shoulder_savings + winter_savings)\n if min(total_savings + summer_savings + shoulder_savings + winter_savings) > 0:\n ymin2 = 0\n else:\n ymin2 = min(total_savings + summer_savings + shoulder_savings + winter_savings)\n if (\n max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n < 0\n ):\n ymax3 = 0\n else:\n ymax3 = max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n if (\n min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n > 0\n ):\n ymin3 = 0\n else:\n ymin3 = min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n ax3.set_ylim(ymin3 * 1.08, ymax3 * 1.08)\n\n ax1.set_yticks(\n np.arange(\n ymin1 * 1.08,\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 3) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin2 * 1.08,\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 3) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin3 * 1.08,\n ymax3 * 1.08,\n step=max(round(ymax3 - ymin3, 3) / 5, int((round(ymax3 - ymin3, 0)) / 4)),\n )\n )\n\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax2.axvspan(16, 21, alpha=0.2, color=\"grey\")\n 
ax3.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n # Print key information\n plt.annotate(\n \"Electric Benefits = $\" + str(round(elec_benefits[\"total\"].sum(), 2)),\n xy=(350, 530),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Gas Benefits = $\" + str(round(gas_benefits, 2)),\n xy=(350, 505),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Total Benefits = $\"\n + str(round(elec_benefits[\"total\"].sum() + gas_benefits, 2)),\n xy=(350, 480),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC Costs = $\" + str(trc_costs_record),\n xy=(350, 455),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC Costs = $\" + str(pac_costs_record),\n xy=(350, 430),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC = \" + str(trc_record),\n xy=(350, 405),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC = \" + str(pac_record),\n xy=(350, 380),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Electric Savings = \" + str(lifecycle_net_mwh) + \" MWh\",\n xy=(350, 335),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Gas Savings = \" + str(lifecycle_net_therms) + \" Therms\",\n xy=(350, 310),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle GHG Savings = \" + str(lifecycle_net_ghg) + \" Tons\",\n xy=(350, 285),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_savings) / sum(total_savings)), 1))\n + \"% MWh savings during summer peak period\",\n xy=(350, 260),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_benefits) / sum(total_benefits)), 1))\n + \"% Electric TRC benefits from summer peak period\",\n xy=(350, 235),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Electric Benefits per MWh = $\"\n + str(round(elec_benefits[\"total\"].sum() / lifecycle_net_mwh, 2)),\n xy=(350, 210),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Typical Avoided Cost per MWh = $\"\n + str(round(elec_benefits[\"av_csts_levelized\"].mean(), 2)),\n xy=(350, 145),\n xycoords=\"axes points\",\n fontsize=18,\n )\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Savings and Avoided Cost Profiles\", size=16, loc=\"left\"\n ).set_position([0, 1.03])\n\n # marginal_ghg_savings_plot\n cmp = sns.diverging_palette(16, 260, l=35, n=25, as_cmap=True)\n\n fig4 = plt.figure(figsize=(8, 6), dpi=100)\n ax1 = fig4.gca()\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n hmp = sns.heatmap(ghgsav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=15)\n ax1.set_ylabel(\"Hour of Day\", size=15)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=13\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=13,\n rotation=0,\n )\n ax1.set_title(\"Electric GHG Savings by Month and Hour\", size=15, loc=\"left\", pad=8)\n cbar1 = hmp.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=14)\n plt.annotate(\"Sum GHG\", xy=(370, 352), xycoords=\"axes points\", fontsize=12)\n plt.annotate(\"Savings (Tons)\", xy=(370, 336), xycoords=\"axes points\", fontsize=12)\n\n # month_hour_savings_benefits_plot\n fig5, (ax1, ax2) = plt.subplots(1, 2, 
figsize=(21, 10), dpi=200)\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n fleft = sns.heatmap(sav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n fright = sns.heatmap(avcsts, cmap=cmp, ax=ax2, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=22)\n ax1.set_ylabel(\"Hour of Day\", size=22)\n ax2.set_xlabel(\"Month\", size=22)\n ax2.set_ylabel(\"Hour of Day\", size=22)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax2.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax1.set_title(\n \"MWh Savings by Month and Hour\", size=24, loc=\"left\", pad=15\n ).set_position([0, 1.1])\n ax2.set_title(\"$ Benefits by Month and Hour\", size=24, loc=\"left\", pad=15)\n fig4.tight_layout(pad=2.0)\n cbar1 = fleft.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=18)\n cbar2 = fright.collections[0].colorbar\n cbar2.ax.tick_params(labelsize=18)\n plt.annotate(\"Sum MWh\", xy=(-200, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Savings\", xy=(-193, 560), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Sum TRC\", xy=(435, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Benefits\", xy=(442, 560), xycoords=\"axes points\", fontsize=20)\n\n # savings_benefits_cumulative_sum_plot\n fig6 = plt.figure(figsize=(12, 7), dpi=250)\n ax1 = fig6.gca()\n ax1.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"hourly_savings\"],\n color=\"royalblue\",\n linewidth=3,\n )\n ax2 = ax1.twinx()\n ax2.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"total\"],\n color=\"firebrick\",\n linewidth=3,\n linestyle=\"--\",\n )\n ax2.axhline(y=0, color=\"gray\", linewidth=0.7, linestyle=\"--\")\n\n # Set x and y limits based on min and max values\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].max() >= 0\n and elec_benefits_hoy[\"total\"].max() >= 0\n ):\n ymax1 = elec_benefits_hoy[\"hourly_savings\"].max()\n ymax2 = elec_benefits_hoy[\"total\"].max()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() < 0\n ):\n ymax1 = 0\n ymax2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() > 0\n ):\n ymax1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].min()\n * (\n elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n )\n ymax2 = elec_benefits_hoy[\"total\"].max()\n else:\n ymax1 = 0\n ymax2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].max()\n / (\n elec_benefits_hoy[\"hourly_savings\"].max()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].min() <= 0\n and elec_benefits_hoy[\"total\"].min() <= 0\n ):\n ymin1 = elec_benefits_hoy[\"hourly_savings\"].min()\n ymin2 = elec_benefits_hoy[\"total\"].min()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() 
> 0\n and elec_benefits_hoy[\"total\"].min() > 0\n ):\n ymin1 = 0\n ymin2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() < 0\n ):\n ymin1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].max()\n * (\n elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n )\n ymin2 = elec_benefits_hoy[\"total\"].min()\n else:\n ymin1 = 0\n ymin2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].min()\n / (\n elec_benefits_hoy[\"hourly_savings\"].min()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n # Set x and y axis limits\n ax1.set_xlim(-340, 9000)\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n\n # Set x and y axis labels\n ax1.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax1.set_ylabel(\"Net Lifecycle Savings (MWh)\", size=17)\n ax2.set_ylabel(\"$ TRC Benefits\", size=17, rotation=-90, labelpad=20)\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Cumulative Savings and TRC Benefits by Hour of Year\",\n size=17,\n loc=\"left\",\n pad=8,\n )\n\n # Tick and lebel parameters\n ax1.set_xticks(np.arange(0, 8760, step=1000))\n ax1.set_yticks(\n np.arange(\n int(round(ymin1 * 1.1, 0)),\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 2) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n ax2.set_xticks(np.arange(0, 8760, step=1000))\n ax2.set_yticks(\n np.arange(\n int(round(ymin2 * 1.1, 0)),\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 2) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax2.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n ax1.legend(\n [\"Savings\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 1),\n loc=\"upper left\",\n frameon=False,\n )\n ax2.legend(\n [\"TRC Beneftis\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 0.95),\n loc=\"upper left\",\n frameon=False,\n )\n\n fig7 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig7.gca()\n colors1 = [\n \"black\",\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels2 = []\n\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[0]],\n color=colors1[0],\n linewidth=3,\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[0])\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY) - 2:\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors1[x],\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[x])\n x += 1\n\n # Set x and y limits based on min and max values\n if max(elec_benefits_hoy.iloc[:, 2:x].max()) < 0:\n ymax = 0\n else:\n ymax = max(elec_benefits_hoy.iloc[:, 2:x].max())\n if min(elec_benefits_hoy.iloc[:, 2:x].min()) > 0:\n ymin = 
0\n else:\n ymin = min(elec_benefits_hoy.iloc[:, 2:x].min())\n\n ax.set_xlim(-340, 9000)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax.set_ylabel(\"$ TRC Benefits\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Avoided Costs by Component and Hour of Day\", size=17, loc=\"left\"\n )\n\n # Tick and lebel parameters\n ax.set_xticks(np.arange(0, 8760, step=1000))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels2,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )", "def table_distribution(data, axs, f=2):\n\n # Measures of distribution\n v_max = round(data.max(), f)\n v_95 = round((data.quantile(0.95)), f)\n v_90 = round((data.quantile(0.9)), f)\n v_75 = round((data.quantile(0.75)), f)\n v_50 = round((data.quantile(0.5)), f)\n v_25 = round((data.quantile(0.25)), f)\n v_10 = round((data.quantile(0.1)), f)\n v_05 = round((data.quantile(0.05)), f)\n v_min = round(data.min(), f)\n\n # pandas quantile returns a series which needs to be recombined\n # hence reset_index and transpose used in this case.\n quantiles = pd.concat(\n [v_max, v_95, v_90,\n v_75, v_50, v_25,\n v_10, v_05, v_min],\n axis=1\n ).transpose().reset_index()\n quantiles.drop('index', axis=1, inplace=True)\n\n # Use built in tex only, no depandancy needed\n sample_max_str = r\"maximum\"\n sample_95_str = r\"$Q(0.95)$\"\n sample_90_str = r\"$Q(0.90)$\"\n sample_75_str = r\"$Q(0.75)$\"\n sample_50_str = r\"$Q(0.50)$\"\n sample_25_str = r\"$Q(0.25)$\"\n sample_10_str = r\"$Q(0.10)$\"\n sample_05_str = r\"$Q(0.05)$\"\n sample_min_str = r\"minimum\"\n\n symbols = pd.DataFrame([sample_max_str, sample_95_str, sample_90_str,\n sample_75_str, sample_50_str, sample_25_str,\n sample_10_str, sample_05_str, sample_min_str])\n\n data = pd.concat([symbols, quantiles], axis=1)\n\n distribution = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='right',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\")\n\n title_color = '#9099A2'\n axs.set_title(\n ('Distribution'),\n fontsize=12,\n color=title_color)\n\n table_settings(axs, distribution)", "def ana_plot_graphs(plotables,plotable_titles,figure_title=None,show=False):\n axes = num_to_subplots_axes(len(plotables))\n fig = plt.figure()\n fig.suptitle(figure_title)\n for i, ((plotable,plot_type),ana_type) in enumerate(zip(plotables,plotable_titles)):\n if plot_type == 'MESH':\n #ax = plot_mesh_sub(fig, axes+(i+1,), *plotable)\n ax = plot_imshow_from_mesh_sub(fig, axes+(i+1,), *plotable)\n # Suplots indexing is from 1 => i+1\n ax.set_title(ana_type)\n elif plot_type == 'HIST':\n ax = plot_imshow_sub(\n fig, axes+(i+1,), plotable[0],\n (np.min(plotable[1]),np.max(plotable[1])),\n (np.min(plotable[2]),np.max(plotable[2]))\n )\n ax.set_title(ana_type)\n else:\n assert False, \"Not implemented\"\n if show:\n plt.show()", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n figs.setup_amp_plots_grid(\"row\",\n title=\"Correlation: 
imaging region and serial overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n figs.setup_amp_plots_grid(\"col\",\n title=\"Correlation: imaging region and paralell overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n\n dtab = dtables.get_table(\"correl\")\n for i in range(16):\n s_correl = dtab['s_correl_a%02i' % i]\n p_correl = dtab['p_correl_a%02i' % i]\n figs.get_obj('row', 'axs').flat[i].hist(s_correl, bins=100, range=(-1., 1.))\n figs.get_obj('col', 'axs').flat[i].hist(p_correl, bins=100, range=(-1., 1.))", "def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()", "def plot_table(mat, width=.15, ratio=4):\n vmax = np.abs(mat).max()\n vals = np.around(mat, 2)\n fig = plt.figure()\n ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],\n loc='center', cellColours=plt.cm.RdBu_r(\n Normalize(-vmax, vmax)(mat)))\n table.scale(1, ratio)\n return fig", "def plot_alternate_t(noncentrality, df, alpha=0.05, ax=None):\n\n if ax is None:\n ax = plt.axes()\n\n x, y1, y2, crit = _summarize_t(noncentrality, df, alpha)\n color1, color2 = _get_colors()\n\n ax.plot(x, y1, color=color1)\n ax.plot(x, y2, color=color2)\n\n sn.despine(ax=ax, left=True, offset=10)\n\n return ax", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n sumtable = dtables['biasoscorr_stats']\n figs.plot_stat_color('mean-s', sumtable['s_correl_mean'].reshape(9, 16))\n figs.plot_stat_color('mean-p', sumtable['p_correl_mean'].reshape(9, 16))", "def plot_eos_table(ax, mat, table_name, spec='t', vmin=None, vmax=None,\n nx=300, ny=350, xmax=None, ymax=None, xmin=None, ymin=None):\n\n table_name = table_name.format(s=spec)\n tab = mat.get_table(table_name)\n\n Rmin, Rmax = tab['Rmin'], tab['Rmax']\n Tmin, Tmax = tab['Tmin'], tab['Tmax']\n if xmin is not None:\n Rmin = xmin\n if ymin is not None:\n Tmin = ymin\n\n Xarr = np.logspace(np.log10(Rmin), np.log10(Rmax)-0.1, nx)\n Yarr = np.logspace(np.log10(Tmin), np.log10(Tmax)-0.1, ny)\n\n X, Y = np.meshgrid(Xarr, Yarr, indexing='ij')\n\n F = tab(X,Y)\n\n if vmax is None:\n vmax = np.percentile(F, 99.5) \n if vmin is 
None:\n vmin = np.percentile(F[F>0], 0.5)\n\n cs = ax.pcolormesh(X, Y*K2eV, F, cmap=plt.cm.jet, norm = LogNorm(),\n vmin=vmin, vmax=vmax)\n if vmin is not None:\n levels = np.arange(int(np.log10(vmin)), int(np.log10(F.max())))\n else:\n levels = np.arange(np.log10(F[F>0].min()), int(np.log10(F.max())))\n logF = np.log10(np.where(F>0, F, F[F>0].min()))\n cl = ax.contour(X, Y/11640, logF, levels, colors='k')\n plt.clabel(cl, fontsize=10, inline=False, fmt='%1.0d', use_clabeltext=True)\n plt.title('Table {0}: {1}'.format(tab['Material_ID'], table_name.replace('_', '\\_')))\n cb = plt.colorbar(cs)\n if F.min()<0:\n min_label = ' (min {0:.0e} GPa)'.format(F.min())\n else:\n min_label = ''\n cb.set_label('{0} [{1}] {2}'.format(tab.label.replace('_', '\\_'),\n tab.units, min_label))\n\n cl = ax.contourf(X, Y*K2eV, F>0, [0,0.5], colors='white', hatches=['//'])\n\n ax.set_xscale('symlog', linthreshx=3e-5)\n ax.set_yscale('symlog', linthreshy=0.1)\n if xmax is None:\n ax.set_xlim(0, Xarr.max())\n else:\n ax.set_xlim(0, xmax)\n if ymax is None:\n ax.set_ylim(0, Yarr.max()*K2eV)\n else:\n ax.set_ylim(0, ymax)\n\n ax.set_xlabel(r'$\\rho$ [g.cm$^{-3}$]')\n ax.set_ylabel(r'$T$ [eV]')\n return ax", "def _get_safety_totals_plot(self, ax, safety_stats):\n meta = self.meta\n violations_labels = meta['safety_constraints']\n total_violations = safety_stats['total_violations'].T\n\n for idx, violations in enumerate(total_violations):\n label = violations_labels[idx]\n ax.plot(np.arange(violations.shape[0]), violations, label=label)\n\n ax.set_title('# violations / episode')\n ax.legend()\n ax.set_ylabel('# violations')\n ax.set_xlabel('Episode')\n ax.plot()", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plot_exp_traces_plus_means(all_table, yscale = 'linear', data_col = 'spike_freq', x_label = 'Time (days)', title = 'Experiment Traces plus Mean', ymax=10, end_time = 1.375, norm_dmso = False, dmso_table = pd.DataFrame([]), c = 'multi', **plot_kwargs):\n \n time_vector = all_table['time']\n cat_table_norm = pd.DataFrame([])\n \n for exp in all_table['exp'].unique():\n exp_table = all_table.query('exp == @exp')\n time_vector = exp_table['time']\n if norm_dmso == True:\n exp_dmso_table = dmso_table.query('exp == @exp')\n if exp_dmso_table.empty:\n norm_exp = exp_table[data_col]\n else:\n norm_exp = np.divide(exp_table[data_col], exp_dmso_table[data_col])\n if c == 'multi':\n plt.plot(time_vector, norm_exp, alpha=0.4, **plot_kwargs)\n else:\n plt.plot(time_vector, norm_exp, alpha=0.2, color=c, label='_nolegend_', **plot_kwargs)\n cat_table_norm = pd.concat([cat_table_norm, (pd.DataFrame(data = {'spike_freq': norm_exp, 'time': time_vector, 'exp': exp}))])\n tt_drug, tt_end = exp_stats(cat_table_norm, end_time)\n else:\n if c == 'multi':\n plt.plot(time_vector, exp_table[data_col], alpha=0.4, **plot_kwargs)\n else:\n plt.plot(time_vector, exp_table[data_col], alpha=0.2, color = c, label='_nolegend_', **plot_kwargs)\n tt_drug, tt_end = exp_stats(all_table, end_time)\n \n mean_freq_traces = all_table.groupby(('time'))[data_col].mean()\n mean_freq_traces = mean_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe\n \n if norm_dmso == True:\n mean_freq_traces_dmso = dmso_table.groupby(('time'))[data_col].mean()\n mean_freq_traces_dmso = mean_freq_traces_dmso.rename(data_col).reset_index()\n if c == 'multi':\n 
plt.plot(mean_freq_traces['time'], np.divide(mean_freq_traces[data_col], mean_freq_traces_dmso[data_col]), 'k', **plot_kwargs)\n else:\n plt.plot(mean_freq_traces['time'], np.divide(mean_freq_traces[data_col], mean_freq_traces_dmso[data_col]), c, **plot_kwargs)\n else:\n if c == 'multi':\n plt.plot(mean_freq_traces['time'], mean_freq_traces[data_col], 'k', **plot_kwargs)\n else:\n plt.plot(mean_freq_traces['time'], mean_freq_traces[data_col], c, **plot_kwargs)\n \n plt.axhline(1, color='k', label='_nolegend_')\n plt.ylim([0,ymax])\n \n \n print('Drug Stats: ')\n print(tt_drug)\n print('End Stats: ')\n print(tt_end)\n \n plt.yscale(yscale)\n plt.xlabel(x_label)\n plt.ylabel('Fold Induction')\n plt.title(title)\n if c == 'multi':\n plt.legend(all_table['exp'].unique())\n \n return cat_table_norm", "def transaction_plot(ds):\n import seaborn as sns\n import pandas as pd\n df = pd.DataFrame()", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n\n figs.setup_amp_plots_grid(\"ratio-row\", title=\"sflat ratio by row\",\n xlabel=\"row\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'row', 'ratio_row',\n x_name='row_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"ratio-col\", title=\"sflat ratio by col\",\n xlabel=\"col\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'col', 'ratio_col',\n x_name='col_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"scatter\", title=\"sflat ratio v. sbias\",\n xlabel=\"Superbias [ADU]\", ylabel=\"Ratio\")\n\n figs.plot_amp_arrays(\"mask\", self.quality_masks, vmin=0, vmax=3)\n\n for i, (amp, sbias_image) in enumerate(sorted(self.superbias_images)):\n figs.plot_two_image_hist2d('scatter', i,\n sbias_image,\n self.ratio_images[amp],\n bins=(200, 200),\n range=((-50, 50.), (0.018, 0.022)))", "def plot_results(self, a):\n import matplotlib.pyplot as plt\n fig, axes = plt.subplots(nrows=len(a.data_vars), sharex='all', sharey='all')\n for ax, var in zip(axes, a.data_vars):\n data = a[var]\n plt.sca(ax)\n data.plot(x='time', cmap=plt.cm.viridis_r, yincrease=False, robust=True)\n plt.show()", "def table_top(data, name, axs):\n\n # Count\n v_count = []\n for i in name:\n v_col_size = data[i].size\n v_count.append(v_col_size)\n\n # Use built in tex only, no depandancy needed\n sample_count_str = \"samples, \" + r' $n$ '\n\n symbols = pd.DataFrame([sample_count_str])\n val = pd.DataFrame([v_count])\n data = pd.concat([symbols, val], axis=1)\n\n # Get column names out of list\n labels = [\"\"]\n for i in name:\n labels.append(i)\n\n top = axs.table(\n cellText=data.values,\n colLabels=labels,\n loc='center',\n cellLoc=\"center\",\n colLoc='center',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\")\n\n table_settings(axs, top)\n\n # As the above table_settings function sets black\n # line on top overwrite that setting\n axs.spines['top'].set_color('white')", "def plot_unit_rate_change(cat_table, title='Unit FR Change', norm_dmso = False, dmso_table = pd.DataFrame([]), **plot_kwargs):\n unit_freq_mean_base = cat_table.query('time < 0 and time > -0.125').groupby(('exp','unit_name'))['spike_freq'].mean() \n unit_freq_mean_end = cat_table.query('time < 1.25 and time > 1.125').groupby(('exp','unit_name'))['spike_freq'].mean()\n \n \n\n for e in cat_table['exp'].unique():\n for unit in cat_table.query('exp == @e')['unit_name'].unique(): \n # if unit_freq_mean_base.loc[e,unit] > 0 and unit_freq_mean_end.loc[e,unit] > 0:\n plt.plot(unit_freq_mean_base.loc[e,unit], unit_freq_mean_end.loc[e,unit], '.', 
**plot_kwargs)\n \n max_base = max(unit_freq_mean_base)\n max_end = max(unit_freq_mean_end)\n \n plt.plot([0.001,np.ceil(max(max_base,max_end))],[0.001,np.ceil(max(max_base,max_end))],'k')\n\n plt.yscale('log')\n plt.xscale('log')\n plt.xlim([0.001,np.ceil(max(max_base,max_end))])\n plt.ylim([0.001,np.ceil(max(max_base,max_end))])\n plt.axis('equal')\n \n \n plt.ylabel('End mean firing rate (Hz)')\n plt.xlabel('Baseline mean firing rate (Hz)')\n plt.title(title)\n return", "def summaryPlot(df):\n import datetime as dt\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import numpy as np\n import pandas as pd\n from numpy import array\n import matplotlib.patches as mpatches\n import seaborn as sns\n from matplotlib.pyplot import figure\n\n class color:\n # Allows for bolded and underlined text\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n # Reads df and fills empty values\n df.index = pd.to_datetime(df.date)\n df = df.drop(\"date\", axis=1)\n df_all = df.resample(\"1D\")\n df_all = df_all.fillna(method=\"ffill\")\n\n dataPoints = [\"pm25\", \"co\", \"so2\", \"pm10\", \"o3\", \"no2\", \"nox\", \"wd\", \"ws\"]\n\n i = 0\n sub = 1\n while i < 9:\n # Plots line and histogram plots for ecery polutant\n # in the correct location based on subplot\n plt.figure(1, figsize=(50, 50))\n plt.subplot(9, 2, sub)\n sub = sub + 1\n a = df_all[dataPoints[i]].plot.line(color=\"gold\")\n a.axes.get_xaxis().set_visible(False)\n a.yaxis.set_label_position(\"left\")\n plt.ylabel(dataPoints[i], fontsize=75, bbox=dict(facecolor=\"whitesmoke\"))\n # print(df['pm25'].max())\n\n plt.subplot(9, 2, sub)\n sub = sub + 1\n plt.hist(df_all[dataPoints[i]], bins=50, color=\"green\")\n i = i + 1\n i = 0\n while i < 9:\n # Calculates statistics\n nDf = df[dataPoints[i]]\n missing = nDf.isna().sum() + sum(n < 0 for n in nDf)\n minVal = nDf.min()\n maxVal = nDf.max()\n meanVal = nDf.mean()\n medianVal = nDf.median()\n percentile = nDf.quantile(0.95)\n print(\"---------------\")\n print(color.BOLD + color.UNDERLINE + dataPoints[i] + color.END)\n print(\"min = \" + str(0))\n print(\"max = \" + str(maxVal))\n print(\"missing = \" + str(missing))\n print(\"mean = \" + str(meanVal))\n print(\"median = \" + str(medianVal))\n print(\"95th percentile = \" + str(percentile))\n i = i + 1", "def table_disperssion(data, axs, f=2):\n\n # Measures of disperssion\n v_bessel_sd = round(data.std(), f)\n v_var = round(data.var(), f)\n v_range = round((data.max()-data.min()), f)\n v_iqr = round((data.quantile(0.75)-data.quantile(0.25)), f)\n v_mad = round(data.mad(), f)\n\n # Use built in tex only, no depandancy needed\n sample_std_str = \"stan. dev.\" + r' $s$ '\n sample_var_str = \"variance, \" + '$s^2$'\n sample_range_str = \"range\"\n sample_iqr_str = \"$IQR$\"\n sample_mad_str = \"mean abs. 
dev.\"\n\n symbols = pd.DataFrame(\n [sample_std_str, sample_iqr_str,\n sample_mad_str, sample_var_str,\n sample_range_str]\n )\n val = pd.DataFrame(\n [v_bessel_sd, v_iqr,\n v_mad, v_var, v_range]\n )\n data = pd.concat([symbols, val], axis=1)\n\n disperssion = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='right',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\") # No line\n\n title_color = '#9099A2' # Dark Grey\n axs.set_title(\n ('Disperssion'),\n fontsize=12,\n color=title_color)\n\n table_settings(axs, disperssion)", "def obstab_plot_observable(yyyy: int, doy: int, gnss: str, dfprnobst: pd.DataFrame, dir_gfzplt: str, obstab_name: str, dt_first: datetime, dt_last: datetime, show_plot: bool = False, logger: logging.Logger = None) -> str:\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n amutils.logHeadTailDataFrame(df=dfprnobst, dfName='dfprnobst[{gnss:s}]'.format(gnss=gnss), logger=logger, callerName=cFuncName)\n\n # set up the plot\n plt.style.use('ggplot')\n # plt.style.use('seaborn-darkgrid')\n\n # determine index of first obst\n idx_PRN = dfprnobst.columns.get_loc('PRN') + 1\n nr_obsts = len(dfprnobst.columns[idx_PRN:])\n\n # used markers\n lst_markers = ['o', 'x', '+', '.', ',', 'v', '^', '<', '>', 's', 'd']\n\n # create 2 subplots with same axis if more than 1 obst, else only 1 subplot\n if nr_obsts == 1:\n fig, ax1 = plt.subplots(1, figsize=(10, 4))\n else:\n fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(10, 7), gridspec_kw={'height_ratios': [2, 1]})\n\n # create colormap with nrcolors discrete colors which is th efirst always present plot\n obst_colors, title_font = amutils.create_colormap_font(nrcolors=nr_obsts, font_size=12)\n obst_markers = lst_markers[:nr_obsts]\n for obst, obst_color, marker in zip(dfprnobst.columns[idx_PRN:], obst_colors, obst_markers):\n ax1.plot(dfprnobst['DATE_TIME'], dfprnobst[obst], color=obst_color, label=obst, alpha=0.6, linestyle='', marker=marker, markersize=2)\n\n # beautify plot\n ax1.xaxis.grid(b=True, which='major')\n ax1.yaxis.grid(b=True, which='major')\n\n ax1.set_ylabel(gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]], fontdict=title_font)\n # ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))\n\n ax1.legend(loc='best', markerscale=4)\n\n # setticks on Y axis to represent the PRNs\n if dfprnobst.columns[idx_PRN][0] == 'S':\n ax1.set_yticks(np.arange(10, 61, 10))\n\n # this will be the bottom axis if only 1 obst available\n axis = ax1\n\n # add difference plot when there are more than 1 obst available\n if nr_obsts > 1:\n # add difference between observables\n diff_colors = []\n for i, color in enumerate(amutils.get_spaced_colors(nr_obsts)):\n diff_colors.append(tuple(rgb / 256. 
for rgb in color))\n\n obst_diff_markers = lst_markers[:nr_obsts]\n\n dfprnobstdiff = pd.DataFrame(dfprnobst['DATE_TIME'])\n for i, obst1 in enumerate(dfprnobst.columns[idx_PRN:-1]):\n for j, obst2 in enumerate(dfprnobst.columns[idx_PRN + (i + 1):]):\n obst_diff = '{obst1:s}-{obst2:s}'.format(obst1=obst1, obst2=obst2)\n\n dfprnobstdiff[obst_diff] = dfprnobst[obst1] - dfprnobst[obst2]\n\n marker = obst_diff_markers[i * len(dfprnobst.columns[idx_PRN:-1]) + j]\n ax2.plot(dfprnobstdiff['DATE_TIME'], dfprnobstdiff[obst_diff], label=obst_diff, alpha=0.6, linestyle='', marker=marker, markersize=2)\n\n # beutify this plot\n if dfprnobst.columns[idx_PRN][0] == 'S':\n ax2.set_ylim([-10, +10])\n if dfprnobst.columns[idx_PRN][0] == 'C':\n ax2.set_ylim([-20, +20])\n ax2.set_ylabel('Diff {obst:s}'.format(obst=gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]]), fontdict=title_font)\n\n # this will be the bottom axis if more than 1 obst available\n axis = ax2\n\n # plot title\n plt.suptitle('{obst:s} for PRN {prn:s} on {yy:02d}/{doy:03d}'.format(obst=gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]], prn=dfprnobst['PRN'].iloc[0], yy=(yyyy % 100), doy=doy))\n\n # beautify plot\n axis.set_xlabel('Time', fontdict=title_font)\n axis.yaxis.grid(b=True, which='major')\n axis.legend(loc='best', markerscale=3)\n\n # create the ticks for the time axis\n axis.set_xlim([dt_first, dt_last])\n dtFormat = plot_utils.determine_datetime_ticks(startDT=dt_first, endDT=dt_last)\n\n if dtFormat['minutes']:\n # ax.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(10, 60, 10), interval=1))\n pass\n else:\n axis.xaxis.set_major_locator(dates.HourLocator(interval=dtFormat['hourInterval'])) # every 4 hours\n axis.xaxis.set_major_formatter(dates.DateFormatter('%H:%M')) # hours and minutes\n\n axis.xaxis.set_minor_locator(dates.DayLocator(interval=1)) # every day\n axis.xaxis.set_minor_formatter(dates.DateFormatter('\\n%d-%m-%Y'))\n\n axis.xaxis.set_tick_params(rotation=0)\n for tick in axis.xaxis.get_major_ticks():\n # tick.tick1line.set_markersize(0)\n # tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('center')\n\n fig.tight_layout()\n\n # save the plot in subdir png of GNSSSystem\n plt_name = '{basen:s}-{gnss:s}-{PRN:s}-{obst:s}.pdf'.format(basen=obstab_name.split('.')[0], gnss=gnss, PRN=dfprnobst['PRN'].iloc[0], obst=gfzc.dict_obstypes[dfprnobst.columns[idx_PRN][0]])\n fig.savefig(os.path.join(dir_gfzplt, plt_name), dpi=200)\n logger.info('{func:s}: created plot {plot:s}'.format(func=cFuncName, plot=colored(plt_name, 'green')))\n\n # if show_plot:\n if show_plot:\n plt.show(block=True)\n else:\n plt.close(fig)\n\n return plt_name", "def tplot(self, analytes=None, figsize=[10, 4], scale=None, filt=None,\n ranges=False, stats=False, stat='nanmean', err='nanstd',\n interactive=False, focus_stage=None, err_envelope=False):\n\n if interactive:\n enable_notebook() # make the plot interactive\n\n if type(analytes) is str:\n analytes = [analytes]\n if analytes is None:\n analytes = self.analytes\n\n if focus_stage is None:\n focus_stage = self.focus_stage\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([.1,.12,.77,.8])\n\n for a in analytes:\n x = self.Time\n y, yerr = unpack_uncertainties(self.data[focus_stage][a])\n\n if scale is 'log':\n ax.set_yscale('log')\n y[y == 0] = np.nan\n\n if filt:\n ind = self.filt.grab_filt(filt, a)\n xf = x.copy()\n yf = y.copy()\n yerrf = yerr.copy()\n if any(~ind):\n xf[~ind] = np.nan\n yf[~ind] = np.nan\n yerrf[~ind] = np.nan\n if any(~ind):\n ax.plot(x, y, 
color=self.cmap[a], alpha=.4, lw=0.6)\n ax.plot(xf, yf, color=self.cmap[a], label=a)\n if err_envelope:\n ax.fill_between(xf, yf - yerrf, yf + yerrf, color=self.cmap[a],\n alpha=0.2, zorder=-1)\n else:\n ax.plot(x, y, color=self.cmap[a], label=a)\n if err_envelope:\n ax.fill_between(x, y - yerr, y + yerr, color=self.cmap[a],\n alpha=0.2, zorder=-1)\n\n # Plot averages and error envelopes\n if stats and hasattr(self, 'stats'):\n sts = self.stats[sig][0].size\n if sts > 1:\n for n in np.arange(self.n):\n n_ind = ind & (self.ns == n + 1)\n if sum(n_ind) > 2:\n x = [self.Time[n_ind][0], self.Time[n_ind][-1]]\n y = [self.stats[sig][self.stats['analytes'] == a][0][n]] * 2\n\n yp = ([self.stats[sig][self.stats['analytes'] == a][0][n] +\n self.stats[err][self.stats['analytes'] == a][0][n]] * 2)\n yn = ([self.stats[sig][self.stats['analytes'] == a][0][n] -\n self.stats[err][self.stats['analytes'] == a][0][n]] * 2)\n\n ax.plot(x, y, color=self.cmap[a], lw=2)\n ax.fill_between(x + x[::-1], yp + yn,\n color=self.cmap[a], alpha=0.4,\n linewidth=0)\n else:\n x = [self.Time[0], self.Time[-1]]\n y = [self.stats[sig][self.stats['analytes'] == a][0]] * 2\n yp = ([self.stats[sig][self.stats['analytes'] == a][0] +\n self.stats[err][self.stats['analytes'] == a][0]] * 2)\n yn = ([self.stats[sig][self.stats['analytes'] == a][0] -\n self.stats[err][self.stats['analytes'] == a][0]] * 2)\n\n ax.plot(x, y, color=self.cmap[a], lw=2)\n ax.fill_between(x + x[::-1], yp + yn, color=self.cmap[a],\n alpha=0.4, linewidth=0)\n\n if ranges:\n for lims in self.bkgrng:\n ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)\n for lims in self.sigrng:\n ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)\n\n if filt is not None:\n ind = self.filt.grab_filt(filt)\n lims = bool_2_indices(~ind)\n for l, u in lims:\n if u >= len(self.Time):\n u = -1\n ax.axvspan(self.Time[l], self.Time[u], color='k',\n alpha=0.05, lw=0)\n\n # drawn = []\n # for k, v in self.filt.switches.items():\n # for f, s in v.items():\n # if s & (f not in drawn):\n # lims = bool_2_indices(~self.filt.components[f])\n # for u, l in lims:\n # ax.axvspan(self.Time[u-1], self.Time[l], color='k',\n # alpha=0.05, lw=0)\n # drawn.append(f)\n\n ax.text(0.01, 0.99, self.sample + ' : ' + self.focus_stage,\n transform=ax.transAxes,\n ha='left', va='top')\n\n ax.set_xlabel('Time (s)')\n ax.set_xlim(np.nanmin(x), np.nanmax(x))\n \n # y label\n ud = {'rawdata': 'counts',\n 'despiked': 'counts',\n 'bkgsub': 'background corrected counts',\n 'ratios': 'counts/{:s} count',\n 'calibrated': 'mol/mol {:s}'}\n if focus_stage in ['ratios', 'calibrated']:\n ud[focus_stage] = ud[focus_stage].format(self.internal_standard)\n ax.set_ylabel(ud[focus_stage])\n\n if interactive:\n ax.legend()\n plugins.connect(fig, plugins.MousePosition(fontsize=14))\n display.clear_output(wait=True)\n display.display(fig)\n input('Press [Return] when finished.')\n disable_notebook() # stop the interactivity\n else:\n ax.legend(bbox_to_anchor=(1.15, 1))\n\n return fig, ax", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n 
ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def test_by_csa(df):\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 3))\n f, axes = plt.subplots(4, 1, figsize=(4, 9))#, sharex=True)\n sns.despine(top=True, bottom=True)\n f.suptitle(\"Diagnostic Test\\nGrouped by %CSA\")\n\n OSA_pure_df = df.loc[df['BaseDx'] == \"Mainly OSA\"]\n OSA_predom_df = df.loc[df['BaseDx'] == \"Combined OSA/CSA\"]\n CSA_predom_df = df.loc[df['BaseDx'] == \"Predominantly CSA\"]\n CSA_pure_df = df.loc[df['BaseDx'] == \"Pure CSA\"]\n\n OSA_pure_hist = OSA_pure_df['StudyType'].value_counts()\n OSA_predom_hist = OSA_predom_df['StudyType'].value_counts()\n CSA_predom_hist = CSA_predom_df['StudyType'].value_counts()\n CSA_pure_hist = CSA_pure_df['StudyType'].value_counts()\n\n # Pure OSA\n axes[0].set(xlabel=\"\", ylabel=\"<10% CSA\")\n osa_pure_wedges, _, _ = axes[0].pie(OSA_pure_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[0].legend(osa_pure_wedges, OSA_pure_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Predom OSA\n axes[1].set(xlabel=\"\", ylabel=\"10-49.9% CSA\")\n osa_predom_wedges, _, _ = axes[1].pie(OSA_predom_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[1].legend(osa_predom_wedges, OSA_predom_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Predom CSA\n axes[2].set(xlabel=\"\", ylabel=\"50-90% CSA\")\n\n csa_predom_wedges, _, _ = axes[2].pie(CSA_predom_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[2].legend(csa_predom_wedges, CSA_predom_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Pure CSA\n axes[3].set(xlabel=\"Patients With Each Etiology Contributing to CSA\", ylabel=\">90% CSA\")\n\n csa_pure_wedges, _, _ = axes[3].pie(CSA_pure_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[3].legend(csa_pure_wedges, CSA_pure_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n axes[3].set(xlabel=\"\\nProportion using each type \\nof diagnostic test\")\n\n f.tight_layout(rect=[0, 0, 1, 0.95]) # .95 to leave space 
for title\n f.savefig('Diag Test by percentage CSA.png', dpi=100)\n # plt.show()", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot" ]
[ "0.64666414", "0.629224", "0.6126542", "0.60390544", "0.58691555", "0.5715039", "0.5713977", "0.56380343", "0.56379694", "0.56378335", "0.5608983", "0.5569364", "0.55638283", "0.55580723", "0.55462444", "0.55326843", "0.5463168", "0.54459053", "0.5418356", "0.5393956", "0.5391823", "0.5377514", "0.53736967", "0.53665316", "0.53281575", "0.53100467", "0.5293378", "0.5260891", "0.5237462", "0.52364564" ]
0.6815292
0
Returns a plotted table on an axs. Based on statistics for dispersion, can take one column or more. Requires a matplotlib `axs` as input to be used.
def table_disperssion(data, axs, f=2): # Measures of disperssion v_bessel_sd = round(data.std(), f) v_var = round(data.var(), f) v_range = round((data.max()-data.min()), f) v_iqr = round((data.quantile(0.75)-data.quantile(0.25)), f) v_mad = round(data.mad(), f) # Use built in tex only, no depandancy needed sample_std_str = "stan. dev." + r' $s$ ' sample_var_str = "variance, " + '$s^2$' sample_range_str = "range" sample_iqr_str = "$IQR$" sample_mad_str = "mean abs. dev." symbols = pd.DataFrame( [sample_std_str, sample_iqr_str, sample_mad_str, sample_var_str, sample_range_str] ) val = pd.DataFrame( [v_bessel_sd, v_iqr, v_mad, v_var, v_range] ) data = pd.concat([symbols, val], axis=1) disperssion = axs.table( cellText=data.values, loc='center', cellLoc="center", colLoc='right', # xmin, ymin, width, height bbox=(0, 0, 1, 1), edges="") # No line title_color = '#9099A2' # Dark Grey axs.set_title( ('Disperssion'), fontsize=12, color=title_color) table_settings(axs, disperssion)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]", "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def ana_plot_graphs(plotables,plotable_titles,figure_title=None,show=False):\n axes = num_to_subplots_axes(len(plotables))\n fig = plt.figure()\n fig.suptitle(figure_title)\n for i, ((plotable,plot_type),ana_type) in enumerate(zip(plotables,plotable_titles)):\n if plot_type == 'MESH':\n #ax = plot_mesh_sub(fig, axes+(i+1,), *plotable)\n ax = plot_imshow_from_mesh_sub(fig, axes+(i+1,), *plotable)\n # Suplots indexing is from 1 => i+1\n ax.set_title(ana_type)\n elif plot_type == 'HIST':\n ax = plot_imshow_sub(\n fig, axes+(i+1,), plotable[0],\n (np.min(plotable[1]),np.max(plotable[1])),\n (np.min(plotable[2]),np.max(plotable[2]))\n )\n ax.set_title(ana_type)\n else:\n assert False, \"Not implemented\"\n if show:\n plt.show()", "def descriptive_table(data, column_name, fig_size=(8, 8)):\n\n # Set up figure dimensions and sub components.\n sheet, axs = plt.subplots(4, 1, figsize=fig_size)\n\n # Heights ratio is based on the number of rows in each\n # table, this relates to the number of statistics each\n # sub table will show.\n gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])\n\n # Assign all subplots based on figure dimensions.\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n title_color = '#9099A2' # Dark grey\n plt.suptitle(\n 'Descriptive Statistics',\n fontsize=16,\n color=title_color,\n x=0.25\n )\n\n table_top(data, column_name, ax0)\n table_central_tend(data, ax1)\n table_disperssion(data, ax2)\n table_distribution(data, ax3)\n\n # Adjust the spacing so the title fits correctly.\n sheet.subplots_adjust(hspace=0.2, top=0.95)", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n figs.setup_amp_plots_grid(\"row\",\n title=\"Correlation: imaging region and serial overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n figs.setup_amp_plots_grid(\"col\",\n title=\"Correlation: imaging region and paralell overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n\n dtab = dtables.get_table(\"correl\")\n for i in range(16):\n s_correl = dtab['s_correl_a%02i' % i]\n p_correl = dtab['p_correl_a%02i' % i]\n figs.get_obj('row', 'axs').flat[i].hist(s_correl, bins=100, range=(-1., 1.))\n figs.get_obj('col', 'axs').flat[i].hist(p_correl, bins=100, range=(-1., 1.))", "def plot_results(self, a):\n import matplotlib.pyplot as plt\n fig, axes = plt.subplots(nrows=len(a.data_vars), sharex='all', sharey='all')\n for ax, var in zip(axes, a.data_vars):\n data = a[var]\n plt.sca(ax)\n data.plot(x='time', cmap=plt.cm.viridis_r, yincrease=False, robust=True)\n plt.show()", "def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n 
axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()", "def get_table_ms(plot=True, ax=None):\n table = ascii.read(tgas_path, delimiter=';', data_start=3)\n\n # floatify:\n table['BTmag'] = table['BTmag'].astype(float)\n table['VTmag'] = table['VTmag'].astype(float)\n\n # Compute the galactic latitude of each star, add to table\n coords = SkyCoord(ra=table['RA_ICRS'] * u.deg,\n dec=table['DE_ICRS'] * u.deg, frame='icrs')\n galactic_coords = coords.transform_to('galactic')\n abs_galactic_latitude = abs(galactic_coords.b).degree\n table.add_column(Column(data=abs_galactic_latitude, name='b'))\n\n # Compute distance, CMD\n def color_cut(b_minus_v):\n return -9. + 4.9 * b_minus_v\n\n parallax_mas = table['Plx']\n Vmag = table['VTmag']\n bt_minus_vt = table['BTmag'] - table['VTmag']\n\n parallax_arcsec = parallax_mas / 1000\n dist_pc = 1. / parallax_arcsec\n\n # Add astrometric uncertainty column to table\n table.add_column(Column(data=sigma_fov(table['<Gmag>']), name='sigma_fov'))\n\n # Add a distance column to the table:\n table.add_column(Column(data=dist_pc * u.pc, name='distance'))\n\n # Add a Nfov column to the table:\n table.add_column(Column(data=Nprime_fov(abs_galactic_latitude),\n name='N_fov'))\n\n M_V = Vmag - 5 * (np.log10(dist_pc) + 1)\n\n b_minus_v_lower = 0.6 # 0.64 # (B-V)_sun = 0.65\n b_minus_v_upper = 2\n\n main_sequence = ((np.abs(M_V - color_cut(bt_minus_vt)) < 1.) 
&\n (bt_minus_vt > b_minus_v_lower) &\n (bt_minus_vt < b_minus_v_upper))\n\n main_sequence_table = table[main_sequence]\n\n # Now match the B-V color table from HIPPARCOS to the main sequence TGAS table\n hipparcos_table = ascii.read(hipparcos_path, delimiter=';', header_start=0,\n data_start=3)\n hipparcos_table.add_index(\"HIP\")\n\n main_sequence_table['HIP'][main_sequence_table['HIP'].mask] = 0\n\n main_sequence_color_table = join(main_sequence_table, hipparcos_table,\n keys='HIP')\n\n # Cut again by the color cuts, this time with the real Johnson B and V,\n # rather than Tycho magnitudes:\n main_sequence = ((main_sequence_color_table['B-V'].data.data < b_minus_v_upper) &\n (main_sequence_color_table['B-V'].data.data > b_minus_v_lower))\n\n main_sequence_color_table = main_sequence_color_table[main_sequence]\n\n # Add in stellar radii with color-radius relation from Boyajian 2012\n R_star = bv_to_radius(main_sequence_color_table['B-V'].data.data)\n main_sequence_color_table.add_column(Column(data=R_star, name='R_star'))\n\n # Add in a column of interferometric angular diameters from\n # Boyajian 2012 where available:\n boyajian = ascii.read(boyajian_path)\n ang_diams = np.zeros(len(main_sequence_color_table))\n\n for row in boyajian:\n ang_diams[row['HIP'] == main_sequence_color_table['HIP']] = row['D(UD)']\n\n main_sequence_color_table.add_column(Column(data=ang_diams,\n name='angular_diameter'))\n\n boyajian_radii = main_sequence_color_table['angular_diameter'] != 0\n half_angle = (main_sequence_color_table['angular_diameter'][boyajian_radii]\n * u.marcsec/2)\n distance_pc = (main_sequence_color_table['Plx_1'][\n boyajian_radii].data.data / 1000)**-1 * u.pc\n measured_radii = distance_pc * np.tan(half_angle)\n\n R_star[boyajian_radii] = measured_radii\n\n # In radius reference column, `1`==color-radius estimate;\n # `2`==interferometric measurement\n refs = np.ones(len(R_star))\n refs[boyajian_radii] = 2\n main_sequence_color_table.add_column(Column(data=refs, name='rstar_ref'))\n\n # Add column containing approximate stellar effective temperatures based\n # on B-V -> T_eff table from Eric Mamajek:\n # http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt\n mamajek = ascii.read(mamajek_path, format='commented_header')\n bv_to_teff = lambda bv: np.interp(bv, mamajek['B-V'], mamajek['Teff'])\n approx_teffs = bv_to_teff(main_sequence_color_table['B-V'])\n main_sequence_color_table.add_column(Column(data=approx_teffs, name='Teff'))\n\n if plot:\n if ax is None:\n ax = plt.gca()\n polygon_x = [0.6, 0.6, 2.0, 2.0, 0.6]\n polygon_y = [color_cut(0.6) - 1, color_cut(0.6) + 1,\n color_cut(2) + 1, color_cut(2) - 1,\n color_cut(0.6) - 1]\n\n H, xedges, yedges = np.histogram2d(bt_minus_vt[abs(bt_minus_vt) > 1e-3],\n M_V[abs(bt_minus_vt) > 1e-3],\n bins=1000)\n\n extent = [xedges.min(), xedges.max(), yedges.max(), yedges.min()]\n ax.imshow(np.log10(H.T), extent=extent, cmap=plt.cm.Greys, aspect=0.2)\n ax.plot(polygon_x, polygon_y, lw=2, color='r', ls='--')\n\n ax.set(xlim=[-0.5, 3], ylim=[2, -15],\n ylabel='$M_{VT}$', xlabel=\"BT - VT\")\n\n return main_sequence_color_table", "def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n 
cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')", "def plot_table(mat, width=.15, ratio=4):\n vmax = np.abs(mat).max()\n vals = np.around(mat, 2)\n fig = plt.figure()\n ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],\n loc='center', cellColours=plt.cm.RdBu_r(\n Normalize(-vmax, vmax)(mat)))\n table.scale(1, ratio)\n return fig", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n\n figs.setup_amp_plots_grid(\"ratio-row\", title=\"sflat ratio by row\",\n xlabel=\"row\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'row', 'ratio_row',\n x_name='row_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"ratio-col\", title=\"sflat ratio by col\",\n xlabel=\"col\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'col', 'ratio_col',\n x_name='col_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"scatter\", title=\"sflat ratio v. sbias\",\n xlabel=\"Superbias [ADU]\", ylabel=\"Ratio\")\n\n figs.plot_amp_arrays(\"mask\", self.quality_masks, vmin=0, vmax=3)\n\n for i, (amp, sbias_image) in enumerate(sorted(self.superbias_images)):\n figs.plot_two_image_hist2d('scatter', i,\n sbias_image,\n self.ratio_images[amp],\n bins=(200, 200),\n range=((-50, 50.), (0.018, 0.022)))", "def transaction_plot(ds):\n import seaborn as sns\n import pandas as pd\n df = pd.DataFrame()", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n sumtable = dtables['biasoscorr_stats']\n figs.plot_stat_color('mean-s', sumtable['s_correl_mean'].reshape(9, 16))\n figs.plot_stat_color('mean-p', sumtable['p_correl_mean'].reshape(9, 16))", "def make_disp_sq_plots(comp_key,conn):\n\n (fin,) = conn.execute(\"select fout from comps where function = 'track_stats' and comp_key = ?\",\n (comp_key,)).fetchone()\n\n F = h5py.File(fin,'r')\n \n g = F[_fd('disp_sq_hist',comp_key)]\n cmap = color_mapper(0,len(g))\n\n (fig,ax) = plots.set_up_plot()\n istatus = plots.non_i_plot_start();\n for s in g:\n step = int(s[-7:])\n val = g[s]['bin_value'][:]\n \n ax.semilogy(g[s]['bin_edges'],val,color = cmap.get_color(step))\n F.close()\n\n (iden_fun,dset_key ) = conn.execute(\"select function,dset_key from comps where \" +\n \"comp_key in \" +\n \"(select iden_key from trk_stat_prams where \"+\n \"comp_key = ?)\",(comp_key,)).fetchone()\n\n ax.set_title(\"dset: \" + str(dset_key) + \" \" + iden_fun)\n\n plots.non_i_plot_stop(istatus)", "def test_plots(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n fig = pesummary_data.plot(type=\"td\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"fd\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(1126259446 + 20., type=\"omegascan\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"spectrogram\")\n assert isinstance(fig, matplotlib.figure.Figure)", "def table_settings(axs_num, table_name):\n\n table_props = table_name.properties()\n table_cells = table_props['child_artists'] # matplotlib setting\n # iterate through cells of a table to change properties\n for cell in table_cells:\n cell._text.set_fontsize(15)\n 
cell._text.set_color('#192231') # Light grey\n\n # Set axis tick labels off, i.e. empty [].\n axs_num.set_yticklabels([])\n axs_num.set_xticklabels([])\n\n # Seaborn settings\n sns.set_style(\"whitegrid\")\n sns.set_style({'axes.grid': False})\n sns.set_context(\n \"poster\",\n rc={'font.sans-serif': 'Gill Sans MT'}\n )\n\n sns.despine(offset=2, top=False, trim=False, left=True, bottom=True)\n\n # Leave one line on top to break up the table\n axs_num.spines['top'].set_color('#9099A2')\n\n # Set tick labels to white in case they still are showing,\n # perhaps redudent but this is not perfect.\n plt.setp(\n [axs_num.get_xticklines(), axs_num.get_yticklines()],\n color=\"white\"\n )", "def _get_safety_totals_plot(self, ax, safety_stats):\n meta = self.meta\n violations_labels = meta['safety_constraints']\n total_violations = safety_stats['total_violations'].T\n\n for idx, violations in enumerate(total_violations):\n label = violations_labels[idx]\n ax.plot(np.arange(violations.shape[0]), violations, label=label)\n\n ax.set_title('# violations / episode')\n ax.legend()\n ax.set_ylabel('# violations')\n ax.set_xlabel('Episode')\n ax.plot()", "def plot_data(indf, prefix='html'):\n list_of_plots = []\n# scatter_matrix(indf)\n# pl.savefig('scatter_matrix.png')\n# list_of_plots.append('scatter_matrix.png')\n\n for col in indf:\n pl.clf()\n# cond = indf[col].notnull()\n# v = indf[cond][col]\n v = indf[col]\n# nent = len(v)\n# hmin, hmax = v.min(), v.max()\n# xbins = np.linspace(hmin,hmax,nent)\n# hmin, hmax, nbin = BOUNDS[col]\n# xbins = np.linspace(hmin, hmax, nbin)\n v.hist(bins=20, histtype='step', normed=True, log=True)\n pl.title(col)\n pl.savefig('%s_hist.png' % col)\n list_of_plots.append('%s_hist.png' % col)\n\n create_html_page_of_plots(list_of_plots, prefix)\n return", "def analyze_show():\n def mat_to_title(mat_file):\n mat_split = mat_file.split('_')\n while (mat_split.pop() not in ANALYSIS_METHODS):\n pass\n return string.join(mat_split,'_') + '*.mat'\n\n plotables = []\n for mat_file in Args.plotable_files:\n plotables.extend(\n [\n ((val.squeeze(),key), \"{0}: {1}\".format(mat_to_title(mat_file),key))\n for key,val in scipy.io.loadmat(mat_file).viewitems()\n if not (key.startswith('__') and key.endswith('__'))\n ]\n )\n ana_plot_graphs(*zip(*plotables),show=True)", "def _plot_scatter_matrix(self, ax=None, alpha=0.5, figsize=None,\n diagonal='kde', pretty=True):\n response_variable = self._response_variable\n response_variable_transform, raw_response_variable = find_raw_variable(response_variable)\n explanatory_df = self._model_dataset\n # format response\n transform_func = TRANSFORM_FUNCTIONS[response_variable_transform]\n # transform response variable\n response_df = transform_func(explanatory_df[[raw_response_variable]])\n # and rename the columns to reflect the transformation\n response_df.rename(columns={raw_response_variable:response_variable}, inplace=True)\n exogenous_df = self._get_exogenous_matrix(explanatory_df)\n # join exogenous variables and response variable\n variable_df = response_df[[response_variable]].join(exogenous_df)\n # omit the constant column from the plot\n variable_df.drop(labels='const', axis=1, inplace=True)\n\n # make the scatter plot matrix\n sm = scatter_matrix(variable_df, alpha=alpha, figsize=figsize, ax=ax,\n diagonal=diagonal)\n if pretty:\n # rotate the labels\n [s.xaxis.label.set_rotation(45) for s in sm.reshape(-1)]\n [s.yaxis.label.set_rotation(45) for s in sm.reshape(-1)]\n # offset y label\n [s.get_yaxis().set_label_coords(-.5, 0.5) for s in 
sm.reshape(-1)]", "def scatterplot_matrix():\r\n\r\n # load data\r\n iris_dataset = load_iris()\r\n data = iris_dataset\r\n setosa = data['data'][data['target'] == 0]\r\n versicolor = data['data'][data['target'] == 1]\r\n virginica = data['data'][data['target'] == 2]\r\n\r\n # set picture frame\r\n num = 4\r\n fig, axes = plt.subplots(nrows=num, ncols=num, figsize=(18, 18))\r\n fig.subplots_adjust(hspace=0.5, wspace=0.25)\r\n\r\n # set scatter plot\r\n for i in range(0, num):\r\n for j in range(0, num):\r\n if i == j:\r\n continue\r\n axes[j, i].plot(setosa[:, j], setosa[:, i], color='navy', marker='o', linestyle='none')\r\n axes[j, i].plot(versicolor[:, j], versicolor[:, i], color='purple', marker='*', linestyle='none')\r\n axes[j, i].plot(virginica[:, j], virginica[:, i], color='pink', marker='s', linestyle='none')\r\n\r\n # set histgram on the diagram\r\n for i in range(0, num):\r\n axes[i, i].hist(setosa[:, i], color='navy')\r\n axes[i, i].hist(versicolor[:, i], color='purple')\r\n axes[i, i].hist(virginica[:, i], color='pink')\r\n\r\n axes[0, 0].set_title('Sepal length')\r\n axes[1, 1].set_title('Sepal width')\r\n axes[2, 2].set_title('Petal length')\r\n axes[3, 3].set_title('Petal width')\r\n\r\n plt.legend(('Setosa', 'Virginica', 'Versicolor')) # add legend\r\n\r\n # add Main title\r\n fig.suptitle('Iris Plots, measurements in cm', size=20)\r\n plt.show()", "def plotMerged(self, matrix, expcol, expdata=None,\n title='', showtable=True, ax=None, name=None,\n stats=True):\n if expdata==None:\n expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])\n merged = self.mergeMatrix(matrix, expdata)\n x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)\n from Correlation import CorrelationAnalyser\n C = CorrelationAnalyser()\n muts = ['mutation: '+i for i in muts]\n labels = zip(names, muts)\n ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,\n ax=ax,plotname=name,stats=stats,err=4)\n x=[round(float(i),2) for i in x]\n y=[round(float(i),2) for i in y] \n if showtable == True:\n table = self.showTable(frame, merged)\n mh.table = table\n \n return ax,mh,x,y", "def plot_wanted_cols(self, df, cols_wanted_array):\n\t\tcols_to_plot = {}\n\t\tfor col_wanted in cols_wanted_array:\n\t\t\tfor col in df.columns:\n\t\t\t\tif col_wanted in col:\n\t\t\t\t\tcols_to_plot[col] = df[col]\n\t\tfor k,v in cols_to_plot.items():\n\t\t\tplt.plot(df.index, v, label=k)\n\t\tplt.title(\"Simple Plot\")\n\t\tplt.legend()\n\t\tplt.show()", "def plot_data_stats(data_dict, data_bxtxn, data_dt):\n print(onp.mean(onp.sum(data_bxtxn, axis=1)), \"spikes/second\")\n f = plt.figure(figsize=(12,4))\n plt.subplot(141)\n plt.hist(onp.mean(data_bxtxn, axis=1).ravel()/data_dt);\n plt.xlabel('spikes / sec')\n plt.subplot(142)\n plt.imshow(data_dict['hiddens'][0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('Sample trial rates')\n plt.subplot(143);\n plt.imshow(data_bxtxn[0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('spikes')\n plt.subplot(144)\n plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));\n plt.xlabel('neuron #')\n plt.ylabel('spikes / sec');\n return f", "def ana_results_to_plotables(ana_results,analysis_attributes):\n plot_attributes = [\n (ana_attr.numbers,ana_attr.range)\n for ana_attr in analysis_attributes\n ]\n plotable_fnc = {\n 'SMAP': dec_tupl(create_mesh_grid, 'MESH'),\n 'PTS': dec_tupl(create_histogram, 'HIST')\n }\n return [\n plotable_fnc[stype](data,*plt_attr)\n for (data, stype), plt_attr in 
zip(ana_results,plot_attributes)\n ]", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for 
table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot", "def _get_safety_per_step_plots(self, ax, safety_stats):\n meta = self.meta\n violations_labels = meta['safety_constraints']\n per_step_violations = safety_stats['per_step_violations']\n\n for idx, violations in enumerate(per_step_violations.T):\n label = violations_labels[idx]\n ax.plot(\n np.arange(violations.shape[0]), violations, label=label, alpha=0.75)\n\n ax.set_title('Mean violations / timestep')\n ax.legend(loc='upper right')\n ax.set_ylabel('Mean # violations')\n ax.set_xlabel('Timestep')\n ax.plot()", "def table_distribution(data, axs, f=2):\n\n # Measures of distribution\n v_max = round(data.max(), f)\n v_95 = round((data.quantile(0.95)), f)\n v_90 = round((data.quantile(0.9)), f)\n v_75 = round((data.quantile(0.75)), f)\n v_50 = round((data.quantile(0.5)), f)\n v_25 = round((data.quantile(0.25)), f)\n v_10 = round((data.quantile(0.1)), f)\n v_05 = round((data.quantile(0.05)), f)\n v_min = round(data.min(), f)\n\n # pandas quantile returns a series which needs to be recombined\n # hence reset_index and transpose used in this case.\n quantiles = pd.concat(\n [v_max, v_95, v_90,\n v_75, v_50, v_25,\n v_10, v_05, v_min],\n axis=1\n ).transpose().reset_index()\n quantiles.drop('index', axis=1, inplace=True)\n\n # Use built in tex only, no depandancy needed\n sample_max_str = r\"maximum\"\n sample_95_str = r\"$Q(0.95)$\"\n sample_90_str = r\"$Q(0.90)$\"\n sample_75_str = r\"$Q(0.75)$\"\n sample_50_str = r\"$Q(0.50)$\"\n sample_25_str = r\"$Q(0.25)$\"\n sample_10_str = r\"$Q(0.10)$\"\n sample_05_str = r\"$Q(0.05)$\"\n sample_min_str = r\"minimum\"\n\n symbols = pd.DataFrame([sample_max_str, sample_95_str, sample_90_str,\n sample_75_str, sample_50_str, sample_25_str,\n sample_10_str, sample_05_str, sample_min_str])\n\n data = pd.concat([symbols, quantiles], axis=1)\n\n distribution = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='right',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\")\n\n title_color = '#9099A2'\n axs.set_title(\n ('Distribution'),\n fontsize=12,\n color=title_color)\n\n table_settings(axs, distribution)" ]
[ "0.64039075", "0.617259", "0.6116411", "0.59898764", "0.5832481", "0.5824055", "0.5775828", "0.57557863", "0.56359196", "0.56138456", "0.55944836", "0.5584237", "0.5572196", "0.55337507", "0.5525587", "0.5466326", "0.5462922", "0.5445544", "0.5437162", "0.54298645", "0.5424249", "0.54021883", "0.5392681", "0.53779197", "0.53762394", "0.5371545", "0.53496295", "0.53326875", "0.5327577", "0.531802" ]
0.62601465
1
Returns a plotted table on an axs. Based on statistics for distribution; can take one or more columns. Requires a matplotlib `axs` as input.
def table_distribution(data, axs, f=2):

    # Measures of distribution
    v_max = round(data.max(), f)
    v_95 = round((data.quantile(0.95)), f)
    v_90 = round((data.quantile(0.9)), f)
    v_75 = round((data.quantile(0.75)), f)
    v_50 = round((data.quantile(0.5)), f)
    v_25 = round((data.quantile(0.25)), f)
    v_10 = round((data.quantile(0.1)), f)
    v_05 = round((data.quantile(0.05)), f)
    v_min = round(data.min(), f)

    # pandas quantile returns a series which needs to be recombined
    # hence reset_index and transpose used in this case.
    quantiles = pd.concat(
        [v_max, v_95, v_90,
         v_75, v_50, v_25,
         v_10, v_05, v_min],
        axis=1
    ).transpose().reset_index()
    quantiles.drop('index', axis=1, inplace=True)

    # Use built in tex only, no depandancy needed
    sample_max_str = r"maximum"
    sample_95_str = r"$Q(0.95)$"
    sample_90_str = r"$Q(0.90)$"
    sample_75_str = r"$Q(0.75)$"
    sample_50_str = r"$Q(0.50)$"
    sample_25_str = r"$Q(0.25)$"
    sample_10_str = r"$Q(0.10)$"
    sample_05_str = r"$Q(0.05)$"
    sample_min_str = r"minimum"

    symbols = pd.DataFrame([sample_max_str, sample_95_str, sample_90_str,
                            sample_75_str, sample_50_str, sample_25_str,
                            sample_10_str, sample_05_str, sample_min_str])

    data = pd.concat([symbols, quantiles], axis=1)

    distribution = axs.table(
        cellText=data.values,
        loc='center',
        cellLoc="center",
        colLoc='right',
        # xmin, ymin, width, height
        bbox=(0, 0, 1, 1),
        edges="")

    title_color = '#9099A2'
    axs.set_title(
        ('Distribution'),
        fontsize=12,
        color=title_color)

    table_settings(axs, distribution)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]", "def descriptive_table(data, column_name, fig_size=(8, 8)):\n\n # Set up figure dimensions and sub components.\n sheet, axs = plt.subplots(4, 1, figsize=fig_size)\n\n # Heights ratio is based on the number of rows in each\n # table, this relates to the number of statistics each\n # sub table will show.\n gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])\n\n # Assign all subplots based on figure dimensions.\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n title_color = '#9099A2' # Dark grey\n plt.suptitle(\n 'Descriptive Statistics',\n fontsize=16,\n color=title_color,\n x=0.25\n )\n\n table_top(data, column_name, ax0)\n table_central_tend(data, ax1)\n table_disperssion(data, ax2)\n table_distribution(data, ax3)\n\n # Adjust the spacing so the title fits correctly.\n sheet.subplots_adjust(hspace=0.2, top=0.95)", "def ana_plot_graphs(plotables,plotable_titles,figure_title=None,show=False):\n axes = num_to_subplots_axes(len(plotables))\n fig = plt.figure()\n fig.suptitle(figure_title)\n for i, ((plotable,plot_type),ana_type) in enumerate(zip(plotables,plotable_titles)):\n if plot_type == 'MESH':\n #ax = plot_mesh_sub(fig, axes+(i+1,), *plotable)\n ax = plot_imshow_from_mesh_sub(fig, axes+(i+1,), *plotable)\n # Suplots indexing is from 1 => i+1\n ax.set_title(ana_type)\n elif plot_type == 'HIST':\n ax = plot_imshow_sub(\n fig, axes+(i+1,), plotable[0],\n (np.min(plotable[1]),np.max(plotable[1])),\n (np.min(plotable[2]),np.max(plotable[2]))\n )\n ax.set_title(ana_type)\n else:\n assert False, \"Not implemented\"\n if show:\n plt.show()", "def plot_distributions(x, variable_name):\n n_cols = x.shape[1]\n\n plot_rows = n_cols // 2\n plot_rows += n_cols % 2\n plot_cols = 2\n\n position = range(1, n_cols + 1)\n fig = plt.figure()\n\n for col_index in range(n_cols):\n col_values = x[:, col_index]\n ax = fig.add_subplot(plot_rows, plot_cols, position[col_index])\n ax.hist(col_values)\n ax.set_title(\"Distribution of variable {}{}\".format(variable_name, col_index + 1))\n ax.set_ylabel(\"Frequency\")\n ax.set_xlabel(\"Value\")\n\n plt.tight_layout()\n plt.savefig(\"plots/{}Dist.png\".format(variable_name))\n plt.show()", "def plot_table(mat, width=.15, ratio=4):\n vmax = np.abs(mat).max()\n vals = np.around(mat, 2)\n fig = plt.figure()\n ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],\n loc='center', cellColours=plt.cm.RdBu_r(\n Normalize(-vmax, vmax)(mat)))\n table.scale(1, ratio)\n return fig", "def plot_eda(\r\n df:pd.DataFrame,\r\n columns:list,\r\n path_plot_dir:str=None\r\n ) -> None:\r\n # Check inputs.\r\n if not os.path.exists(path_plot_dir):\r\n raise IOError(textwrap.dedent(\"\"\"\\\r\n Path does not 
exist: path_plot_dir =\r\n {path}\"\"\".format(path=path_plot_dir)))\r\n ################################################################################\r\n # Plot frequency distributions.\r\n print('#'*80)\r\n print('Plot frequency distributions (histograms) of columns.')\r\n for col in columns:\r\n print('#'*40)\r\n print('Feature: {col}'.format(col=col))\r\n print('Timestamp:', time.strftime(r'%Y-%m-%dT%H:%M:%S%Z', time.gmtime()))\r\n # Plot frequency distributions by transaction.\r\n if col != buyer_retrate:\r\n df_plot = df[['BuyerID', col, buyer_retrate]].copy()\r\n else:\r\n df_plot = df[['BuyerID', buyer_retrate]].copy()\r\n buyer_retrate_omax = buyer_retrate+'_omax'\r\n df_plot[buyer_retrate_omax] = df_plot[buyer_retrate] > buyer_retrate_max\r\n itemized_counts = {\r\n is_omax: grp[col].values\r\n for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}\r\n itemized_counts = collections.OrderedDict(\r\n sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))\r\n keys = itemized_counts.keys()\r\n bins = 50\r\n colors = sns.light_palette(sns.color_palette()[2], n_colors=len(keys))\r\n plt.hist(\r\n [itemized_counts[key] for key in itemized_counts.keys()],\r\n bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)\r\n plt.title('{col}\\nfrequency distribution'.format(col=col))\r\n plt.xlabel(col)\r\n plt.ylabel('Number of transactions with\\n{col} = X'.format(col=col))\r\n plt.legend(\r\n title='Buyer return\\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),\r\n loc='upper left', bbox_to_anchor=(1.0, 1.0))\r\n rect = (0, 0, 0.85, 1)\r\n plt.tight_layout(rect=rect)\r\n if path_plot_dir is not None:\r\n plt.savefig(\r\n os.path.join(path_plot_dir, 'freq-dist-transaction_'+col+'.png'),\r\n dpi=300)\r\n plt.show()\r\n\r\n # Plot frequency distributions by buyer.\r\n itemized_counts = {\r\n is_omax: grp[['BuyerID', col]].groupby(by='BuyerID').mean().values.flatten()\r\n for (is_omax, grp) in df_plot.groupby(by=buyer_retrate_omax)}\r\n itemized_counts = collections.OrderedDict(\r\n sorted(itemized_counts.items(), key=lambda tup: tup[0], reverse=False))\r\n keys = itemized_counts.keys()\r\n plt.hist(\r\n [itemized_counts[key] for key in itemized_counts.keys()],\r\n bins=bins, stacked=True, rwidth=1.0, label=keys, color=colors)\r\n plt.title('Mean {col} per buyer\\nfrequency distribution'.format(col=col))\r\n plt.xlabel('Mean '+col)\r\n plt.ylabel('Number of buyers with\\nmean {col} = X'.format(col=col))\r\n plt.legend(\r\n title='Buyer return\\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max),\r\n loc='upper left', bbox_to_anchor=(1.0, 1.0))\r\n plt.tight_layout(rect=rect)\r\n if path_plot_dir is not None:\r\n plt.savefig(\r\n os.path.join(path_plot_dir, 'freq-dist-buyer_'+col+'.png'),\r\n dpi=300)\r\n plt.show()\r\n\r\n ################################################################################\r\n # Plot (timeseries) traces for fractional quantities vs fraction of completed transactions.\r\n # Columns to plot: catgory (cat), <category>_numTransactions (trans), <category>_frac* (col)\r\n print('#'*80)\r\n print('Plot traces (timeseries) for fractional quantities vs fraction of completed transactions.')\r\n plot_cols = list()\r\n for col in df.columns:\r\n if '_frac' in col:\r\n cat = col.split('_frac')[0]\r\n trans = cat+'_numTransactions'\r\n plot_cols.append([cat, trans, col])\r\n for (col_cat, col_trans, col_frac) in plot_cols:\r\n print('#'*40)\r\n print('Category column: {col}'.format(col=col_cat))\r\n print('Transaction column: 
{col}'.format(col=col_trans))\r\n print('Fraction column: {col}'.format(col=col_frac))\r\n print('Timestamp:', time.strftime(r'%Y-%m-%dT%H:%M:%S%Z', time.gmtime()))\r\n # Weight categorical values by number of transactions. \r\n assert (df[[col_cat, col_trans]].groupby(by=col_cat).last().sum() == len(df)).all()\r\n cat_wts = df[[col_cat, col_trans]].groupby(by=col_cat).last()/len(df)\r\n cat_wts.columns = [col_cat+'_wts']\r\n cats = cat_wts.sample(n=30, replace=True, weights=col_cat+'_wts').index.values\r\n # Make plot.\r\n for idx in range(len(cats)):\r\n cat = cats[idx]\r\n tfmask = df[col_cat] == cat\r\n xvals = (df.loc[tfmask, col_trans]/sum(tfmask)).values\r\n yvals = df.loc[tfmask, col_frac].values\r\n xvals_omax = (df.loc[np.logical_and(tfmask, df[buyer_retrate] > buyer_retrate_max), col_trans]/sum(tfmask)).values\r\n yvals_omax = df.loc[np.logical_and(tfmask, df[buyer_retrate] > buyer_retrate_max), col_frac].values\r\n if len(xvals) > 51: # downsample for speed\r\n step = 1/50\r\n xvals_resampled = np.arange(start=0, stop=1+step, step=step)\r\n yvals_resampled = np.interp(x=xvals_resampled, xp=xvals, fp=yvals)\r\n (xvals, yvals) = (xvals_resampled, yvals_resampled)\r\n if len(xvals_omax) > 51: # downsample for speed\r\n idxs_omax = np.random.choice(range(len(xvals_omax)), size=51, replace=False)\r\n xvals_omax_resampled = xvals_omax[idxs_omax]\r\n yvals_omax_resampled = yvals_omax[idxs_omax]\r\n (xvals_omax, yvals_omax) = (xvals_omax_resampled, yvals_omax_resampled)\r\n plt.plot(\r\n xvals, yvals,\r\n marker='.', alpha=0.1, color=sns.color_palette()[0])\r\n if idx == 0:\r\n label = 'Buyer return\\nrate > {retrate:.0%}'.format(retrate=buyer_retrate_max)\r\n else:\r\n label = None\r\n plt.plot(\r\n xvals_omax, yvals_omax,\r\n marker='o', alpha=0.2, linestyle='',\r\n color=sns.color_palette()[2], label=label)\r\n plt.title('{col_frac} vs\\nfraction of transactions completed'.format(col_frac=col_frac))\r\n plt.xlabel(\"Fraction of transactions completed\")\r\n plt.ylabel(col_frac)\r\n plt.legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))\r\n rect = (0, 0, 0.80, 1)\r\n plt.tight_layout(rect=rect)\r\n if path_plot_dir is not None:\r\n plt.savefig(\r\n os.path.join(path_plot_dir, 'trace_'+col_frac+'.png'),\r\n dpi=300)\r\n plt.show()\r\n return None", "def dist_plotter(data: pd.DataFrame, variable: str):\n fig, axs = plt.subplots(2,5, gridspec_kw={'wspace': 0.5, 'hspace': 0.5}, figsize=(15, 8), sharex = False)\n\n for ax, dta in zip(axs.flat, data.values()) :\n sns.distplot(dta[f'{variable}'], hist= False, ax=ax)\n ax.set_xlabel(f'{variable}')\n \n for ax, dta in zip(axs.flatten(), data.keys()):\n ax.set_title(dta)\n\n return plt.show()", "def get_table_ms(plot=True, ax=None):\n table = ascii.read(tgas_path, delimiter=';', data_start=3)\n\n # floatify:\n table['BTmag'] = table['BTmag'].astype(float)\n table['VTmag'] = table['VTmag'].astype(float)\n\n # Compute the galactic latitude of each star, add to table\n coords = SkyCoord(ra=table['RA_ICRS'] * u.deg,\n dec=table['DE_ICRS'] * u.deg, frame='icrs')\n galactic_coords = coords.transform_to('galactic')\n abs_galactic_latitude = abs(galactic_coords.b).degree\n table.add_column(Column(data=abs_galactic_latitude, name='b'))\n\n # Compute distance, CMD\n def color_cut(b_minus_v):\n return -9. + 4.9 * b_minus_v\n\n parallax_mas = table['Plx']\n Vmag = table['VTmag']\n bt_minus_vt = table['BTmag'] - table['VTmag']\n\n parallax_arcsec = parallax_mas / 1000\n dist_pc = 1. 
/ parallax_arcsec\n\n # Add astrometric uncertainty column to table\n table.add_column(Column(data=sigma_fov(table['<Gmag>']), name='sigma_fov'))\n\n # Add a distance column to the table:\n table.add_column(Column(data=dist_pc * u.pc, name='distance'))\n\n # Add a Nfov column to the table:\n table.add_column(Column(data=Nprime_fov(abs_galactic_latitude),\n name='N_fov'))\n\n M_V = Vmag - 5 * (np.log10(dist_pc) + 1)\n\n b_minus_v_lower = 0.6 # 0.64 # (B-V)_sun = 0.65\n b_minus_v_upper = 2\n\n main_sequence = ((np.abs(M_V - color_cut(bt_minus_vt)) < 1.) &\n (bt_minus_vt > b_minus_v_lower) &\n (bt_minus_vt < b_minus_v_upper))\n\n main_sequence_table = table[main_sequence]\n\n # Now match the B-V color table from HIPPARCOS to the main sequence TGAS table\n hipparcos_table = ascii.read(hipparcos_path, delimiter=';', header_start=0,\n data_start=3)\n hipparcos_table.add_index(\"HIP\")\n\n main_sequence_table['HIP'][main_sequence_table['HIP'].mask] = 0\n\n main_sequence_color_table = join(main_sequence_table, hipparcos_table,\n keys='HIP')\n\n # Cut again by the color cuts, this time with the real Johnson B and V,\n # rather than Tycho magnitudes:\n main_sequence = ((main_sequence_color_table['B-V'].data.data < b_minus_v_upper) &\n (main_sequence_color_table['B-V'].data.data > b_minus_v_lower))\n\n main_sequence_color_table = main_sequence_color_table[main_sequence]\n\n # Add in stellar radii with color-radius relation from Boyajian 2012\n R_star = bv_to_radius(main_sequence_color_table['B-V'].data.data)\n main_sequence_color_table.add_column(Column(data=R_star, name='R_star'))\n\n # Add in a column of interferometric angular diameters from\n # Boyajian 2012 where available:\n boyajian = ascii.read(boyajian_path)\n ang_diams = np.zeros(len(main_sequence_color_table))\n\n for row in boyajian:\n ang_diams[row['HIP'] == main_sequence_color_table['HIP']] = row['D(UD)']\n\n main_sequence_color_table.add_column(Column(data=ang_diams,\n name='angular_diameter'))\n\n boyajian_radii = main_sequence_color_table['angular_diameter'] != 0\n half_angle = (main_sequence_color_table['angular_diameter'][boyajian_radii]\n * u.marcsec/2)\n distance_pc = (main_sequence_color_table['Plx_1'][\n boyajian_radii].data.data / 1000)**-1 * u.pc\n measured_radii = distance_pc * np.tan(half_angle)\n\n R_star[boyajian_radii] = measured_radii\n\n # In radius reference column, `1`==color-radius estimate;\n # `2`==interferometric measurement\n refs = np.ones(len(R_star))\n refs[boyajian_radii] = 2\n main_sequence_color_table.add_column(Column(data=refs, name='rstar_ref'))\n\n # Add column containing approximate stellar effective temperatures based\n # on B-V -> T_eff table from Eric Mamajek:\n # http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt\n mamajek = ascii.read(mamajek_path, format='commented_header')\n bv_to_teff = lambda bv: np.interp(bv, mamajek['B-V'], mamajek['Teff'])\n approx_teffs = bv_to_teff(main_sequence_color_table['B-V'])\n main_sequence_color_table.add_column(Column(data=approx_teffs, name='Teff'))\n\n if plot:\n if ax is None:\n ax = plt.gca()\n polygon_x = [0.6, 0.6, 2.0, 2.0, 0.6]\n polygon_y = [color_cut(0.6) - 1, color_cut(0.6) + 1,\n color_cut(2) + 1, color_cut(2) - 1,\n color_cut(0.6) - 1]\n\n H, xedges, yedges = np.histogram2d(bt_minus_vt[abs(bt_minus_vt) > 1e-3],\n M_V[abs(bt_minus_vt) > 1e-3],\n bins=1000)\n\n extent = [xedges.min(), xedges.max(), yedges.max(), yedges.min()]\n ax.imshow(np.log10(H.T), extent=extent, cmap=plt.cm.Greys, aspect=0.2)\n ax.plot(polygon_x, polygon_y, 
lw=2, color='r', ls='--')\n\n ax.set(xlim=[-0.5, 3], ylim=[2, -15],\n ylabel='$M_{VT}$', xlabel=\"BT - VT\")\n\n return main_sequence_color_table", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n figs.setup_amp_plots_grid(\"row\",\n title=\"Correlation: imaging region and serial overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n figs.setup_amp_plots_grid(\"col\",\n title=\"Correlation: imaging region and paralell overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n\n dtab = dtables.get_table(\"correl\")\n for i in range(16):\n s_correl = dtab['s_correl_a%02i' % i]\n p_correl = dtab['p_correl_a%02i' % i]\n figs.get_obj('row', 'axs').flat[i].hist(s_correl, bins=100, range=(-1., 1.))\n figs.get_obj('col', 'axs').flat[i].hist(p_correl, bins=100, range=(-1., 1.))", "def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n\n figs.setup_amp_plots_grid(\"ratio-row\", title=\"sflat ratio by row\",\n xlabel=\"row\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'row', 'ratio_row',\n x_name='row_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"ratio-col\", title=\"sflat ratio by col\",\n xlabel=\"col\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'col', 'ratio_col',\n x_name='col_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"scatter\", title=\"sflat ratio v. sbias\",\n xlabel=\"Superbias [ADU]\", ylabel=\"Ratio\")\n\n figs.plot_amp_arrays(\"mask\", self.quality_masks, vmin=0, vmax=3)\n\n for i, (amp, sbias_image) in enumerate(sorted(self.superbias_images)):\n figs.plot_two_image_hist2d('scatter', i,\n sbias_image,\n self.ratio_images[amp],\n bins=(200, 200),\n range=((-50, 50.), (0.018, 0.022)))", "def table_settings(axs_num, table_name):\n\n table_props = table_name.properties()\n table_cells = table_props['child_artists'] # matplotlib setting\n # iterate through cells of a table to change properties\n for cell in table_cells:\n cell._text.set_fontsize(15)\n cell._text.set_color('#192231') # Light grey\n\n # Set axis tick labels off, i.e. 
empty [].\n axs_num.set_yticklabels([])\n axs_num.set_xticklabels([])\n\n # Seaborn settings\n sns.set_style(\"whitegrid\")\n sns.set_style({'axes.grid': False})\n sns.set_context(\n \"poster\",\n rc={'font.sans-serif': 'Gill Sans MT'}\n )\n\n sns.despine(offset=2, top=False, trim=False, left=True, bottom=True)\n\n # Leave one line on top to break up the table\n axs_num.spines['top'].set_color('#9099A2')\n\n # Set tick labels to white in case they still are showing,\n # perhaps redudent but this is not perfect.\n plt.setp(\n [axs_num.get_xticklines(), axs_num.get_yticklines()],\n color=\"white\"\n )", "def plot_single(df_metrics):\n apfd = df_metrics['apfd']\n\n miu = np.round(np.mean(apfd), 2)\n sigma = np.round(np.std(apfd), 2)\n label = 'regression' + '\\n $\\mu$ - ' + str(miu) + ' $\\sigma$ - ' + str(sigma)\n\n sns.distplot(apfd, kde=True,\n bins=int(180 / 5), color=sns.color_palette()[0],\n hist_kws={'edgecolor': 'black'},\n kde_kws={'linewidth': 4, 'clip': (0.0, 1.0)}, label=label)\n\n plt.legend(frameon=True, loc='upper left', prop={'size': 20})\n plt.xlabel('APFD')\n\n #plt.title('APFD Distribution - 100 revisions ')\n plt.show()", "def matplotlib_histogram() -> Tuple:\n df = read_dataset(Path('..', '..', 'iris.csv'))\n df.drop(\"species\", axis=1, inplace=True)\n top_4_columns = list(df.columns)[:4]\n\n # Ref: https://stackoverflow.com/questions/31726643/how-do-i-get-multiple-subplots-in-matplotlib\n fig, ax = plt.subplots(nrows=2, ncols=2)\n\n c = 0\n for row in ax:\n for col in row:\n df_column = top_4_columns[c]\n col.hist(df[df_column].values)\n c = c + 1\n\n return fig, ax", "def plot_data(indf, prefix='html'):\n list_of_plots = []\n# scatter_matrix(indf)\n# pl.savefig('scatter_matrix.png')\n# list_of_plots.append('scatter_matrix.png')\n\n for col in indf:\n pl.clf()\n# cond = indf[col].notnull()\n# v = indf[cond][col]\n v = indf[col]\n# nent = len(v)\n# hmin, hmax = v.min(), v.max()\n# xbins = np.linspace(hmin,hmax,nent)\n# hmin, hmax, nbin = BOUNDS[col]\n# xbins = np.linspace(hmin, hmax, nbin)\n v.hist(bins=20, histtype='step', normed=True, log=True)\n pl.title(col)\n pl.savefig('%s_hist.png' % col)\n list_of_plots.append('%s_hist.png' % col)\n\n create_html_page_of_plots(list_of_plots, prefix)\n return", "def plot_results(self, a):\n import matplotlib.pyplot as plt\n fig, axes = plt.subplots(nrows=len(a.data_vars), sharex='all', sharey='all')\n for ax, var in zip(axes, a.data_vars):\n data = a[var]\n plt.sca(ax)\n data.plot(x='time', cmap=plt.cm.viridis_r, yincrease=False, robust=True)\n plt.show()", "def perm_plot(obs, perm, p, fig_title, tails = 1):\n plot_rows = len(perm.keys())\n \n fig, axes = plt.subplots(plot_rows, 1)\n\n for n, term in enumerate(perm.keys()):\n\n if plot_rows > 1:\n sns.distplot(perm[term], ax = axes[n], norm_hist = True)\n\n #Formatting\n axes[n].axvline(obs[term], 0, 1, linestyle = '--', color = [1, 0, 0], label = 'Observed')\n \n if tails == -1:\n thresh = np.percentile(perm[term], 5, interpolation = 'nearest')\n axes[n].axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n \n if tails == 1:\n thresh = np.percentile(perm[term], 95, interpolation = 'nearest')\n axes[n].axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n elif tails == 2:\n thresh = np.percentile(perm[term], [2.5, 97.5], interpolation = 'nearest')\n axes[n].axvline(thresh[0], 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n axes[n].axvline(thresh[1], 0, 1, linestyle = '-', color = [0, 0, 0])\n \n 
axes[n].set_title(term, fontsize = 16, x = 0.1, y = 1.05)\n axes[n].set_xlabel('Permuted Test Value', fontsize = 15)\n if p[term] < 0.001:\n axes[n].text(0.6, 0.5, 'p < 0.001', fontsize = 20, transform = axes[n].transAxes)\n else:\n axes[n].text(0.6, 0.5, 'p = ' + str(np.round(p[term], decimals = 5)), fontsize = 20, transform = axes[n].transAxes) \n \n\n for tick in axes[n].xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in axes[n].yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n \n if n == np.around(plot_rows / 2, decimals = 0) - 1:\n axes[n].legend(fontsize = 20, loc = \"center left\", bbox_to_anchor = (1, 0.5), numpoints = 1)\n\n\n else:\n sns.distplot(perm[term], ax = axes, norm_hist = True)\n\n #Formatting\n axes.axvline(obs[term], 0, 1, linestyle = '--', color = [1, 0, 0], label = 'Observed')\n \n if tails == -1:\n thresh = np.percentile(perm[term], 5, interpolation = 'nearest')\n axes.axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n \n if tails == 1:\n thresh = np.percentile(perm[term], 95, interpolation = 'nearest')\n axes.axvline(thresh, 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n \n elif tails == 2:\n thresh = np.percentile(perm[term], [2.5, 97.5], interpolation = 'nearest')\n axes.axvline(thresh[0], 0, 1, linestyle = '-', color = [0, 0, 0], label = 'Threshold')\n axes.axvline(thresh[1], 0, 1, linestyle = '-', color = [0, 0, 0])\n \n \n axes.set_title(term, fontsize = 16, x = 0.1, y = 1.05)\n axes.set_xlabel('Permuted Test Value', fontsize = 15)\n if p[term] < 0.001:\n axes.text(0.6, 0.5, 'p < 0.001', fontsize = 20, transform = axes.transAxes)\n else:\n axes.text(0.6, 0.5, 'p = ' + str(np.round(p[term], decimals = 5)), fontsize = 20, transform = axes.transAxes) \n \n for tick in axes.xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in axes.yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n\n axes.legend(fontsize = 20, loc = \"center left\", bbox_to_anchor = (1, 0.5), numpoints = 1)\n\n if fig_title != None: \n fig.suptitle(fig_title, fontsize = 24, y = 1.05) \n \n plt.tight_layout() \n plt.show()\n \n return(fig, axes)", "def table_disperssion(data, axs, f=2):\n\n # Measures of disperssion\n v_bessel_sd = round(data.std(), f)\n v_var = round(data.var(), f)\n v_range = round((data.max()-data.min()), f)\n v_iqr = round((data.quantile(0.75)-data.quantile(0.25)), f)\n v_mad = round(data.mad(), f)\n\n # Use built in tex only, no depandancy needed\n sample_std_str = \"stan. dev.\" + r' $s$ '\n sample_var_str = \"variance, \" + '$s^2$'\n sample_range_str = \"range\"\n sample_iqr_str = \"$IQR$\"\n sample_mad_str = \"mean abs. 
dev.\"\n\n symbols = pd.DataFrame(\n [sample_std_str, sample_iqr_str,\n sample_mad_str, sample_var_str,\n sample_range_str]\n )\n val = pd.DataFrame(\n [v_bessel_sd, v_iqr,\n v_mad, v_var, v_range]\n )\n data = pd.concat([symbols, val], axis=1)\n\n disperssion = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='right',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\") # No line\n\n title_color = '#9099A2' # Dark Grey\n axs.set_title(\n ('Disperssion'),\n fontsize=12,\n color=title_color)\n\n table_settings(axs, disperssion)", "def make_disp_sq_plots(comp_key,conn):\n\n (fin,) = conn.execute(\"select fout from comps where function = 'track_stats' and comp_key = ?\",\n (comp_key,)).fetchone()\n\n F = h5py.File(fin,'r')\n \n g = F[_fd('disp_sq_hist',comp_key)]\n cmap = color_mapper(0,len(g))\n\n (fig,ax) = plots.set_up_plot()\n istatus = plots.non_i_plot_start();\n for s in g:\n step = int(s[-7:])\n val = g[s]['bin_value'][:]\n \n ax.semilogy(g[s]['bin_edges'],val,color = cmap.get_color(step))\n F.close()\n\n (iden_fun,dset_key ) = conn.execute(\"select function,dset_key from comps where \" +\n \"comp_key in \" +\n \"(select iden_key from trk_stat_prams where \"+\n \"comp_key = ?)\",(comp_key,)).fetchone()\n\n ax.set_title(\"dset: \" + str(dset_key) + \" \" + iden_fun)\n\n plots.non_i_plot_stop(istatus)", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n sumtable = dtables['biasoscorr_stats']\n figs.plot_stat_color('mean-s', sumtable['s_correl_mean'].reshape(9, 16))\n figs.plot_stat_color('mean-p', sumtable['p_correl_mean'].reshape(9, 16))", "def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()", "def table_central_tend(data, axs, f=2):\n\n # Central tendacy\n v_mean = round(data.mean(), f)\n v_median = 
round(data.median(), f)\n\n # Use built in tex only, no depandancy needed\n sample_mean_str = \"mean, \" + r' $\\bar x$ '\n sample_median_str = \"median\"\n\n # Concatenate the statistics and symbols\n symbols = pd.DataFrame([sample_mean_str, sample_median_str])\n val = pd.DataFrame([v_mean, v_median])\n data = pd.concat([symbols, val], axis=1)\n\n # Plot onto matplotlib axs\n central_tend = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='center',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\" # No line\n )\n\n title_color = '#9099A2' # Dark grey\n axs.set_title(\n ('Central Tendancy'),\n fontsize=12,\n color=title_color\n )\n\n table_settings(axs, central_tend)", "def create_gnuplot_statistic(statistic_entries):\n grouped_by_number_of_entries = {}\n for statistic in statistic_entries:\n key = statistic['max entries']\n if key not in grouped_by_number_of_entries:\n grouped_by_number_of_entries[key] = [statistic]\n else:\n grouped_by_number_of_entries[key].append(statistic)\n\n all_plots = multiplot(\"learn.py statistics\", title_font=(\"\", 18), plots_per_row=2)\n\n pos = 0\n max_pos = len(grouped_by_number_of_entries) - 1\n for key, statistic in grouped_by_number_of_entries.items():\n average_time_plot = plot()\n average_time_plot.set_ylabel(\"seconds\")\n if pos == max_pos:\n average_time_plot.set_xlabel(\"n'th test run\")\n average_time_plot.set_xtics(\"1\")\n average_time_plot.set_ytics(\"0.5\")\n average_time_plot.set_line_style(1, \"lc rgb \\\"#00ff00\\\" lw 2\")\n average_time_plot.set_fill_style(1, \"transparent solid 0.4 border\")\n values = list(enumerate([average(entry) for entry in statistic], 1))\n average_time_plot.add_curve(\"average times (max entries=%d)\" % key,\n values=values, mode=plot.FILLEDCURVES)\n\n all_plots.add_plot(average_time_plot)\n\n number_of_tests_plot = plot()\n number_of_tests_plot.set_ylabel(\"# tests\")\n if pos == max_pos:\n number_of_tests_plot.set_xlabel(\"n'th test run\")\n number_of_tests_plot.set_xtics(\"1\")\n number_of_tests_plot.set_ytics(\"1\")\n number_of_tests_plot.set_line_style(1, \"lc rgb \\\"#00ff00\\\" lw 2\")\n number_of_tests_plot.set_fill_style(1, \"transparent solid 0.4 border\")\n values = list(enumerate([entry['correct answers'] + entry['wrong answers']\n for entry in statistic], 1))\n number_of_tests_plot.add_curve(\"# of tests (max entries=%d)\" % key,\n values=values, mode=plot.FILLEDCURVES)\n\n all_plots.add_plot(number_of_tests_plot)\n pos += 1\n\n calculated_height = len(grouped_by_number_of_entries) * 250\n script(\"learn.gp\", all_plots, width=800, height=calculated_height).execute()", "def plot_analogs(galaxies, mw, mwanalogs, save_fig=False, **kwargs):\n\n axis_font = {\"size\": \"20\"}\n for column in mw.columns:\n fig, ax = plt.subplots()\n low_lim = np.min(galaxies[column].values[mwanalogs])\n upper_lim = np.max(galaxies[column].values[mwanalogs])\n plt.hist(\n galaxies[column].values[mwanalogs],\n density=True,\n bins=20,\n histtype=\"stepfilled\",\n alpha=0.5,\n )\n x = np.linspace(low_lim, upper_lim, len(mwanalogs))\n plt.plot(x, norm.pdf(x, mw.at[\"mean\", column], mw.at[\"sigma\", column]), \"r-\")\n plt.xlabel(column, **axis_font)\n plt.ylabel(r\"N\", **axis_font)\n plt.xticks(fontsize=15)\n plt.yticks(fontsize=15)\n plt.tight_layout()\n if save_fig:\n plt.savefig(fname=column+\".pdf\", format=\"pdf\")\n else:\n plt.show()", "def ana_results_to_plotables(ana_results,analysis_attributes):\n plot_attributes = [\n (ana_attr.numbers,ana_attr.range)\n for ana_attr 
in analysis_attributes\n ]\n plotable_fnc = {\n 'SMAP': dec_tupl(create_mesh_grid, 'MESH'),\n 'PTS': dec_tupl(create_histogram, 'HIST')\n }\n return [\n plotable_fnc[stype](data,*plt_attr)\n for (data, stype), plt_attr in zip(ana_results,plot_attributes)\n ]", "def _create_data_plot(df, norms):\n # plot data\n ax = df.plot.scatter('x', 'y')\n\n # plot estimates\n for name, norm in norms.items():\n model = sm.RLM(y, X, M=norm)\n results = model.fit()\n estimate = results.params\n _plot_model(params=estimate,\n label=name,\n range_=(df['x'].min(), df['x'].max()))\n\n # make pretty\n plt.rcParams['figure.figsize'] = (15.0, 10.0)\n ax.legend()\n plt.show()", "def test_plots(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n fig = pesummary_data.plot(type=\"td\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"fd\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(1126259446 + 20., type=\"omegascan\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"spectrogram\")\n assert isinstance(fig, matplotlib.figure.Figure)", "def plot_multiple_distributions(df: pd.DataFrame, title: str, column_bar_name: list,\n column_category_name: str, plot_width: int = 330, plot_height: int = 330,\n colours: list = ['#00BFA5', \"#8c9eff\", \"#536dfe\"], bins: int = 10):\n\n hover = HoverTool()\n\n p = figure(title=title, plot_width=plot_width, plot_height=plot_height, tools=[\"save\", hover])\n\n for ind, category_ in enumerate(sorted(df[column_category_name].unique())):\n temp_df = df[df[column_category_name] == category_]\n arr_hist, edges = np.histogram(temp_df[column_bar_name],\n bins = bins,\n range = [0, 1])\n\n hist_df = pd.DataFrame({'dis': arr_hist,\n 'left': edges[:-1],\n 'right': edges[1:]})\n source = ColumnDataSource(hist_df)\n p.quad(bottom=0, top='dis', left='left', right='right', fill_color=colours[ind], source=source,\n fill_alpha=0.6**ind, line_width=0, legend=column_category_name + \": \"+ str(category_))\n\n p.title.text_font = p.xaxis.axis_label_text_font = p.yaxis.axis_label_text_font = \"Helvetica Neue\"\n p.xgrid.visible = p.ygrid.visible = False\n\n tooltips = [(\"dis\", \"@\" + \"dis\" + '{0%}')]\n\n hover = p.select(dict(type=HoverTool))\n hover.tooltips = tooltips\n\n return p", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in 
range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot" ]
[ "0.6402386", "0.6203925", "0.6012154", "0.57367206", "0.56446254", "0.56413215", "0.56215644", "0.56149924", "0.55738074", "0.555902", "0.5548091", "0.5471502", "0.5443948", "0.540761", "0.53955144", "0.53447646", "0.53437424", "0.53368604", "0.53203785", "0.5313929", "0.52982646", "0.52924454", "0.5278651", "0.5268041", "0.5262209", "0.5250009", "0.5248203", "0.5239337", "0.52358896", "0.5227844" ]
0.6416024
0
Returns a plotted table on an axs. Simply creates a small table with the count of samples and column headers. Intended to act as the top of a set of tables. Requires a matplotlib `axs` as input.
def table_top(data, name, axs):

    # Count
    v_count = []
    for i in name:
        v_col_size = data[i].size
        v_count.append(v_col_size)

    # Use built in tex only, no depandancy needed
    sample_count_str = "samples, " + r' $n$ '

    symbols = pd.DataFrame([sample_count_str])
    val = pd.DataFrame([v_count])
    data = pd.concat([symbols, val], axis=1)

    # Get column names out of list
    labels = [""]
    for i in name:
        labels.append(i)

    top = axs.table(
        cellText=data.values,
        colLabels=labels,
        loc='center',
        cellLoc="center",
        colLoc='center',
        # xmin, ymin, width, height
        bbox=(0, 0, 1, 1),
        edges="")

    table_settings(axs, top)

    # As the above table_settings function sets black
    # line on top overwrite that setting
    axs.spines['top'].set_color('white')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]", "def table_settings(axs_num, table_name):\n\n table_props = table_name.properties()\n table_cells = table_props['child_artists'] # matplotlib setting\n # iterate through cells of a table to change properties\n for cell in table_cells:\n cell._text.set_fontsize(15)\n cell._text.set_color('#192231') # Light grey\n\n # Set axis tick labels off, i.e. empty [].\n axs_num.set_yticklabels([])\n axs_num.set_xticklabels([])\n\n # Seaborn settings\n sns.set_style(\"whitegrid\")\n sns.set_style({'axes.grid': False})\n sns.set_context(\n \"poster\",\n rc={'font.sans-serif': 'Gill Sans MT'}\n )\n\n sns.despine(offset=2, top=False, trim=False, left=True, bottom=True)\n\n # Leave one line on top to break up the table\n axs_num.spines['top'].set_color('#9099A2')\n\n # Set tick labels to white in case they still are showing,\n # perhaps redudent but this is not perfect.\n plt.setp(\n [axs_num.get_xticklines(), axs_num.get_yticklines()],\n color=\"white\"\n )", "def table_distribution(data, axs, f=2):\n\n # Measures of distribution\n v_max = round(data.max(), f)\n v_95 = round((data.quantile(0.95)), f)\n v_90 = round((data.quantile(0.9)), f)\n v_75 = round((data.quantile(0.75)), f)\n v_50 = round((data.quantile(0.5)), f)\n v_25 = round((data.quantile(0.25)), f)\n v_10 = round((data.quantile(0.1)), f)\n v_05 = round((data.quantile(0.05)), f)\n v_min = round(data.min(), f)\n\n # pandas quantile returns a series which needs to be recombined\n # hence reset_index and transpose used in this case.\n quantiles = pd.concat(\n [v_max, v_95, v_90,\n v_75, v_50, v_25,\n v_10, v_05, v_min],\n axis=1\n ).transpose().reset_index()\n quantiles.drop('index', axis=1, inplace=True)\n\n # Use built in tex only, no depandancy needed\n sample_max_str = r\"maximum\"\n sample_95_str = r\"$Q(0.95)$\"\n sample_90_str = r\"$Q(0.90)$\"\n sample_75_str = r\"$Q(0.75)$\"\n sample_50_str = r\"$Q(0.50)$\"\n sample_25_str = r\"$Q(0.25)$\"\n sample_10_str = r\"$Q(0.10)$\"\n sample_05_str = r\"$Q(0.05)$\"\n sample_min_str = r\"minimum\"\n\n symbols = pd.DataFrame([sample_max_str, sample_95_str, sample_90_str,\n sample_75_str, sample_50_str, sample_25_str,\n sample_10_str, sample_05_str, sample_min_str])\n\n data = pd.concat([symbols, quantiles], axis=1)\n\n distribution = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='right',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\")\n\n title_color = '#9099A2'\n axs.set_title(\n ('Distribution'),\n fontsize=12,\n color=title_color)\n\n table_settings(axs, distribution)", "def descriptive_table(data, column_name, fig_size=(8, 8)):\n\n # Set up figure dimensions and sub components.\n sheet, axs = plt.subplots(4, 1, figsize=fig_size)\n\n # Heights ratio is based on the number of rows in each\n # table, this 
relates to the number of statistics each\n # sub table will show.\n gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])\n\n # Assign all subplots based on figure dimensions.\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n title_color = '#9099A2' # Dark grey\n plt.suptitle(\n 'Descriptive Statistics',\n fontsize=16,\n color=title_color,\n x=0.25\n )\n\n table_top(data, column_name, ax0)\n table_central_tend(data, ax1)\n table_disperssion(data, ax2)\n table_distribution(data, ax3)\n\n # Adjust the spacing so the title fits correctly.\n sheet.subplots_adjust(hspace=0.2, top=0.95)", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot", "def plot_table(mat, width=.15, ratio=4):\n vmax = np.abs(mat).max()\n vals = np.around(mat, 2)\n fig = plt.figure()\n ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],\n 
loc='center', cellColours=plt.cm.RdBu_r(\n Normalize(-vmax, vmax)(mat)))\n table.scale(1, ratio)\n return fig", "def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def get_table_ms(plot=True, ax=None):\n table = ascii.read(tgas_path, delimiter=';', data_start=3)\n\n # floatify:\n table['BTmag'] = table['BTmag'].astype(float)\n table['VTmag'] = table['VTmag'].astype(float)\n\n # Compute the galactic latitude of each star, add to table\n coords = SkyCoord(ra=table['RA_ICRS'] * u.deg,\n dec=table['DE_ICRS'] * u.deg, frame='icrs')\n galactic_coords = coords.transform_to('galactic')\n abs_galactic_latitude = abs(galactic_coords.b).degree\n table.add_column(Column(data=abs_galactic_latitude, name='b'))\n\n # Compute distance, CMD\n def color_cut(b_minus_v):\n return -9. + 4.9 * b_minus_v\n\n parallax_mas = table['Plx']\n Vmag = table['VTmag']\n bt_minus_vt = table['BTmag'] - table['VTmag']\n\n parallax_arcsec = parallax_mas / 1000\n dist_pc = 1. / parallax_arcsec\n\n # Add astrometric uncertainty column to table\n table.add_column(Column(data=sigma_fov(table['<Gmag>']), name='sigma_fov'))\n\n # Add a distance column to the table:\n table.add_column(Column(data=dist_pc * u.pc, name='distance'))\n\n # Add a Nfov column to the table:\n table.add_column(Column(data=Nprime_fov(abs_galactic_latitude),\n name='N_fov'))\n\n M_V = Vmag - 5 * (np.log10(dist_pc) + 1)\n\n b_minus_v_lower = 0.6 # 0.64 # (B-V)_sun = 0.65\n b_minus_v_upper = 2\n\n main_sequence = ((np.abs(M_V - color_cut(bt_minus_vt)) < 1.) 
&\n (bt_minus_vt > b_minus_v_lower) &\n (bt_minus_vt < b_minus_v_upper))\n\n main_sequence_table = table[main_sequence]\n\n # Now match the B-V color table from HIPPARCOS to the main sequence TGAS table\n hipparcos_table = ascii.read(hipparcos_path, delimiter=';', header_start=0,\n data_start=3)\n hipparcos_table.add_index(\"HIP\")\n\n main_sequence_table['HIP'][main_sequence_table['HIP'].mask] = 0\n\n main_sequence_color_table = join(main_sequence_table, hipparcos_table,\n keys='HIP')\n\n # Cut again by the color cuts, this time with the real Johnson B and V,\n # rather than Tycho magnitudes:\n main_sequence = ((main_sequence_color_table['B-V'].data.data < b_minus_v_upper) &\n (main_sequence_color_table['B-V'].data.data > b_minus_v_lower))\n\n main_sequence_color_table = main_sequence_color_table[main_sequence]\n\n # Add in stellar radii with color-radius relation from Boyajian 2012\n R_star = bv_to_radius(main_sequence_color_table['B-V'].data.data)\n main_sequence_color_table.add_column(Column(data=R_star, name='R_star'))\n\n # Add in a column of interferometric angular diameters from\n # Boyajian 2012 where available:\n boyajian = ascii.read(boyajian_path)\n ang_diams = np.zeros(len(main_sequence_color_table))\n\n for row in boyajian:\n ang_diams[row['HIP'] == main_sequence_color_table['HIP']] = row['D(UD)']\n\n main_sequence_color_table.add_column(Column(data=ang_diams,\n name='angular_diameter'))\n\n boyajian_radii = main_sequence_color_table['angular_diameter'] != 0\n half_angle = (main_sequence_color_table['angular_diameter'][boyajian_radii]\n * u.marcsec/2)\n distance_pc = (main_sequence_color_table['Plx_1'][\n boyajian_radii].data.data / 1000)**-1 * u.pc\n measured_radii = distance_pc * np.tan(half_angle)\n\n R_star[boyajian_radii] = measured_radii\n\n # In radius reference column, `1`==color-radius estimate;\n # `2`==interferometric measurement\n refs = np.ones(len(R_star))\n refs[boyajian_radii] = 2\n main_sequence_color_table.add_column(Column(data=refs, name='rstar_ref'))\n\n # Add column containing approximate stellar effective temperatures based\n # on B-V -> T_eff table from Eric Mamajek:\n # http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt\n mamajek = ascii.read(mamajek_path, format='commented_header')\n bv_to_teff = lambda bv: np.interp(bv, mamajek['B-V'], mamajek['Teff'])\n approx_teffs = bv_to_teff(main_sequence_color_table['B-V'])\n main_sequence_color_table.add_column(Column(data=approx_teffs, name='Teff'))\n\n if plot:\n if ax is None:\n ax = plt.gca()\n polygon_x = [0.6, 0.6, 2.0, 2.0, 0.6]\n polygon_y = [color_cut(0.6) - 1, color_cut(0.6) + 1,\n color_cut(2) + 1, color_cut(2) - 1,\n color_cut(0.6) - 1]\n\n H, xedges, yedges = np.histogram2d(bt_minus_vt[abs(bt_minus_vt) > 1e-3],\n M_V[abs(bt_minus_vt) > 1e-3],\n bins=1000)\n\n extent = [xedges.min(), xedges.max(), yedges.max(), yedges.min()]\n ax.imshow(np.log10(H.T), extent=extent, cmap=plt.cm.Greys, aspect=0.2)\n ax.plot(polygon_x, polygon_y, lw=2, color='r', ls='--')\n\n ax.set(xlim=[-0.5, 3], ylim=[2, -15],\n ylabel='$M_{VT}$', xlabel=\"BT - VT\")\n\n return main_sequence_color_table", "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, 
item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def make_disp_sq_plots(comp_key,conn):\n\n (fin,) = conn.execute(\"select fout from comps where function = 'track_stats' and comp_key = ?\",\n (comp_key,)).fetchone()\n\n F = h5py.File(fin,'r')\n \n g = F[_fd('disp_sq_hist',comp_key)]\n cmap = color_mapper(0,len(g))\n\n (fig,ax) = plots.set_up_plot()\n istatus = plots.non_i_plot_start();\n for s in g:\n step = int(s[-7:])\n val = g[s]['bin_value'][:]\n \n ax.semilogy(g[s]['bin_edges'],val,color = cmap.get_color(step))\n F.close()\n\n (iden_fun,dset_key ) = conn.execute(\"select function,dset_key from comps where \" +\n \"comp_key in \" +\n \"(select iden_key from trk_stat_prams where \"+\n \"comp_key = ?)\",(comp_key,)).fetchone()\n\n ax.set_title(\"dset: \" + str(dset_key) + \" \" + iden_fun)\n\n plots.non_i_plot_stop(istatus)", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def ascii_table(self, tablefmt=\"pipe\"):\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)", "def latex_table(samples, parameter_dict=None, labels=None):\n table = (\n \"\\\\begin{table}[hptb]\\n\\\\begin{ruledtabular}\\n\\\\begin{tabular}\"\n \"{l %s}\\n\" % (\"c \" * len(samples))\n )\n if labels:\n table += (\n \" & \" + \" & \".join(labels)\n )\n table += \"\\\\\\ \\n\\\\hline \\\\\\ \\n\"\n data = {i: i for i in samples[0].keys()}\n if parameter_dict is not None:\n import copy\n\n data = copy.deepcopy(parameter_dict)\n for param in parameter_dict.keys():\n if not all(param in samples_dict.keys() for samples_dict in samples):\n logger.warning(\n \"{} not in list of parameters. 
Not adding to \"\n \"table\".format(param)\n )\n data.pop(param)\n\n for param, desc in data.items():\n table += \"{}\".format(desc)\n for samples_dict in samples:\n median = samples_dict[param].average(type=\"median\")\n confidence = samples_dict[param].confidence_interval()\n table += (\n \" & $%s^{+%s}_{-%s}$\" % (\n np.round(median, 2),\n np.round(confidence[1] - median, 2),\n np.round(median - confidence[0], 2)\n )\n )\n table += \"\\\\\\ \\n\"\n table += (\n \"\\\\end{tabular}\\n\\\\end{ruledtabular}\\n\\\\caption{}\\n\\\\end{table}\"\n )\n return table", "def ana_plot_graphs(plotables,plotable_titles,figure_title=None,show=False):\n axes = num_to_subplots_axes(len(plotables))\n fig = plt.figure()\n fig.suptitle(figure_title)\n for i, ((plotable,plot_type),ana_type) in enumerate(zip(plotables,plotable_titles)):\n if plot_type == 'MESH':\n #ax = plot_mesh_sub(fig, axes+(i+1,), *plotable)\n ax = plot_imshow_from_mesh_sub(fig, axes+(i+1,), *plotable)\n # Suplots indexing is from 1 => i+1\n ax.set_title(ana_type)\n elif plot_type == 'HIST':\n ax = plot_imshow_sub(\n fig, axes+(i+1,), plotable[0],\n (np.min(plotable[1]),np.max(plotable[1])),\n (np.min(plotable[2]),np.max(plotable[2]))\n )\n ax.set_title(ana_type)\n else:\n assert False, \"Not implemented\"\n if show:\n plt.show()", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n figs.setup_amp_plots_grid(\"row\",\n title=\"Correlation: imaging region and serial overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n figs.setup_amp_plots_grid(\"col\",\n title=\"Correlation: imaging region and paralell overscan\",\n xlabel=\"Correlation\",\n ylabel=\"Number of frames\")\n\n dtab = dtables.get_table(\"correl\")\n for i in range(16):\n s_correl = dtab['s_correl_a%02i' % i]\n p_correl = dtab['p_correl_a%02i' % i]\n figs.get_obj('row', 'axs').flat[i].hist(s_correl, bins=100, range=(-1., 1.))\n figs.get_obj('col', 'axs').flat[i].hist(p_correl, bins=100, range=(-1., 1.))", "def table_disperssion(data, axs, f=2):\n\n # Measures of disperssion\n v_bessel_sd = round(data.std(), f)\n v_var = round(data.var(), f)\n v_range = round((data.max()-data.min()), f)\n v_iqr = round((data.quantile(0.75)-data.quantile(0.25)), f)\n v_mad = round(data.mad(), f)\n\n # Use built in tex only, no depandancy needed\n sample_std_str = \"stan. dev.\" + r' $s$ '\n sample_var_str = \"variance, \" + '$s^2$'\n sample_range_str = \"range\"\n sample_iqr_str = \"$IQR$\"\n sample_mad_str = \"mean abs. 
dev.\"\n\n symbols = pd.DataFrame(\n [sample_std_str, sample_iqr_str,\n sample_mad_str, sample_var_str,\n sample_range_str]\n )\n val = pd.DataFrame(\n [v_bessel_sd, v_iqr,\n v_mad, v_var, v_range]\n )\n data = pd.concat([symbols, val], axis=1)\n\n disperssion = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='right',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\") # No line\n\n title_color = '#9099A2' # Dark Grey\n axs.set_title(\n ('Disperssion'),\n fontsize=12,\n color=title_color)\n\n table_settings(axs, disperssion)", "def data_table(\n filepath=\"sparkify_data.csv\",\n title=\"Engineered Features Dataframe\",\n ):\n df = read_data_csv(filepath)\n fig = go.Figure(\n data=[\n go.Table(\n header=dict(\n values=list(df.columns), align=\"left\"\n ),\n cells=dict(\n values=[df[col] for col in df.columns],\n align=\"left\",\n ),\n )\n ]\n )\n\n fig.update_layout(title=go.layout.Title(text=title, x=0.5))\n\n return fig", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n\n figs.setup_amp_plots_grid(\"ratio-row\", title=\"sflat ratio by row\",\n xlabel=\"row\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'row', 'ratio_row',\n x_name='row_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"ratio-col\", title=\"sflat ratio by col\",\n xlabel=\"col\", ylabel=\"Ratio\")\n figs.plot_xy_amps_from_tabledict(dtables, 'col', 'ratio_col',\n x_name='col_i', y_name='r_med')\n\n figs.setup_amp_plots_grid(\"scatter\", title=\"sflat ratio v. sbias\",\n xlabel=\"Superbias [ADU]\", ylabel=\"Ratio\")\n\n figs.plot_amp_arrays(\"mask\", self.quality_masks, vmin=0, vmax=3)\n\n for i, (amp, sbias_image) in enumerate(sorted(self.superbias_images)):\n figs.plot_two_image_hist2d('scatter', i,\n sbias_image,\n self.ratio_images[amp],\n bins=(200, 200),\n range=((-50, 50.), (0.018, 0.022)))", "def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph markup\n print(tbl.show())\n print(tbl.loggraph())", "def plot_eos_table(ax, mat, table_name, spec='t', vmin=None, vmax=None,\n nx=300, ny=350, xmax=None, ymax=None, xmin=None, ymin=None):\n\n table_name = table_name.format(s=spec)\n tab = mat.get_table(table_name)\n\n Rmin, Rmax = tab['Rmin'], tab['Rmax']\n Tmin, Tmax = tab['Tmin'], tab['Tmax']\n if xmin is not None:\n Rmin = xmin\n if ymin is not None:\n Tmin = ymin\n\n Xarr = np.logspace(np.log10(Rmin), np.log10(Rmax)-0.1, nx)\n Yarr = np.logspace(np.log10(Tmin), np.log10(Tmax)-0.1, ny)\n\n X, Y = np.meshgrid(Xarr, Yarr, indexing='ij')\n\n F = tab(X,Y)\n\n if vmax is None:\n vmax = np.percentile(F, 99.5) \n if vmin is None:\n vmin = np.percentile(F[F>0], 0.5)\n\n cs = ax.pcolormesh(X, Y*K2eV, F, cmap=plt.cm.jet, norm = LogNorm(),\n vmin=vmin, vmax=vmax)\n if vmin is not None:\n levels = np.arange(int(np.log10(vmin)), int(np.log10(F.max())))\n else:\n levels = 
np.arange(np.log10(F[F>0].min()), int(np.log10(F.max())))\n logF = np.log10(np.where(F>0, F, F[F>0].min()))\n cl = ax.contour(X, Y/11640, logF, levels, colors='k')\n plt.clabel(cl, fontsize=10, inline=False, fmt='%1.0d', use_clabeltext=True)\n plt.title('Table {0}: {1}'.format(tab['Material_ID'], table_name.replace('_', '\\_')))\n cb = plt.colorbar(cs)\n if F.min()<0:\n min_label = ' (min {0:.0e} GPa)'.format(F.min())\n else:\n min_label = ''\n cb.set_label('{0} [{1}] {2}'.format(tab.label.replace('_', '\\_'),\n tab.units, min_label))\n\n cl = ax.contourf(X, Y*K2eV, F>0, [0,0.5], colors='white', hatches=['//'])\n\n ax.set_xscale('symlog', linthreshx=3e-5)\n ax.set_yscale('symlog', linthreshy=0.1)\n if xmax is None:\n ax.set_xlim(0, Xarr.max())\n else:\n ax.set_xlim(0, xmax)\n if ymax is None:\n ax.set_ylim(0, Yarr.max()*K2eV)\n else:\n ax.set_ylim(0, ymax)\n\n ax.set_xlabel(r'$\\rho$ [g.cm$^{-3}$]')\n ax.set_ylabel(r'$T$ [eV]')\n return ax", "def table_spline(outpath, waves, slopes, stds, norm):\n # create the table\n table = Table(\n [waves, slopes, stds],\n names=(\n \"wavelength[micron]\",\n \"slope\",\n \"std\",\n ),\n )\n\n # save it in ascii format\n table.write(\n outpath + \"inv_RV_anchors\" + str(norm) + \".txt\",\n format=\"ascii.commented_header\",\n overwrite=True,\n )", "def dyntable(self):\n table = ANSITable(\n Column(\"j\", colalign=\">\", headalign=\"^\"),\n Column(\"m\", colalign=\"<\", headalign=\"^\"),\n Column(\"r\", colalign=\"<\", headalign=\"^\"),\n Column(\"I\", colalign=\"<\", headalign=\"^\"),\n Column(\"Jm\", colalign=\"<\", headalign=\"^\"),\n Column(\"B\", colalign=\"<\", headalign=\"^\"),\n Column(\"Tc\", colalign=\"<\", headalign=\"^\"),\n Column(\"G\", colalign=\"<\", headalign=\"^\"), border=\"thin\")\n\n for j, link in enumerate(self):\n table.row(link.name, *link._dyn2list())\n return str(table)", "def table(name, components):\n table = PrettyTable([name])\n table.align[name] = 'l'\n [table.add_row([component['name'][0:-5]]) for component in components]\n return table", "def get_summary_of_records(self):\n ids = self.get_saleman_ids()\n table = [\n [\"Seller name\",\"Number of sales\",\"Total Value ($)\"]\n ]\n for id in ids:\n table_id = [self.get_seller_name(id),self.get_number_of_sales(id),\n self.get_total_of_saleman(id)]\n table.append(table_id)\n data_table = AsciiTable(table)\n print(data_table.table)", "def latex_table():\n \n t = Table.read('../data/stream_origin.fits')\n N = len(t)\n \n f = open('../paper/stream_origin.tex', 'w')\n for i in range(N):\n t_ = t[i]\n for k in t_.colnames:\n if (t_[k]==np.nan) | (t_[k]=='nan'):\n t_[k] = '\\dots'\n #f.write('{:s} & {:s} & {:s} & {:s} & {:.1f}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['type'], t_['feh']))\n line = '{:s} & {:s} & {:s} & {:s} & {:s}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['friends'], t_['type'])\n f.write(line)\n print(line)\n \n f.close()", "def table_central_tend(data, axs, f=2):\n\n # Central tendacy\n v_mean = round(data.mean(), f)\n v_median = round(data.median(), f)\n\n # Use built in tex only, no depandancy needed\n sample_mean_str = \"mean, \" + r' $\\bar x$ '\n sample_median_str = \"median\"\n\n # Concatenate the statistics and symbols\n symbols = pd.DataFrame([sample_mean_str, sample_median_str])\n val = pd.DataFrame([v_mean, v_median])\n data = pd.concat([symbols, val], axis=1)\n\n # Plot onto matplotlib axs\n central_tend = axs.table(\n cellText=data.values,\n loc='center',\n cellLoc=\"center\",\n colLoc='center',\n # xmin, 
ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\" # No line\n )\n\n title_color = '#9099A2' # Dark grey\n axs.set_title(\n ('Central Tendancy'),\n fontsize=12,\n color=title_color\n )\n\n table_settings(axs, central_tend)", "def generate_table(self, rows):\n ...", "def test_plots(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n fig = pesummary_data.plot(type=\"td\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"fd\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(1126259446 + 20., type=\"omegascan\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"spectrogram\")\n assert isinstance(fig, matplotlib.figure.Figure)" ]
[ "0.6636303", "0.62570447", "0.59922016", "0.58673024", "0.5672084", "0.5577076", "0.55361027", "0.5517768", "0.55032074", "0.54658043", "0.54374826", "0.5432732", "0.5390313", "0.53773123", "0.5351929", "0.53117126", "0.52992725", "0.52894443", "0.526929", "0.52465236", "0.51397634", "0.5114791", "0.5113373", "0.5113001", "0.5104147", "0.5102273", "0.50947803", "0.50815374", "0.5071387", "0.5065061" ]
0.6466206
1
Sets style settings on a table. Requires a matplotlib `axs` as input. Enforces the style settings onto that axs.
def table_settings(axs_num, table_name):
    table_props = table_name.properties()
    table_cells = table_props['child_artists']  # matplotlib setting

    # iterate through cells of a table to change properties
    for cell in table_cells:
        cell._text.set_fontsize(15)
        cell._text.set_color('#192231')  # Light grey

    # Set axis tick labels off, i.e. empty [].
    axs_num.set_yticklabels([])
    axs_num.set_xticklabels([])

    # Seaborn settings
    sns.set_style("whitegrid")
    sns.set_style({'axes.grid': False})
    sns.set_context(
        "poster",
        rc={'font.sans-serif': 'Gill Sans MT'}
    )
    sns.despine(offset=2, top=False, trim=False, left=True, bottom=True)

    # Leave one line on top to break up the table
    axs_num.spines['top'].set_color('#9099A2')

    # Set tick labels to white in case they still are showing,
    # perhaps redundant but this is not perfect.
    plt.setp(
        [axs_num.get_xticklines(), axs_num.get_yticklines()],
        color="white"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_styles(plt, matplotlib):\n\n try:\n plt.style.use('default') \n for param, value in plt_params_dict.items():\n matplotlib.rcParams[param] = value\n params = {'mathtext.default': 'regular' } \n plt.rcParams.update(params)\n except:\n print(\"\"\"\n Before running set_styles(), you must:\n\n import matplotlib.pyplot as plt\n import matplotlib\n \"\"\")", "def set_style(fontSize=14, linewidth=1):\n# plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.sans-serif'] = \"Verdana\"\n plt.rcParams['font.size'] = fontSize\n plt.rcParams['font.weight'] = 'regular'\n plt.rcParams['mathtext.default'] = 'regular'\n plt.rcParams['savefig.dpi'] = '500'\n plt.rcParams['savefig.transparent'] = True\n plt.rcParams['lines.linewidth'] = linewidth\n plt.rcParams['lines.markersize'] = 6\n plt.rcParams['legend.fontsize'] = fontSize - 2\n plt.rcParams['legend.fancybox'] = False\n plt.rcParams['legend.labelspacing'] = 0.5\n plt.rcParams['legend.columnspacing'] = 1\n plt.rcParams['legend.borderpad'] = 0.5\n plt.rcParams['axes.labelweight'] = 'bold'\n #plt.rcParams['figure.autolayout'] = 'True'\n\n # Pandas options\n pd.options.display.max_columns = 100\n pd.options.display.max_rows = 200", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 
'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot", "def set_style(usetex=False):\n plt.rc('text', usetex=usetex)\n plt.rc('font', family='Serif')\n\n\n mpl.rcParams['figure.figsize'] = [10, 7]\n mpl.rcParams['font.size'] = 17\n\n mpl.rcParams['savefig.dpi'] = 150\n mpl.rcParams['xtick.minor.visible'] = True\n mpl.rcParams['ytick.minor.visible'] = True\n mpl.rcParams['xtick.direction'] = 'in'\n mpl.rcParams['ytick.direction'] = 'in'\n\n mpl.rcParams['xtick.top'] = True\n mpl.rcParams['ytick.right'] = True\n\n mpl.rcParams['xtick.major.size'] = 6\n mpl.rcParams['xtick.minor.size'] = 3\n\n mpl.rcParams['ytick.major.size'] = 6\n mpl.rcParams['ytick.minor.size'] = 3\n\n mpl.rcParams['xtick.labelsize'] = 13\n mpl.rcParams['ytick.labelsize'] = 13", "def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]", "def set_axis_font(axs, fontproperties):\n axs = np.atleast_1d(axs)\n for ax in axs.ravel():\n props = (ax.get_xticklabels() + ax.get_yticklabels() +\n [ax.title, ax.xaxis.label, ax.yaxis.label])\n _ = plt.setp(props, fontproperties=fontproperties)", "def settabular(self, *args, **kwargs):\n return _coordsys.coordsys_settabular(self, *args, **kwargs)", "def set_row_style(sheet, row, style):\r\n\r\n for cell in sheet[row]:\r\n cell.style = style", "def set_style(self):", "def SetStyle(*args, **kwargs):\n return _gdi_.Font_SetStyle(*args, **kwargs)", "def setStyle(self, *args):\n return _libsbml.ASTNode_setStyle(self, *args)", "def styles(self, styles):\n # each cell owns it's own copy of the styles\n self._styles = {} if styles is None else styles.copy()", "def __event_table_style(self):\n table = TableStyle()\n table.set_width(100)\n table.set_columns(5)\n table.set_column_width(0, 15)\n table.set_column_width(1, 15)\n table.set_column_width(2, 35)\n table.set_column_width(3, 20)\n table.set_column_width(4, 25)\n self.default_style.add_table_style(\"PLC-EventTable\", table)\n table.set_width(100)\n table.set_columns(5)\n table.set_column_width(0, 35)\n table.set_column_width(1, 15)\n table.set_column_width(2, 25)\n table.set_column_width(3, 20)\n table.set_column_width(4, 25)\n self.default_style.add_table_style(\"PLC-PersonTable\", table)\n table.set_width(100)\n table.set_columns(3)\n table.set_column_width(0, 20)\n table.set_column_width(1, 70)\n table.set_column_width(2, 10)\n self.default_style.add_table_style(\"PLC-LitTable\", table)", "def set_spines(axs, color):\r\n\r\n for ax in axs:\r\n\r\n # Set the spines\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['left'].set_linewidth(2)\r\n ax.spines['bottom'].set_linewidth(2)\r\n ax.spines['left'].set_color(color)\r\n ax.spines['bottom'].set_color(color)\r\n\r\n # Make the tick widths match\r\n ax.tick_params(width=2)", "def mpl_switch_style(style=\"ggplot\"):\n # this import was moved 
here because ths code is executed when\n # the module is imported and for some reasons, it overides some of the settings\n # sphinx is doing and graphs are not part of the documentation but show up\n # in a separate window\n if \"plt\" not in sys.modules:\n import matplotlib.pyplot as plt # pylint: disable=C0415\n plt.style.use(style)", "def PlotStyle(Axes): \n Axes.spines['top'].set_visible(False)\n Axes.spines['bottom'].set_visible(True)\n Axes.spines['left'].set_visible(True)\n Axes.spines['right'].set_visible(False)\n Axes.xaxis.set_tick_params(labelsize=13)\n Axes.yaxis.set_tick_params(labelsize=13)", "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def __cell_style(self):\n cell = TableCellStyle()\n self.default_style.add_cell_style(\"PLC-Cell\", cell)", "def UpdateBaseStyles(self):\n super(EditraBaseStc, self).UpdateBaseStyles()\n\n # Set control specific styles\n sback = self.GetItemByName('select_style')\n if not sback.IsNull():\n sback = sback.GetBack()\n else:\n sback = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)\n self.VertEdit.SetBlockColor(sback)\n self.DefineMarkers()", "def plot_table(mat, width=.15, ratio=4):\n vmax = np.abs(mat).max()\n vals = np.around(mat, 2)\n fig = plt.figure()\n ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],\n loc='center', cellColours=plt.cm.RdBu_r(\n Normalize(-vmax, vmax)(mat)))\n table.scale(1, ratio)\n return fig", "def PlotSettings():\n\n # Color palette\n import seaborn as sns\n # sns.set()\n\n # Axes font size\n sns.set(font_scale=1.2)\n\n # LaTeX\n if find_executable('latex'):\n plt.rc('text',usetex=True)\n matplotlib.font_manager._rebuild()\n\n # Style sheet\n sns.set_style(\"white\")\n sns.set_style(\"ticks\")\n\n # Font (Note: this should be AFTER the plt.style.use)\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none' # text in svg file will be text not path.", "def stylesheet_test(self, wdgt):\n\n #stylesheet_str\n stylesheet_str = 'background-color: red;'\n \n #set stylesheet\n wdgt.setStyleSheet(stylesheet_str)", "def _set_theme_seaborn_rcparams(self, rcParams, style, gridweight, context):\n # select grid line width:\n gridweights = {'extra heavy': 1.5,\n 'heavy': 1.1,\n 'medium': 0.8,\n 'light': 0.5, }\n if gridweight is None:\n if context == \"paper\":\n glw = gridweights[\"medium\"]\n else:\n glw = gridweights['extra heavy']\n elif np.isreal(gridweight):\n glw = gridweight\n else:\n glw = gridweights[gridweight]\n\n if style == \"darkgrid\":\n lw = .8 if context == \"paper\" else 1.5\n ax_params = {\"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"axes.linewidth\": 0,\n \"axes.grid\": True,\n \"axes.axisbelow\": True,\n \"grid.color\": \"w\",\n \"grid.linestyle\": \"-\",\n \"grid.linewidth\": glw}\n\n elif style == \"whitegrid\":\n lw = 1.0 if context == \"paper\" else 1.7\n ax_params = {\"axes.facecolor\": \"white\",\n \"axes.edgecolor\": \"#CCCCCC\",\n \"axes.linewidth\": lw,\n \"axes.grid\": True,\n \"axes.axisbelow\": True,\n \"grid.color\": \"#DDDDDD\",\n \"grid.linestyle\": \"-\",\n \"grid.linewidth\": glw}\n\n elif style == \"nogrid\":\n ax_params = {\"axes.grid\": False,\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": \"black\",\n \"axes.linewidth\": 1}\n\n elif style == \"ticks\":\n ticksize = 3. 
if context == \"paper\" else 6.\n tickwidth = .5 if context == \"paper\" else 1\n ax_params = {\"axes.grid\": False,\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": \"black\",\n \"axes.linewidth\": 1,\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.major.width\": tickwidth,\n \"ytick.major.width\": tickwidth,\n \"xtick.minor.width\": tickwidth,\n \"xtick.minor.width\": tickwidth,\n \"xtick.major.size\": ticksize,\n \"xtick.minor.size\": ticksize / 2,\n \"ytick.major.size\": ticksize,\n \"ytick.minor.size\": ticksize / 2}\n else:\n ax_params = {}\n\n rcParams.update(ax_params)\n\n # Determine the font sizes\n if context == \"talk\":\n font_params = {\"axes.labelsize\": 16,\n \"axes.titlesize\": 19,\n \"xtick.labelsize\": 14,\n \"ytick.labelsize\": 14,\n \"legend.fontsize\": 13,\n }\n\n elif context == \"notebook\":\n font_params = {\"axes.labelsize\": 11,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 10,\n \"ytick.labelsize\": 10,\n \"legend.fontsize\": 10,\n }\n\n elif context == \"poster\":\n font_params = {\"axes.labelsize\": 18,\n \"axes.titlesize\": 22,\n \"xtick.labelsize\": 16,\n \"ytick.labelsize\": 16,\n \"legend.fontsize\": 16,\n }\n\n elif context == \"paper\":\n font_params = {\"axes.labelsize\": 8,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 8,\n \"ytick.labelsize\": 8,\n \"legend.fontsize\": 8,\n }\n\n rcParams.update(font_params)\n\n # Set other parameters\n rcParams.update({\n \"lines.linewidth\": 1.1 if context == \"paper\" else 1.4,\n \"patch.linewidth\": .1 if context == \"paper\" else .3,\n \"xtick.major.pad\": 3.5 if context == \"paper\" else 7,\n \"ytick.major.pad\": 3.5 if context == \"paper\" else 7, })\n\n rcParams[\"timezone\"] = \"UTC\"\n rcParams[\"patch.antialiased\"] = \"True\"\n rcParams[\"font.family\"] = \"sans-serif\"\n rcParams[\"font.size\"] = \"12.0\"\n rcParams[\"font.serif\"] = [\"Times\", \"Palatino\", \"New Century Schoolbook\",\n \"Bookman\", \"Computer Modern Roman\",\n \"Times New Roman\"]\n rcParams[\"font.sans-serif\"] = [\"Helvetica\", \"Avant Garde\",\n \"Computer Modern Sans serif\", \"Arial\"]\n rcParams[\"axes.color_cycle\"] = [\"#333333\", \"348ABD\", \"7A68A6\", \"A60628\",\n \"467821\", \"CF4457\", \"188487\", \"E24A33\"]\n rcParams[\"legend.fancybox\"] = \"True\"\n rcParams[\"figure.figsize\"] = \"11, 8\"\n rcParams[\"figure.facecolor\"] = \"1.0\"\n rcParams[\"figure.edgecolor\"] = \"0.50\"\n rcParams[\"figure.subplot.hspace\"] = \"0.5\"", "def adjust_matplotlib_settings():\n font = {\"weight\": \"bold\", \"size\": 22, \"family\": \"sans-serif\"}\n matplotlib.rc(\"font\", **font)\n matplotlib.rc(\"text\", usetex=True)\n matplotlib.rcParams[\"mathtext.fontset\"] = \"dejavusans\"", "def use(style):\r\n plt.style.use(_paths[style])", "def set(font='Serif', fontsize=11, figsize=(8.6, 8.6),\n linewidth=1.5, color_scheme=color_scheme,\n color_labels=color_labels):\n params = {\n 'font.size': fontsize,\n 'backend': 'PDF',\n 'font.family': font,\n 'figure.figsize': (figsize[0]/2.54, figsize[1]/2.54),\n 'axes.prop_cycle': plt.cycler('color', color_scheme),\n 'axes.formatter.useoffset': False,\n 'lines.linewidth': linewidth,\n 'axes.axisbelow': True, # Grid axis below data\n 'grid.color': '#BFBFBF',\n 'grid.linestyle': '-',\n 'legend.fontsize': 10,\n 'figure.dpi': 200\n }\n\n plt.rcParams.update(params)\n builtins.cc = dict(zip(color_labels, color_scheme))", "def _set_style(style):\n if isinstance(style, (str, dict)):\n return Style(style)\n elif isinstance(style, Style):\n return style\n else:\n return 
Style()", "def assign_style_props(df, color=None, marker=None, linestyle=None, cmap=None):\n if color is None and cmap is not None:\n raise ValueError(\"`cmap` must be provided with the `color` argument\")\n\n # determine color, marker, and linestyle for each line\n n = (\n len(df[color].unique())\n if color in df.columns\n else len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())\n )\n defaults = default_props(reset=True, num_colors=n, colormap=cmap)\n\n props = {}\n rc = run_control()\n\n kinds = [(\"color\", color), (\"marker\", marker), (\"linestyle\", linestyle)]\n\n for kind, var in kinds:\n rc_has_kind = kind in rc\n if var in df.columns:\n rc_has_var = rc_has_kind and var in rc[kind]\n props_for_kind = {}\n\n for val in df[var].unique():\n if rc_has_var and val in rc[kind][var]:\n props_for_kind[val] = rc[kind][var][val]\n # cycle any way to keep defaults the same\n next(defaults[kind])\n else:\n props_for_kind[val] = next(defaults[kind])\n props[kind] = props_for_kind\n\n # update for special properties only if they exist in props\n if \"color\" in props:\n d = props[\"color\"]\n values = list(d.values())\n # find if any colors in our properties corresponds with special colors\n # we know about\n overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))\n if overlap_idx.any(): # some exist in our special set\n keys = np.array(list(d.keys()))[overlap_idx]\n values = np.array(values)[overlap_idx]\n # translate each from pyam name, like AR6-SSP2-45 to proper color\n # designation\n for k, v in zip(keys, values):\n d[k] = PYAM_COLORS[v]\n # replace props with updated dict without special colors\n props[\"color\"] = d\n return props", "def setgraphs(self, graphs):\n\n self.__graphs = graphs\n # Create table_graph objects\n rgraph = re.compile(r\":([^:]+):([^:]+):([^:]+):\")\n for graph in rgraph.findall(graphs):\n new_graph = self.addgraph(graph[0])\n new_graph.setscaling(graph[1])\n new_graph.setcolumns(graph[2])\n self.__nonzero = True", "def plot_settings():\n\n # Color palette\n import seaborn as sns\n # sns.set()\n\n # Axes font size\n sns.set(font_scale=1.2)\n\n # LaTeX\n if find_executable('latex'):\n plt.rc('text', usetex=True)\n\n # Style sheet\n sns.set_style(\"white\")\n sns.set_style(\"ticks\")\n\n # Font (Note: this should be AFTER the plt.style.use)\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none' # text in svg file is text not path." ]
[ "0.6217304", "0.59189606", "0.58773714", "0.5363297", "0.529926", "0.527115", "0.5224783", "0.52156645", "0.51874053", "0.51756155", "0.5173207", "0.51480645", "0.5089802", "0.507126", "0.50237226", "0.49920684", "0.49623072", "0.49523818", "0.49471563", "0.49456948", "0.49423036", "0.49227142", "0.49196196", "0.4916373", "0.4902696", "0.48856714", "0.48768717", "0.48738483", "0.48514104", "0.48497382" ]
0.735611
0
Creates a plotted table of descriptive statistics.
def descriptive_table(data, column_name, fig_size=(8, 8)):
    # Set up figure dimensions and sub components.
    sheet, axs = plt.subplots(4, 1, figsize=fig_size)

    # Heights ratio is based on the number of rows in each
    # table, this relates to the number of statistics each
    # sub table will show.
    gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])

    # Assign all subplots based on figure dimensions.
    ax0 = plt.subplot(gs[0])
    ax1 = plt.subplot(gs[1])
    ax2 = plt.subplot(gs[2])
    ax3 = plt.subplot(gs[3])

    title_color = '#9099A2'  # Dark grey
    plt.suptitle(
        'Descriptive Statistics',
        fontsize=16,
        color=title_color,
        x=0.25
    )

    table_top(data, column_name, ax0)
    table_central_tend(data, ax1)
    table_disperssion(data, ax2)
    table_distribution(data, ax3)

    # Adjust the spacing so the title fits correctly.
    sheet.subplots_adjust(hspace=0.2, top=0.95)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def descriptive_plot(data_onlyDV):\n outcome = data_onlyDV.columns.values[0] # get the outcome column name\n\n fig = plt.figure()\n # TODO: subplots appear in same frame instead of 3 separate ones (!!!)\n ax1 = fig.add_subplot(121)\n ax1 = data_onlyDV.plot(kind='hist', title=\"Histogram: \"+outcome, by=outcome)\n ax1.locator_params(axis='x', nbins=4)\n ax1.set_xlabel(outcome+\" bins\")\n ax1.set_ylabel(\"Num Instances\")\n\n ax2 = fig.add_subplot(122)\n ax2 = data_onlyDV.plot(kind='kde', title=\"KDE Density Plot: \"+outcome)\n\n fig.tight_layout()\n plt.show()", "def show_histo(dict, orient=\"horiz\", label=\"counts\", title=\"title\"):\n plt.clf()\n plt.cla()\n if orient==\"horiz\":\n bar_fun = plt.barh \n bar_ticks = plt.yticks\n bar_label = plt.xlabel\n elif orient==\"vert\":\n bar_fun = plt.bar\n bar_ticks = plt.xticks\n bar_label = plt.ylabel\n else:\n raise Exception(\"show_histo: Unknown orientation: %s \".format % orient)\n n = len(dict)\n bar_fun(range(n), list(dict.values()), align='center', alpha=0.4, color = 'g')\n bar_ticks(range(n), list(dict.keys())) \n bar_label(label)\n plt.title(title)\n plt.show()", "def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):\n if mean:\n mean_ = np.mean(array)\n median = np.median(array)\n mini = np.min(array)\n maxi = np.max(array)\n first_qu = np.percentile(array, 25)\n third_qu = np.percentile(array, 75)\n\n if verbose:\n if mean:\n label += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / '\n label += '3rd QU={:.1f} / max={:.1f}'\n print(label.format(mini, first_qu, mean_, median, third_qu, maxi))\n else:\n label += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} '\n label += '/ max={:.1f}'\n print(label.format(mini, first_qu, median, third_qu, maxi))\n\n if plot:\n boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.')\n\n if mean:\n return mini, first_qu, mean_, median, third_qu, maxi\n else:\n return mini, first_qu, median, third_qu, maxi", "def _make_tex_table(self, tabletitle):\r\n stattable = (\r\n 
r\"\"\"\r\n \\begin{table}[h!]\r\n \\caption{%s}\r\n \\centering\r\n \\begin{tabular}{l l l l l}\r\n \\toprule\r\n \\textbf{Statistic} & \\textbf{Inlet} & \\textbf{Outlet} \\\\\"\"\"\r\n % tabletitle\r\n )\r\n\r\n stats = [\r\n {\"name\": \"Count\", \"attribute\": \"N\", \"rule\": \"top\", \"forceint\": True},\r\n {\"name\": \"Number of NDs\", \"attribute\": \"ND\", \"forceint\": True},\r\n {\"name\": \"Min; Max\", \"attribute\": [\"min\", \"max\"], \"twoval\": True},\r\n {\"name\": \"Mean\", \"attribute\": \"mean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"mean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Standard Deviation\", \"attribute\": \"std\"},\r\n {\"name\": \"Log. Mean\", \"attribute\": \"logmean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"logmean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Log. Standard Deviation\", \"attribute\": \"logstd\"},\r\n {\"name\": \"Geo. Mean\", \"attribute\": \"geomean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"geomean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Coeff. of Variation\", \"attribute\": \"cov\"},\r\n {\"name\": \"Skewness\", \"attribute\": \"skew\"},\r\n {\"name\": \"Median\", \"attribute\": \"median\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"median_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Quartiles\", \"attribute\": [\"pctl25\", \"pctl75\"], \"twoval\": True},\r\n {\r\n \"name\": \"Number of Pairs\",\r\n \"attribute\": \"n_pairs\",\r\n \"rule\": \"top\",\r\n \"fromdataset\": True,\r\n \"sigfigs\": 1,\r\n \"forceint\": True,\r\n },\r\n {\r\n \"name\": \"Wilcoxon p-value\",\r\n \"attribute\": \"wilcoxon_p\",\r\n \"fromdataset\": True,\r\n \"pval\": True,\r\n \"tex\": True,\r\n },\r\n {\r\n \"name\": \"Mann-Whitney p-value\",\r\n \"attribute\": \"mannwhitney_p\",\r\n \"fromdataset\": True,\r\n \"pval\": True,\r\n \"tex\": True,\r\n },\r\n ]\r\n for s in stats:\r\n stattable += self._tex_table_row(**s)\r\n\r\n stattable += r\"\"\"\r\n \\bottomrule\r\n \\end{tabular}\r\n \\end{table}\"\"\"\r\n\r\n return stattable + \"\\n\"", "def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)", "def make_performance_table(self):\n table = Table()\n table.add_column(\"Classifier\", ratio=25)\n table.add_column(\"Score\", ratio=10, justify=\"center\", no_wrap=True)\n table.add_column(\"Params\", ratio=25, no_wrap=False)\n table.add_column(\"Model ID\",ratio=40, no_wrap=True)\n\n for name, stuff in self.trainer.performance.items():\n score, params, hash_id = stuff\n style = \"bold green\" if name == self.trainer.best_classifier__name else \"\"\n best_one = \" ***\" if name == self.trainer.best_classifier__name else \"\"\n \n table.add_row(\n str(name),\n str(np.round(score, 3)), \n str(params), \n f\"{str(hash_id)}{best_one}\",\n style=style)\n \n return table", "def create_table_description(self):\n table_description = [\"Name\"]\n label = 0\n for i in 
range(self.total_days_count):\n #The name is never shown. Assure that it is small to not\n #column resizing\n table_description.append(\n (str(i), \"number\"))\n table_description.append((\n str(i + self.total_days_count), \"number\"))\n return table_description", "def PlotProfile():\n (metadata, data) = Parse('/tmp/sdcard-scalability.txt')\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp('set pointsize 2')\n gp.clear()\n gp.xlabel('writer process')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n\n dataset = data[0]\n x = numpy.array(dataset.time, dtype='int_')\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='linespoints')\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def basic_statistics():\n print(train_data['revenue'].describe())\n plt.hist(train_data['revenue'], color = 'blue', edgecolor = 'black',\n bins = int(4))\n\n # Add labels\n plt.title('Histogram of Revenues')\n plt.xlabel('revenues')\n plt.ylabel('P(revenues)')\n plt.show()", "def create_gnuplot_statistic(statistic_entries):\n grouped_by_number_of_entries = {}\n for statistic in statistic_entries:\n key = statistic['max entries']\n if key not in grouped_by_number_of_entries:\n grouped_by_number_of_entries[key] = [statistic]\n else:\n grouped_by_number_of_entries[key].append(statistic)\n\n all_plots = multiplot(\"learn.py statistics\", title_font=(\"\", 18), plots_per_row=2)\n\n pos = 0\n max_pos = len(grouped_by_number_of_entries) - 1\n for key, statistic in grouped_by_number_of_entries.items():\n average_time_plot = plot()\n average_time_plot.set_ylabel(\"seconds\")\n if pos == max_pos:\n average_time_plot.set_xlabel(\"n'th test run\")\n average_time_plot.set_xtics(\"1\")\n average_time_plot.set_ytics(\"0.5\")\n average_time_plot.set_line_style(1, \"lc rgb \\\"#00ff00\\\" lw 2\")\n average_time_plot.set_fill_style(1, \"transparent solid 0.4 border\")\n values = list(enumerate([average(entry) for entry in statistic], 1))\n average_time_plot.add_curve(\"average times (max entries=%d)\" % key,\n values=values, mode=plot.FILLEDCURVES)\n\n all_plots.add_plot(average_time_plot)\n\n number_of_tests_plot = plot()\n number_of_tests_plot.set_ylabel(\"# tests\")\n if pos == max_pos:\n number_of_tests_plot.set_xlabel(\"n'th test run\")\n number_of_tests_plot.set_xtics(\"1\")\n number_of_tests_plot.set_ytics(\"1\")\n number_of_tests_plot.set_line_style(1, \"lc rgb \\\"#00ff00\\\" lw 2\")\n number_of_tests_plot.set_fill_style(1, \"transparent solid 0.4 border\")\n values = list(enumerate([entry['correct answers'] + entry['wrong answers']\n for entry in statistic], 1))\n number_of_tests_plot.add_curve(\"# of tests (max entries=%d)\" % key,\n values=values, mode=plot.FILLEDCURVES)\n\n all_plots.add_plot(number_of_tests_plot)\n pos += 1\n\n calculated_height = len(grouped_by_number_of_entries) * 250\n script(\"learn.gp\", all_plots, width=800, height=calculated_height).execute()", "def summarize_as_table(self):\n h = human_readable_size\n h_throughput = human_readable_throughput\n table = [\n ['Total Time (seconds)', 
'%.3f' % self.total_time,\n self.std_dev_total_time],\n ['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],\n ['Maximum CPU (percent)', '%.1f' % self.max_cpu,\n self.std_dev_max_cpu],\n ['Maximum Sent Throughput', h_throughput(self.max_sent_throughput),\n h_throughput(self.max_sent_throughput)],\n ['Maximum Recv Throughput', h_throughput(self.max_recv_throughput),\n h_throughput(self.max_recv_throughput)],\n ['Average Memory', h(self.average_memory),\n h(self.std_dev_average_memory)],\n ['Average CPU (percent)', '%.1f' % self.average_cpu,\n self.std_dev_average_cpu],\n ['Average Sent Throughput',\n h_throughput(self.average_sent_throughput),\n h_throughput(self.average_sent_throughput)],\n ['Average Recv Throughput',\n h_throughput(self.average_recv_throughput),\n h_throughput(self.average_recv_throughput)],\n ]\n return tabulate(\n table,\n headers=[\n 'Metric over %s run(s)' % (self.total_files),\n 'Mean',\n 'Standard Deviation'\n ],\n tablefmt=\"grid\"\n )", "def helix_pair_stats (self):\n\n for Value in ['CrossingAngle','CrossingAngleEC','CrossingAngleIC']:\n\n HistogramPlot(np.array(self. values_list(Value, flat=True)), 'myproject/myapp/static/myapp/static/Stats/HelixPair/'+Value )\n #zrobic jakies dict coby robilo ranges, uzaleznialo np od zakresu albo od czegos\n\n return", "def basic_stats_and_plots():\n \n basename = sys.argv[1]\n ops = (\"two_opt\", \"twoh_opt\", \"three_opt\", \"three_opt_broad\", \"swap\", \"swap_adj\")\n opfs = {\n \"two_opt\": tsp.two_opt,\n \"twoh_opt\": tsp.twoh_opt,\n \"three_opt\": tsp.three_opt,\n \"three_opt_broad\": tsp.three_opt_broad,\n \"swap\": tsp.swap_two,\n \"swap_adj\": tsp.swap_adj\n }\n \n lengths = range(6, 11)\n for length in lengths:\n stddev = []\n gini = []\n nneighbours = []\n prop_unique = []\n for op in ops:\n filename = os.path.join(basename,\n \"tsp_length_%d_%s\" % (length, op),\n \"TP_row0.dat\")\n print op, length\n x = np.genfromtxt(filename)\n # stats to get:\n stddev.append(np.std(x))\n gini.append(random_walks.gini_coeff(x))\n nneighbours.append(np.sum(x > 0))\n mu, sigma = rw_experiment_with_op(length, opfs[op])\n prop_unique.append((mu, sigma))\n\n gini_barchart(length, gini, ops)\n stddev_barchart(length, stddev, ops)\n plot_gini_v_nneighbours(length, gini, nneighbours, ops)\n plot_stddev_v_nneighbours(length, stddev, nneighbours, ops)\n plot_gini_v_prop_unique(length, gini, prop_unique, ops)\n plot_stddev_v_prop_unique(length, stddev, prop_unique, ops)", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def descriptive_statistics(relfreqs): \n means = np.mean(relfreqs, axis=\"columns\")\n stdevs = np.std(relfreqs, axis=\"columns\")\n return means, stdevs", "def fastqc_stats_table(self):\n \n headers = OrderedDict()\n headers['percent_duplicates'] = {\n 'title': '% Dups',\n 'description': '% Duplicate Reads',\n 'max': 100,\n 'min': 0,\n 'scale': 'RdYlGn-rev',\n 'format': '{:.1f}%'\n }\n headers['percent_gc'] = {\n 'title': '% GC',\n 'description': 'Average % 
GC Content',\n 'max': 80,\n 'min': 20,\n 'scale': 'PRGn',\n 'format': '{:.0f}%'\n }\n headers['avg_sequence_length'] = {\n 'title': 'Length',\n 'description': 'Average Sequence Length (bp)',\n 'min': 0,\n 'scale': 'RdYlGn',\n 'format': '{:.0f}'\n }\n headers['total_sequences'] = {\n 'title': 'M Seqs',\n 'description': 'Total Sequences (millions)',\n 'min': 0,\n 'scale': 'Blues',\n 'modify': lambda x: x / 1000000,\n 'shared_key': 'read_count'\n }\n self.general_stats_addcols(self.fastqc_stats, headers)", "async def stat_table(self, data):\n\n table = \"\"\n table += tabulate([data[\"stats\"][1]], data[\"stats\"][0], tablefmt=\"grid\") + \"\\n\"\n table += tabulate([data[\"resist\"][1]], data[\"resist\"][0], tablefmt=\"grid\") + \"\\n\"\n if data[\"inherits\"] and data[\"inherits\"][0]:\n table += tabulate([data[\"inherits\"][1]], data[\"inherits\"][0], tablefmt=\"grid\") + \"\\n\"\n \n skills = tabulate(data[\"skills\"][1], data[\"skills\"][0], tablefmt=\"grid\")\n if len(skills) > 2000:\n counter = 0\n split_skills = []\n skills = skills.split(\"\\n\")\n skills = [\"\\n\".join(skills[8*i:min(8*(i+1)+1, len(skills))])\n for i in range(int(len(skills) / 8))]\n else:\n skills = [skills]\n\n results = [\"```\\n\" + table[:-1] + \"\\n```\"]\n for skill in skills:\n results.append(\"```\\n\" + skill + \"\\n```\")\n return results", "def describe_list(lst, name):\n print(\"Description of\", name, \":\")\n print(\"Count:\", len(lst))\n print(\"Sum:\", sum(lst))\n print(\"Min:\", min(lst))\n print(\"Mean:\", sum(lst) / len(lst))\n print(\"Max:\", max(lst))\n sns.distplot(lst, axlabel=name)\n plt.show()\n print()", "def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()", "def plot_data_stats(data_dict, data_bxtxn, data_dt):\n print(onp.mean(onp.sum(data_bxtxn, axis=1)), \"spikes/second\")\n f = plt.figure(figsize=(12,4))\n plt.subplot(141)\n plt.hist(onp.mean(data_bxtxn, 
axis=1).ravel()/data_dt);\n plt.xlabel('spikes / sec')\n plt.subplot(142)\n plt.imshow(data_dict['hiddens'][0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('Sample trial rates')\n plt.subplot(143);\n plt.imshow(data_bxtxn[0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('spikes')\n plt.subplot(144)\n plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));\n plt.xlabel('neuron #')\n plt.ylabel('spikes / sec');\n return f", "def plotly_table():\n model_data = your_choice()\n model_data[\"test_prediction\"] = list(model_data[\"test_prediction\"])\n \n df = pd.DataFrame(model_data[\"test_prediction\"], columns=[\"test_prediction\"])\n for k,v in model_data.items():\n if k != \"test_prediction\":\n df[k] = str(v)\n\n fig = a_libraries.plotly_table(df)\n\n return fig", "def descriptive_stats(data_lastDV):\n col_names = data_lastDV.columns.values.tolist() # get the columns' names\n outcome = col_names.pop() # remove the last item in the list\n\n # Summary of Number of Helpers Selected\n print(FORMAT_LINE)\n print(\"Descriptive statistics for: \\'\" + outcome+\"\\'\")\n print(data_lastDV[outcome].describe())\n print(FORMAT_LINE)\n\n # Descriptive Statistics of conditions\n print(FORMAT_LINE)\n print(\"Descriptive statistics for: all conditions\")\n df_conditions = data_lastDV[col_names]\n print(df_conditions.describe())\n df_conditions = data_lastDV[col_names+[outcome]] # add numerical column back in for descriptive stats\n\n # Count/Descriptive Stats of individual conditions & mean num helps of each (2^5) conditions\n for cond in col_names:\n print(FORMAT_LINE)\n print(\"Counts & Mean \" + outcome + \" for: \\'\" + cond)\n print(pd.concat([df_conditions.groupby(cond)[cond].count(), df_conditions.groupby(cond)[outcome].mean()], axis=1))", "def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()", "def create_display_data_table():\n\n for ccd in range(0, 10):\n for node in range(0, 4):\n file = 'ccd' + str(ccd) + '_' + str(node)\n infile = data_dir + file\n outfile = web_dir + 'Data/' + file\n\n f = open(infile, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n fo = open(outfile, 'w')\n#\n#--- adding heading\n#\n line = \"#\\n#Date Mn K alpha Al K alpha Ti K alpha Slope Sigma Int Sigma\\n#\\n\"\n fo.write(line)\n for ent in data:\n atemp = re.split('\\s+', ent)\n stime = int(atemp[0])\n#\n#--- converting the date into <mon> <year> form (e.g. 
May 2013)\n#\n ltime = tcnv.axTimeMTA(stime)\n btemp = re.split(':', ltime)\n year = btemp[0]\n [mon, mdate] = tcnv.changeYdateToMonDate(int(year), int(btemp[1]))\n lmon = tcnv.changeMonthFormat(mon)\n line = lmon + ' ' + year \n for j in range(1, len(atemp)):\n line = line + '\\t' + atemp[j]\n\n line = line + '\\n'\n fo.write(line)\n fo.close()", "def show_histograms(n_samples=1000):\n\n true_ps, obs_xs = get_ground_truth()\n\n prior = Prior()\n model = Model()\n stats = Stats()\n\n ps = prior.gen(n_samples)\n data = stats.calc(model.sim(ps))\n cond_data = stats.calc(model.sim(np.tile(true_ps, [n_samples, 1])))\n\n # plot prior parameter histograms\n fig = util.plot.plot_hist_marginals(ps, lims=get_disp_lims(), gt=true_ps)\n fig.suptitle('p(thetas)')\n\n # plot stats histograms\n fig = util.plot.plot_hist_marginals(data, gt=obs_xs)\n fig.suptitle('p(stats)')\n\n # plot stats histograms, conditioned on true params\n fig = util.plot.plot_hist_marginals(cond_data, gt=obs_xs)\n fig.suptitle('p(stats|true thetas)')\n\n plt.show()", "def buildTrivialStatsTable(self, deltaSeriesCollection, klass=TRIVIAL_STATS_TABLE, style=''):\n tableWrapper = HTML().div()\n klass = '{} {}'.format(TABLE_SUMMARY, klass)\n table = tableWrapper.table(border='1', klass=klass, style=style)\n self.buildStatsTableHeader(table)\n tbody = table.tbody\n\n for i, deltaSeries in enumerate(deltaSeriesCollection, 1):\n row = tbody.tr\n row.td('{0:,}'.format(i), klass=TD_KEY)\n row.td(deltaSeries.beginProbeName, klass=TD_KEY)\n row.td(deltaSeries.endProbeName, klass=TD_KEY)\n row.td(DURATION_FORMAT.format(deltaSeries.getMin()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMax()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMedian()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMean()))\n row.td(DURATION_FORMAT.format(deltaSeries.getPercentile(self.percentile1)))\n row.td(DURATION_FORMAT.format(deltaSeries.getPercentile(self.percentile2)))\n row.td(DURATION_FORMAT.format(deltaSeries.getStandardDeviation()))\n return tableWrapper" ]
[ "0.64671993", "0.63206184", "0.62217784", "0.6156925", "0.6149821", "0.6114673", "0.61127305", "0.6099334", "0.6062513", "0.60417926", "0.60378116", "0.5995212", "0.59413856", "0.5903425", "0.5895154", "0.58884484", "0.58857155", "0.5882201", "0.5853967", "0.58519465", "0.58444345", "0.5837873", "0.5820542", "0.58139676", "0.5801258", "0.5801075", "0.57860845", "0.57686555", "0.57654643", "0.57503206" ]
0.7500924
0
Create documentation as simple HTML, using the supplied path, which is interpreted as the project root. The documentation will be surrounded with very basic HTML boilerplate, i.e. you probably do not want to use this method (a thin wrapper around get_documentation_body) but rather call get_documentation_body yourself and supply your own HTML boilerplate before and after it.
def get_documentation(path=""): return """<HTML><head><title>Python Minidoc for """+path+"""</title></head> <body> """+get_documentation_body(path)+""" </body></html>"""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_documentation(self, css_path=None, base_url=None):\n if base_url is None:\n first_key = next(iter(self.conf_doc))\n conf = self.conf_doc[first_key]\n else:\n conf = self.conf_doc[\"/\" + base_url]\n\n return (\n 200,\n \"\"\"<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <title>%s</title>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/>\n <meta name=\"viewport\" content=\"width=device-width\" />\n <style>%s</style>\n %s\n </head>\n <body>\n <aside>%s</aside>\n <main>%s</main>\n <section id=\"operations\">%s</section>\n <footer>%s</footer>\n </body>\n</html>\"\"\"\n % (\n self.__title(conf),\n self.__css(),\n self.__css_path(css_path),\n self.__sidebar(conf),\n self.__header(conf),\n self.__operations(conf),\n self.__footer(),\n ),\n )", "def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()", "def test_html_documentation(self):\n app = Sphinx(\n self.source_dir,\n self.config_dir,\n self.output_dir,\n self.doctree_dir,\n buildername='html',\n warningiserror=True,\n )\n app.build(force_all=self.all_files)", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)", "def documentation():\n return render_template('help.html')", "def documentation():\n return auto.html()", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n 
\"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def generate():\n local('cd doc && make clean && make html')", "def get_documentation():\n return send_file(base_dir / \"static/documentation.html\", \"text/html; charset=UTF-8\")", "def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6", "def generate_html(repo_dir, package_dir, module):\n apidir = os.path.join(repo_dir, 'doc', 'api')\n print(f\"Generating {module} API docs in {apidir!r}\")\n if subprocess.call(['sphinx-apidoc', '-Tef', '-o', apidir,\n os.path.join(package_dir, module),\n os.path.join(package_dir, module, 'test'),\n os.path.join(package_dir, module, 'scripts')]):\n raise RuntimeError(f'API doc generation failed for {module}')", "def html():\n env.file_ext = \".html\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --standalone --bibliography={bib_file} --csl={csl_file} --toc --number-sections\".format(**env))", "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . 
docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def store_documentation(self, file_path, css_path=None):\n html = self.get_documentation(css_path)[1]\n with open(file_path, \"w+\", encoding=\"utf8\") as f:\n f.write(html)", "def __html__(self, file_path:str):\n raise NotImplementedError", "def buildHTML(self):\n\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"clean\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n for line in proc.stdout:\n print(\"stdout: \" + str(line.rstrip(), encoding='utf8'))\n print('----------------')\n proc = subprocess.Popen(\n [\"make\", \"html\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + str(line.rstrip(), encoding='utf8'))\n\n # Load corresponding HTML file from newly-built Sphinx docs\n self.preview.load_html(self.output_html_path)", "def index():\n with open(\"PROJECT.md\", \"r\") as project_file:\n md_template_string = markdown.markdown(\n project_file.read(), extensions=[\"fenced_code\"]\n )\n return md_template_string", "def sphinxify(docstring, context, buildername='html', img_path=''):\n if img_path:\n if os.name == 'nt':\n img_path = img_path.replace('\\\\', '/')\n leading = '/' if os.name.startswith('posix') else ''\n docstring = docstring.replace('_images', leading+img_path)\n\n srcdir = osp.join(DOCDIR, '_sources')\n if not osp.exists(srcdir):\n os.makedirs(srcdir)\n base_name = osp.join(srcdir, xrtQookPageName)\n rst_name = base_name + '.rst'\n\n # This is needed so users can type \\\\ on latex eqnarray envs inside raw\n # docstrings\n docstring = docstring.replace('\\\\\\\\', '\\\\\\\\\\\\\\\\')\n\n # Add a class to several characters on the argspec. 
This way we can\n # highlight them using css, in a similar way to what IPython does.\n # NOTE: Before doing this, we escape common html chars so that they\n # don't interfere with the rest of html present in the page\n argspec = escape(context['argspec'])\n for char in ['=', ',', '(', ')', '*', '**']:\n argspec = argspec.replace(\n char, '<span class=\"argspec-highlight\">' + char + '</span>')\n context['argspec'] = argspec\n\n doc_file = codecs.open(rst_name, 'w', encoding='utf-8')\n doc_file.write(docstring)\n doc_file.close()\n\n confoverrides = {'html_context': context,\n 'extensions': ['sphinx.ext.mathjax',\n 'sphinxcontrib.jquery']}\n\n doctreedir = osp.join(DOCDIR, 'doctrees')\n sphinx_app = Sphinx(srcdir, DOCDIR, DOCDIR, doctreedir, buildername,\n confoverrides, status=None, warning=None,\n freshenv=True, warningiserror=False, tags=None)\n\n try:\n sphinx_app.build(None, [rst_name])\n except SystemMessage:\n pass", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def generate(self):\n\n # Write Doxyfile\n doxyfile_content = DOXYFILE_TEMPLATE.format(\n name=\"wurfapi\",\n output_path=self.output_path,\n source_path=\" \".join(self.source_paths),\n recursive=\"YES\" if self.recursive else \"NO\",\n extra=\"\",\n )\n\n doxyfile_path = os.path.join(self.output_path, \"Doxyfile\")\n with open(doxyfile_path, \"w\") as doxyfile:\n\n doxyfile.write(doxyfile_content)\n\n # @todo: Doxygen generates a bunch of warnings. We should\n # propagate these somehow - if you want to know what\n # has not been documented etc.\n result = self.runner.run(\n command=self.doxygen_executable + \" Doxyfile\", cwd=self.output_path\n )\n\n # Doxygen reports warnings on stderr. So if we have some output\n # there raise it.\n self._suppress_incorrect_warnings(result.stderr)\n\n if result.stderr.output and self.warnings_as_error:\n raise wurfapi.doxygen_error.DoxygenError(result.stderr.output)\n\n # The Doxygen XML is written to the 'xml' subfolder of the\n # output directory\n return os.path.join(self.output_path, \"xml\")", "def get_documentation_body(path=\"\"):\n\n realpath = _convert_url_to_path(path)\n\n if realpath is not None:\n if os.path.isdir(realpath):\n for i in EXCLUDED_DIRS:\n if realpath.endswith(i):\n return ERROR\n return _get_dir_content(realpath)\n\n if os.path.isfile(realpath):\n file_name, file_ext = os.path.splitext(realpath)\n if not os.path.basename(file_name) in EXCLUDED_FILES and not os.path.basename(path) in EXCLUDED_FILES:\n if file_ext in FILETYPES:\n return _get_file_content(realpath)\n\n return ERROR", "def get_index(self, css_path=None):\n\n return \"\"\"\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>RAMOSE</title>\n <meta name=\"description\" content=\"Documentation of RAMOSE API Manager\">\n <style>%s</style>\n %s\n </head>\n <body>\n %s\n <footer>%s</footer>\n </body>\n </html>\n \"\"\" % (\n self.__css(),\n self.__css_path(css_path),\n self.__parse_logger_ramose(),\n self.__footer(),\n )", "def opendocs():\n _open_file('_build/index.html')", "def create_html(self):\n # Add html content to the self.doc\n self.doc.asis('<!DOCTYPE html>')\n with self.tag('html'):\n self.design_header()\n self.design_body()\n # Write html content from self.doc\n with codecs.open(self.filestream.name, 'w', 'utf-8') as f:\n html_content = indent(\n self.doc.getvalue(),\n indentation=' ',\n newline='\\r\\n'\n )\n f.write(html_content)", "def docs_build(directory, site_name, view=True, assume_yes=False):\n context = 
toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )" ]
[ "0.67940426", "0.66304994", "0.6588725", "0.6524229", "0.64368373", "0.6319469", "0.6250843", "0.62383306", "0.6214085", "0.62133044", "0.6146423", "0.6126285", "0.6115011", "0.60907006", "0.6083795", "0.6074013", "0.60439724", "0.6003797", "0.59726506", "0.59555364", "0.59475875", "0.5941778", "0.5857618", "0.58538926", "0.5853662", "0.57755154", "0.5770441", "0.5764932", "0.57577646", "0.57176965" ]
0.77786535
0
Heterogeneity measure function. It should be noted that I subtract sum(vector) from sum_squared. Some mistakes can arise from this step
def heterogen(vector): vector = [el for el in vector if el > 0] sum_squared = (sum(vector)) ** 2 squares = map(lambda x: x * (x - 1), vector) delta = sum_squared - sum(vector) if delta > 0: prob = (delta - sum(squares)) / float(delta) else: prob = 0 return prob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vf_squared_hellinger(x, y, critic):\n p = 1.0 - torch.exp(-critic(x))\n q = 1.0 - torch.exp(-critic(y))\n return torch.mean(p - (q/(1-q)))", "def _calc_Hc(self, signal):\n\n return 2.8 * np.nanstd(signal)", "def inhale_efficiency(self) -> _VectorisedFloat:\n return self.η_inhale", "def hss(self):\n return 2 * (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / (\n (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 1] + self.table[1, 1]) +\n (self.table[0, 0] + self.table[1, 0]) * (self.table[1, 0] + self.table[1, 1]))", "def H(self, u, v):\n return (self.E(u, v) * self.N(u, v)\n - 2 * self.F(u, v) * self.M(u, v)\n + self.G(u, v) * self.L(u, v)) / \\\n (2 * (self.E(u, v) * self.G(u, v) - np.square(self.F(u, v))))", "def H_complicated(x):\n _ = x**2\n _[0] += np.sin(2*x[1]*x[0])\n _[1] += -3*x[0]**3 + np.log(np.abs(x[0]))\n return _", "def calc_lhv(self):\n hf = {}\n hf['hydrogen'] = 0\n hf['methane'] = -74.85\n hf['ethane'] = -84.68\n hf['propane'] = -103.8\n hf['butane'] = -124.51\n hf['O2'] = 0\n hf['CO2'] = -393.5\n # water (gaseous)\n hf['H2O'] = -241.8\n\n lhv = 0\n\n for f, x in self.fuel.val.items():\n molar_masses[f] = CP.PropsSI('M', f)\n fl = set(list(hf.keys())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n if len(fl) == 0:\n continue\n\n if list(fl)[0] in self.fuels():\n structure = fluid_structure(f)\n\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure:\n n[el] = structure[el]\n else:\n n[el] = 0\n\n lhv += (-(n['H'] / 2 * hf['H2O'] + n['C'] * hf['CO2'] -\n ((n['C'] + n['H'] / 4) * hf['O2'] +\n hf[list(fl)[0]])) / molar_masses[f] * 1000) * x\n\n return lhv", "def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h", "def _energy(self, X, y):\n yhat = self.evaluate(X)\n loss = ((y - yhat) ** 2).sum() / 2\n return loss", "def test_H_hat(self):\n\t\tposition = [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477]\n\t\tpotential = [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0]\n\t\tc = 1\n\t\tposition = tf.constant(position, shape = [1, len(position)], dtype = tf.float32)\n\t\tpotential = tf.constant(potential, shape = [1, len(potential)], dtype = tf.float32)\n\t\tbasis = schrodinger.create_basis(5)\n\t\tv = schrodinger.v0(position, potential, basis)\n\t\tcoeff = schrodinger.coefficient(position, basis)\n\t\tv0_hat = tf.linalg.solve(coeff, v)\n\t\tH = schrodinger.H_hat(c, len(basis), v0_hat)\n\t\tself.assertEqual(coeff.get_shape(), [len(basis), len(basis)])", "def h(self, probs):\n\n return np.sum(-p*np.log2(p) if p > 0 else 0 for p in np.nditer(probs))", "def SSE(y, yhat):\n\n return np.sum(np.power(yhat - y, 2))", "def h(self):\n cost = 0\n for fl in range(1, self.num_floors):\n cost += len(self.floors[fl]) * (self.num_floors - fl)\n return cost", "def calcHeuristicFunc(self, dictio):\n\t\t_sum = 0\n\n\t\tfor u in self.unitlist:\n\t\t for p in self.units[u[0]]:\n\t\t nums = [0] *self.N**2\n\t\t for i in p:\n\t\t nums[dictio[i]-1] += 1\n\t\t for j in nums:\n\t\t if(j==0):\n\t\t _sum += 1\n\t\treturn _sum", "def compute_energy(x, input_HP_sequence):\n # add code here, feel free to change the argument list\n # Given a input HP sequence, we already which points are H's.\n return U", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, 
op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def weighted_sum(h):\n return h", "def compute_hjorth_mobility(signal, fs=5000):\n\n variancex = signal.var(ddof=1)\n # diff signal is one sample shorter\n variancedx = np.var(np.diff(signal) * fs, ddof=1)\n # compute variance with degree of freedom=1 => The mean is normally\n # calculated as x.sum() / N, where N = len(x). If, however, ddof is\n # specified, the divisor N - ddof is used instead.\n\n hjorth_mobility = np.sqrt(variancedx / variancex)\n return hjorth_mobility", "def _vh_hwhm(edges,count):\n\n # find the maximum value\n\n c_max = np.max(count)\n indx = np.nonzero(count >= (c_max/2))[0]\n return (edges[indx[-1]] - edges[indx[0]])/2", "def measure(self, X, Y, index):\n X = np.array(X)\n Y = np.array(Y)\n length = len(X)\n score = 0\n if length != 0:\n for i in range(length):\n sigma = self.votility[index + i]\n if sigma != 0:\n score += abs(X[i]-Y[i])/sigma\n \n score = score/length \n return score", "def foh(self):\n return self.table[0, 0] / (self.table[0, 0] + self.table[0, 1])", "def salomonfcn(x: np.ndarray) -> np.ndarray:\n x2 = x**2\n sumx2 = np.sum(x2, axis=1)\n sqrtsx2 = np.sqrt(sumx2)\n\n scores = 1 - np.cos(2 * np.pi * sqrtsx2) + (0.1 * sqrtsx2)\n\n return scores", "def schwefelfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = 418.9829 * n - np.sum(x * np.sin(np.sqrt(np.abs(x))), axis=1)\n return scores", "def calc_hrms(x,y):\n l = x[-1]-x[0]\n return np.sqrt(1./l*simps(y**2,x))/l", "def rh(self, h):\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder", "def see(p, y, yHat):\n n = y.shape[0]\n numer = ((y - yHat) ** 2).sum()\n denom = n - p - 1\n if (denom == 0):\n s = 0\n elif ( (numer / denom) < 0 ):\n s = 0.001\n else:\n s = (numer / denom) ** 0.5\n return s", "def get_fnllh(self):\n\n def fnllh(p):\n return 0.5 * anp.sum(self.get_fres()(p) ** 2)\n\n return fnllh", "def cost(h, y):\n\tm = y.shape[0]\n\tcost = (-1/m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))\n\treturn cost", "def vectorized_hessp(self, x, p):\n primals = self.vectorizer.unpack(x)\n tangents = self.vectorizer.unpack(p)\n hp_arrays = self.handler.hessp(primals, tangents)\n self._n += 1\n self.losses.append(self.loss)\n self._maybe_update_pbar()\n return self.vectorizer.pack(hp_arrays, \"hp\")", "def feature_energy(wv):\n return np.sqrt(np.sum(wv ** 2, axis=0)).T" ]
[ "0.61793756", "0.6156195", "0.6097398", "0.6042531", "0.5997857", "0.59823656", "0.59759235", "0.5953194", "0.5927179", "0.5912538", "0.5900989", "0.5869575", "0.5856831", "0.585217", "0.58473444", "0.580972", "0.57908636", "0.5788946", "0.5761239", "0.5745539", "0.5744894", "0.5724269", "0.5724051", "0.57177144", "0.57004756", "0.56957346", "0.5682344", "0.5681886", "0.56808597", "0.56802565" ]
0.6275125
0
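For context on the record above: the `heterogen` document computes a Simpson-style heterogeneity probability, since `delta = sum(vector)**2 - sum(vector)` equals `N*(N-1)` with `N = sum(vector)`. A minimal, self-contained sketch of the same computation; the example counts are illustrative assumptions, not taken from the dataset:

```python
def heterogen(counts):
    """Probability that two draws without replacement fall in different classes."""
    counts = [c for c in counts if c > 0]          # keep strictly positive counts, as above
    total = sum(counts)                            # N
    pairs_total = total * (total - 1)              # N * (N - 1) == sum_squared - sum(vector)
    pairs_same = sum(c * (c - 1) for c in counts)  # ordered same-class pairs
    return (pairs_total - pairs_same) / float(pairs_total) if pairs_total > 0 else 0

# Illustrative counts: three classes with 5, 3, and 2 members.
print(heterogen([5, 3, 2]))  # 62/90 ~= 0.689
```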
Generates all test cases for a GEMM microkernel.
def generate_test_cases(ukernel, mr, k_block, isa): _, test_name = ukernel.split("_", 1) _, datatype, ukernel_type, _ = ukernel.split("_", 3) test_args = [ukernel] return xngen.preprocess(PACK_TEST_CODE, { "TEST_NAME": test_name.upper().replace("UKERNEL_", ""), "UKERNEL_TYPE": ukernel_type.upper(), "UKERNEL_NAME": ukernel, "DATATYPE": datatype, "MR": mr, "KBLOCK": k_block, "ISA_CHECK": ISA_TO_CHECK_MAP.get(isa, ""), "next_prime": next_prime, })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_KGE_methods(model_name):\n testing_function(model_name)", "def test_generate_all_testing(self):\n pass", "def testgen(self):\n self.parse()\n self.generate()", "def tests():", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def test_genx(nsd, backend):\n # NCOLS of data:\n # 2 - test kernel only\n # 3 - test kernel and chi2 calculation\n # 4 - test resolution smearing and chi2 calculation\n\n test_name, slabs, data = nsd\n\n kernel_test(slabs, data, backend)", "def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")", "def generate_testsystem(smiles = 'CCCC',\n forcefield_files = ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],\n forcefield_kwargs = {'removeCMMotion': False, 'ewaldErrorTolerance': 1e-4, 'constraints' : None, 'hydrogenMass' : 4 * unit.amus},\n nonperiodic_forcefield_kwargs = {'nonbondedMethod': app.NoCutoff},\n periodic_forcefield_kwargs = {'nonbondedMethod': app.PME},\n small_molecule_forcefield = 'gaff-2.11',\n padding=9*unit.angstroms,\n ionicStrength=0.0*unit.molar,\n water_model = 'tip3p',\n pressure = 1.0 * unit.atmosphere,\n temperature = 300 * unit.kelvin,\n barostat_period = 50,\n **kwargs\n ):\n from openforcefield.topology import Molecule\n from perses.utils.openeye import smiles_to_oemol\n from openmmforcefields.generators.system_generators import SystemGenerator\n from perses.utils.openeye import OEMol_to_omm_ff\n from simtk import openmm\n from qmlify.utils import pull_force_by_name\n\n oemol = smiles_to_oemol(smiles)\n off_molecules = [Molecule.from_openeye(oemol)]\n vac_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n nonperiodic_forcefield_kwargs = nonperiodic_forcefield_kwargs, molecules = off_molecules)\n barostat = openmm.MonteCarloBarostat(pressure, temperature, barostat_period)\n sol_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n periodic_forcefield_kwargs = periodic_forcefield_kwargs,\n molecules = off_molecules,\n barostat = barostat)\n\n\n vac_system, vac_positions, vac_topology = OEMol_to_omm_ff(oemol, vac_system_generator)\n\n #now i can attempt to solvate\n modeller = app.Modeller(vac_topology, vac_positions)\n modeller.addSolvent(sol_system_generator.forcefield, model=water_model, padding=padding, ionicStrength=ionicStrength)\n sol_positions, sol_topology = modeller.getPositions(), modeller.getTopology()\n sol_positions = unit.quantity.Quantity(value = np.array([list(atom_pos) for atom_pos in sol_positions.value_in_unit_system(unit.md_unit_system)]), unit = unit.nanometers)\n sol_system = sol_system_generator.create_system(sol_topology)\n\n vac_sys_pos_top = (vac_system, vac_positions, vac_topology)\n sol_sys_pos_top = (sol_system, sol_positions, sol_topology)\n\n #a quick assertion to make sure the nonbonded forces are being treated properly\n vac_nbf, sol_nbf = 
pull_force_by_name(vac_system, 'NonbondedForce'), pull_force_by_name(sol_system, 'NonbondedForce')\n assert not vac_nbf.usesPeriodicBoundaryConditions()\n assert sol_nbf.usesPeriodicBoundaryConditions()\n\n return vac_sys_pos_top, sol_sys_pos_top", "def spec_tests():\n pass", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def test_generate_nb_testing(self):\n pass", "def tests_generator(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n cmd_list = [\n (\"Initial Create/Compile/Read Compiled Tree\", \"{0} -D {1} -i 10 --makej -s {2}\"),\n ]\n\n tests = []\n for idx, (desc, cmd) in enumerate(cmd_list):\n test_name = \"compile_bench_{0}_{1}\".format(idx + 1, to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=cmd.format(cb_bin, self.test_path, bin_path))\n tests.append(test)\n return tests", "def test_rm500(self):\n\t\tmy_test_file = \"/\".join([os.path.dirname(sys.modules[\"cancerscope\"].__file__), \"../tests/data/test_tcga.txt\"])\n\t\tscope_ensemble_obj = cancerscope.scope()\n\t\ttest_X = scope_ensemble_obj.load_data(my_test_file) # X, samples, features_test, in_genecode\n\t\t## Get the model of interest\n\t\tmodel_name = \"v1_rm500\"\n\t\tmodel_in = \"\"\n\t\tquery_localdirs = cancerscope.get_models.findmodel(os.path.dirname(cancerscope.__file__), model_name)\n\t\tif query_localdirs is not None:\n\t\t\tmodel_in = query_localdirs[model_name]\n\t\telse:\n\t\t\tmodel_in = cancerscope.get_models.downloadmodel(model_label=model_name)\n\t\tself.assertTrue(os.path.isdir(model_in))\n\t\tself.assertTrue(os.path.exists(\"\".join([model_in, \"/lasagne_bestparams.npz\"])))\n\t\t\"\"\"Test if model can be setup correctly\"\"\"\n\t\tlmodel = cancerscope.scopemodel(model_in)\n\t\tlmodel.fit()\n\t\tself.assertEqual(len(lmodel.features), 17688)\n\t\tx_input = lmodel.prepare_input_featorders(X=test_X[0], x_features_genecode = test_X[3], x_features=test_X[2])\n\t\t\"\"\"Test if it predicts properly\"\"\"\n\t\tallpreds_names = lmodel.predict(x_input, get_all_predictions=True,get_numeric=False, get_predictions_dict=False)\n\t\tallpreds_values = lmodel.predict(x_input, get_all_predictions=True,get_numeric=True, get_predictions_dict=False)\n\t\ttoppreds_names = lmodel.predict(x_input, get_all_predictions=False,get_numeric=False, get_predictions_dict=False)\n\t\ttoppreds_values = lmodel.predict(x_input, get_all_predictions=False,get_numeric=True, get_predictions_dict=False)\n\t\ttoppreds_df = lmodel.predict(x_input, get_all_predictions=True,get_numeric=False, get_predictions_dict=True)\n\t\tself.assertEqual(len(allpreds_names[0]), 66); self.assertEqual(len(allpreds_names[1]), 66); \n\t\tself.assertEqual(allpreds_values.shape[1],66); \n\t\tself.assertAlmostEqual(allpreds_values[0][1], 0.003065253372039)\n\t\tself.assertEqual(toppreds_names[0], \"PAAD_TS\"); self.assertEqual(toppreds_names[1], \"HNSC_TS\")\n\t\tself.assertAlmostEqual(toppreds_values[0],0.20889836023919614, 6, 0.000001); self.assertAlmostEqual(toppreds_values[1], 0.44416348623870444, 6, 0.000001)\n\t\t#self.assertEqual(round(toppreds_values[0],12), round(0.208874390780809,12)); self.assertEqual(round(toppreds_values[1],12), round(0.444162763077693,12))\n\t\tself.assertEqual(toppreds_df[0][0][0], toppreds_names[0]); self.assertAlmostEqual(float(toppreds_df[0][0][1]), toppreds_values[0]); 
\n\t\tself.assertEqual(toppreds_df[1][0][0], toppreds_names[1]); self.assertAlmostEqual(float(toppreds_df[1][0][1]), toppreds_values[1])", "def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')", "def pytest_generate_tests(metafunc):\n if 'browser' in metafunc.fixturenames:\n if os.environ.get('E2E', 'no').lower() != 'yes':\n pytest.skip(\n \"End-to-end tests skipped because E2E environment variable \"\n \"was not set to 'yes'.\")\n\n # Parameterize test based on list of browsers.\n browsers = os.environ.get('E2E_WEBDRIVER_BROWSERS', 'Chrome').split()\n metafunc.parametrize('browser', browsers, indirect=True)", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def main():\r\n parametrized_data = loadData('files/parametrized.p')\r\n config = loadConfig('config/gmm.cfg')\r\n\r\n data_ = eachDigitGMM(parametrized_data, config)\r\n\r\n save(data_)", "def _main():\n min_args = 6\n max_args = min_args + 3\n if len(sys.argv) not in range(min_args, max_args + 1):\n print(_HELP_STR)\n sys.exit(1)\n\n n_o = int(sys.argv[1])\n d = int(sys.argv[2])\n r_q = int(sys.argv[3])\n q = int(sys.argv[4])\n eps = float(sys.argv[5])\n kern = sys.argv[6] if len(sys.argv) > 6 else 'rbf'\n seed = int(sys.argv[7]) if len(sys.argv) > 7 else 1234\n testtype = sys.argv[8] if len(sys.argv) > 8 else 'inversion'\n kerntypes = ['rbf', 'periodic', 'matern', 'mix']\n testtypes = ['inv', 'opt']\n\n assert n_o > 7\n assert d > 0\n assert r_q > 0\n assert r_q <= d\n assert q > 0\n assert eps > 0\n assert kern in kerntypes\n assert testtype in testtypes\n np.random.seed(seed)\n n = n_o * d\n\n print('n_o {} d {} r_q {} q {} eps {} kern {} seed {} test-type {}'.format(\n n_o, d, r_q, q, eps, kern, seed, testtype))\n\n distrib = scipy.stats.truncnorm(-1, 1)\n coreg_vecs = distrib.rvs(size=(q, r_q, d))\n coreg_diags = np.reciprocal(np.random.gamma(shape=1, scale=1, size=(q, d)))\n noise = np.reciprocal(np.random.gamma(\n shape=(1 + (1 / eps)), scale=1, size=d))\n kernels = gen_kernels(q)\n 
descriptions = [\n 'rbf only - inv lengthscales in logspace(0, 1, q)',\n 'periodic only - inv lengthscale is 1, periods in logspace(0, 1, q)',\n 'matern32 only - inv lengthscales in logspace(0, 1, q)',\n 'mixed - rbf, periodic, matern varying params added together']\n kdict = {k_name: (k, desc) for k_name, k, desc in\n zip(kerntypes, kernels, descriptions)}\n\n Xs, Ys = np.random.rand(2, d, n_o)\n Xs = np.expand_dims(Xs, Xs.ndim)\n\n dists, grid_dists, interpolant, interpolant_T = prep(\n d, n_o, Xs)\n\n k, desc = kdict[kern]\n print()\n print(desc)\n\n fkern = FunctionalKernel(D=d, lmc_kernels=k,\n lmc_ranks=[len(x) for x in coreg_vecs])\n fkern.noise = noise\n fkern.coreg_vecs = coreg_vecs\n fkern.coreg_diags = coreg_diags\n fkern.set_input_dim(1)\n\n run_kernel_benchmark(\n Xs, Ys, fkern, dists, grid_dists, interpolant, interpolant_T, testtype)", "def generate_testcase(self, outfile, reg):\n \"\"\" testcase for the given register \"\"\"\n pass", "def test_full_hypm(self):\n test_files = glob.glob(INPUT_HYPM_PATH + '/*.mdd')\n\n mdd.procall(test_files)\n\n self.compare_node58()", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def test_machine_learning():", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def test_T01():", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def main():\n run_test_all()", "def __main() :\n launchTests()", "def generate_x(number_dimensions, T_train, T_test, mu, feature_model):\n number_training_obeservations = T_train.shape[0]\n number_testing_obeservations = T_test.shape[0]\n\n X_train = np.zeros((number_training_obeservations,number_dimensions))\n X_test = np.zeros((number_testing_obeservations,number_dimensions))\n\n mixture_indicator_train = generate_mixture_indicator(number_training_obeservations)\n mixture_indicator_test = generate_mixture_indicator(number_testing_obeservations)\n\n G = np.random.normal(0,1,(number_dimensions,number_dimensions))\n q, r = np.linalg.qr(G)\n\n mu1 = mu*np.ones(number_dimensions)\n mu2 = -mu*np.ones(number_dimensions)\n\n if feature_model == \"A\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@[email protected]\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n\n\n elif feature_model == \"B\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n 
lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@[email protected]\n\n eigenvalues2 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues2 = np.sort(eigenvalues2, axis = 0)[::-1]/np.sum(eigenvalues2)\n lambda2 = np.identity(number_dimensions)\n np.fill_diagonal(lambda2,eigenvalues2)\n cov2 = q@[email protected]\n\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n\n train_mean = np.mean(X_train, axis = 0)\n train_std = np.std(X_train, axis = 0)\n X_train = (X_train - train_mean)/train_std\n X_test = (X_test - train_mean)/train_std\n \n return X_train, X_test", "def test_feature_shape():\n\n N = 1024\n n_sample = 100\n\n def get_gemm_feature(target):\n k = tvm.reduce_axis((0, N), 'k')\n A = tvm.placeholder((N, N), name='A')\n B = tvm.placeholder((N, N), name='B')\n C = tvm.compute(A.shape, lambda y, x: tvm.sum(A[y, k] * B[k, x], axis=k),\n name='C')\n\n s = tvm.create_schedule(C.op)\n\n y, x = s[C].op.axis\n axes = list(s[C].tile(y, x, 8, 8)) + [k]\n perm = np.random.permutation(5)\n axes = [axes[x] for x in perm]\n s[C].reorder(*axes)\n\n if \"gpu\" in target.keys:\n pick = []\n # filter out reduction axis\n for i in range(len(perm)):\n if perm[i] != 4:\n pick.append(axes[i])\n s[C].bind(pick[0], tvm.thread_axis(\"blockIdx.x\"))\n s[C].bind(pick[1], tvm.thread_axis(\"vthread\"))\n s[C].bind(pick[2], tvm.thread_axis(\"threadIdx.y\"))\n\n with target:\n feas = feature.get_itervar_feature(s, [A, B, C])\n feas = feature.flatten_itervar_feature(feas)\n return feas\n\n targets = [\n tvm.target.cuda(),\n tvm.target.mali(),\n tvm.target.arm_cpu(),\n ]\n\n for target in targets:\n dim = len(get_gemm_feature(target))\n for i in range(n_sample):\n assert dim == len(get_gemm_feature(target)), \"dimensions of feature do not match\" \\\n \" for different configurations\"", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)" ]
[ "0.6356984", "0.6329179", "0.6058904", "0.5985874", "0.59467685", "0.5932151", "0.59172386", "0.5886024", "0.5883808", "0.58833915", "0.5840056", "0.5799488", "0.57936954", "0.57375807", "0.5693975", "0.56644064", "0.5638442", "0.5610446", "0.55895394", "0.5586659", "0.5577155", "0.5558991", "0.55445766", "0.55424243", "0.5525481", "0.5517251", "0.54838675", "0.5479699", "0.5475904", "0.5471116" ]
0.66081387
0
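The GEMM record above depends on external helpers (`xngen.preprocess`, `PACK_TEST_CODE`, `ISA_TO_CHECK_MAP`, `next_prime`) that are not included in this dump, so only the name-splitting step can be shown standalone. A small sketch using a hypothetical ukernel name chosen purely for illustration:

```python
# Hypothetical ukernel name; real names come from the build metadata that feeds the generator.
ukernel = "xnn_f32_gemm_minmax_ukernel_4x8__neon"

# The same two splits performed by generate_test_cases above.
_, test_name = ukernel.split("_", 1)
_, datatype, ukernel_type, _ = ukernel.split("_", 3)

print(test_name.upper().replace("UKERNEL_", ""))  # F32_GEMM_MINMAX_4X8__NEON
print(datatype, ukernel_type.upper())             # f32 GEMM
```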
Sync file source to dest
def sync_file(source, dest, dryrun=False, diff=False): if diff: if not exists(dest): logger.info(("Destination '{}' does not exist:".format(dest), " skipping diff")) return with open(source) as a: with open(dest) as b: s1 = a.readlines() s2 = b.readlines() sys.stdout.writelines( difflib.unified_diff(s1, s2, fromfile=source, tofile=dest)) return if not exists(dest): if dryrun: logger.info("DRY_RUN: Copying rule '{}' to '{}'".format( source, dest)) else: if not exists(dirname(dest)): os.makedirs(dirname(dest)) logger.info("Copying rule '{}' to '{}'".format(source, dest)) shutil.copy2(source, dest) else: equal = filecmp.cmp(source, dest) if (not equal): if dryrun: logger.info("DRY_RUN: Updating rule '{}' to '{}'".format( source, dest)) else: logger.info("Updating rule '{}' to '{}'".format(source, dest)) shutil.copy2(source, dest) else: if dryrun: logger.info("DRY_RUN: rule '{}' up to date".format(dest)) else: logger.info("rule '{}' up to date".format(dest))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_file ( self, source, dest ):\n return", "def copy(self, src_path: str, tgt_path: str) -> None:", "def copy(self, source_host, dest_host, filename):", "def copy_file(source_file, target_file):\n\t# print('\\n\\nCopying [{}] to [{}].\\n\\n'.format(source_file, target_file))\n\trun_rsync([source_file, target_file])", "def _rsync(self, source, dest):\n \n #print(source)\n \n # Test SSH connection.\n if not self._ssh('test 1 -eq 1', use_pwd=False):\n print \"Waiting for SSH on %s with key %s\" % (self.address[0], self.key_file)\n time.sleep(1)\n while not self._ssh('test 1 -eq 1', use_pwd=False):\n time.sleep(1)\n\n # Archive, compress, delete extraneous files from dest dirs.\n rsync = ['rsync', '-az', '--delete']\n\n # Use key file\n if self.key_file:\n ssh = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -i \"%s\"'\n rsync.extend(['-e', ssh % self.key_file])\n\n if isinstance(source, list):\n rsync.extend(source)\n rsync.append(dest)\n else:\n rsync.extend([source, dest])\n \n print 'Sync files from %s to %s...' % (source, dest)\n \n if subprocess.call(rsync) == 0:\n return True\n else:\n return False", "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def send_file(self, src: PathLike, dest: PathLike, force: bool = False):", "def send_to(self, dest='.', src='/tmp/', url='localhost',\n rsync='rsync -auv'):\n files = self.setup(dest=dest, src=src)\n self.send_files(files, url=url, rsync=rsync)", "def copystat(src, dest):\n import shutil\n\n shutil.copystat(str(src), str(dest))", "def sync(src, dst, force_sync=True, use_sudo='false'):\n use_sudo = use_sudo.lower() == 'true'\n\n # Update dotfiles\n if force_sync:\n fetch()\n\n # Synchronize system\n dotfiles = '/home/%(user)s/dotfiles' % env\n env_dotfiles = '%(dotfiles)s/%(src)s' % locals()\n if is_dir(env_dotfiles):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n cmd = 'rsync -avr --exclude \".git/\" \"%(env_dotfiles)s\" \"%(dst)s\"' % locals()\n if use_sudo:\n sudo(cmd)\n else:\n run(cmd)", "def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def cp(self, src, dest):\r\n return self._call(\"-cp\", src, dest, suppress_output=True)", "def sync(raw='./raw'):\r\n\r\n subprocess.call(['rsync','-d','--ignore-existing',raw+'/','.'])", "def copy(self, source, target, recursive=True):\n if recursive:\n command = 'cp -R %s %s'\n else:\n command = 'cp %s %s'\n self.communicate(command % (source, target))", "def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1", "def run_copy(self, src, dst):\n pass", "def update_copy(self, source, dest):\n relsource = os.path.relpath(source, os.path.realpath(self.dirname))\n for copy in self.runscript.copies:\n if copy[1] == dest:\n copy[0] = relsource\n break\n 
else:\n self.runscript.add_copy(relsource, dest)", "def scp_transfer_file(self, source_file, dest_file):\n self.scp_client.put(source_file, dest_file)", "def copy_file_to(self,src,dst=\"~/\" ,password=''):\n command = 'scp -v {0} {1}@{2} {3}'.format(src,self.username,self.host,dst)\n if ':' in self.host:\n command='scp -v -P {0} -o NoHostAuthenticationForLocalhost=yes {1} {2}@{3}:{4}'.format(\n str(self.port),src,self.username,self.ip ,dst)\n if password=='':\n self.__spawn_scp(command, self.password)\n else:\n self.__spawn_scp(command, password)", "def copy_file_from(self,src,dst=\".\",password=''):\n command = 'scp -v {0}@{1}:{2} {3}'.format(self.username,self.host,src,dst)\n if ':' in self.host:\n command = 'scp -v -P {0} -o NoHostAuthenticationForLocalhost=yes {1} {2}@{3}:{4}'.format(\n str(self.port),self.username,self.ip,src,dst)\n if password=='':\n self.__spawn_scp(command, self.password)\n else:\n self.__spawn_scp(command, password)", "def send_dir(self, src: PathLike, dest: PathLike, force: bool = False):", "def sync(self):\n if (len(self._source_list) == 0) or (len(self._dest_list) == 0):\n return\n \n for src in self._source_list:\n self._sync('.', src, self._depth)", "def cp(src, dest):\n _shutil.copy2(native(src), native(dest))", "def replaceFileAtomic(source_path, dest_path):\n\n if python_version >= 0x300:\n os.replace(source_path, dest_path)\n else:\n importFromInlineCopy(\"atomicwrites\", must_exist=True).replace_atomic(\n source_path, dest_path\n )", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def file_copy(\n self,\n src: str,\n dest: Optional[str] = None,\n file_system: Optional[str] = None,\n peer: Optional[bool] = False,\n ) -> None:\n if dest is None:\n dest = os.path.basename(src)\n\n if file_system is None:\n file_system = self._get_file_system()\n\n # netmiko's enable_scp\n self.enable_scp()\n self._file_copy(src, dest, file_system)\n if peer:\n self.peer_device._file_copy(src, dest, file_system) # pylint: disable=protected-access\n\n # logging removed because it messes up unit test mock_basename.assert_not_called()\n # for tests test_file_copy_no_peer_pass_args, test_file_copy_include_peer\n # log.info(\"Host %s: File %s transferred successfully.\")", "def transfer_files(src: str, dst: str, move_src_data: bool = False):\n if move_src_data:\n logger.info('Move {0} to {1}'.format(src, dst))\n shutil.move(src, dst)\n else:\n logger.info('Copy {0} to {1}'.format(src, dst))\n copy_tree(src, dst)" ]
[ "0.6974644", "0.68930113", "0.6891386", "0.6765977", "0.67231584", "0.6550781", "0.6504014", "0.6470677", "0.63714147", "0.63575894", "0.62633055", "0.62472004", "0.62472004", "0.62472004", "0.6213031", "0.6193602", "0.61707354", "0.61391485", "0.61217904", "0.61214125", "0.6119155", "0.60860074", "0.60796845", "0.6079559", "0.60776234", "0.6035017", "0.60294384", "0.6005782", "0.5989932", "0.5968756" ]
0.69120526
1
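A condensed, self-contained sketch of the copy-if-missing-or-changed logic in the `sync_file` document above; the dry-run, diff, and logging branches are omitted and the paths are placeholders:

```python
import filecmp
import os
import shutil

def sync_file_min(source, dest):
    """Copy source to dest when dest is missing or differs (shallow stat-based compare)."""
    dest_dir = os.path.dirname(dest)
    if not os.path.exists(dest):
        if dest_dir:
            os.makedirs(dest_dir, exist_ok=True)
        shutil.copy2(source, dest)        # copy2 keeps timestamps/permissions, like the record above
    elif not filecmp.cmp(source, dest):   # default shallow compare: size and mtime
        shutil.copy2(source, dest)

# Placeholder paths, for illustration only:
# sync_file_min("rules/example.rule", "deploy/rules/example.rule")
```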
Function which automatically sets an exposure time such that the average intensity in the camera images is 'targetIntensity'
def find_exposure_time(cam,targetIntensity=100,margin=5): from numpy import mean if targetIntensity < 0 or targetIntensity > 255: print("Invalid target intensity") return 1 minExposure = 0.01 # Smallest value in ms maxExposure = 80 counter = 0 # Calculate exposures at the different end Image = cam.grab_image(timeout='1s', copy=True, exposure_time=number_to_millisecond(minExposure)) minIntensity = mean(Image) Image = cam.grab_image(timeout='1s', copy=True, exposure_time=number_to_millisecond(maxExposure)) maxIntensity = mean(Image) midIntensity = 1 while midIntensity < (targetIntensity - margin) or\ midIntensity > (targetIntensity+margin) and counter < 20: # Set exposure, take a picture and check how good it was counter = counter + 1 midExposure = (maxExposure + minExposure) / 2 Image = cam.grab_image(timeout='1s', copy=True, exposure_time=number_to_millisecond(midExposure)) midIntensity = mean(Image) if midIntensity > targetIntensity: # Exposure time too short maxExposure = midExposure # maxIntensity = midIntensity else: # Exposure time too long minExposure = midExposure # minIntensity = midIntensity if counter == 100: print("WARNING: Find exposure function ran max number of iterations!\ No really suitable exposure setting found") # Update the exposure time of the camera and return the target exposure cam.set_defaults(exposure_time=number_to_millisecond(midExposure)) return midExposure#number_to_millisecond(midExposure)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tune_exposure_time(camera, target, initial_exptime, min_exptime=0, max_exptime=None,\n max_steps=5, tolerance=0.1, cutout_size=256, bias=None, **kwargs):\n camera.logger.info(f\"Tuning exposure time for {camera}.\")\n\n images_dir = camera.get_config(\"directories.images\", None)\n if images_dir:\n images_dir = os.path.join(images_dir, \"temp\")\n os.makedirs(images_dir, exist_ok=True)\n\n # Parse quantities\n initial_exptime = get_quantity_value(initial_exptime, \"second\") * u.second\n\n if min_exptime is not None:\n min_exptime = get_quantity_value(min_exptime, \"second\") * u.second\n if max_exptime is not None:\n max_exptime = get_quantity_value(max_exptime, \"second\") * u.second\n\n try:\n bit_depth = camera.bit_depth.to_value(\"bit\")\n except NotImplementedError:\n bit_depth = 16\n\n saturated_counts = 2 ** bit_depth\n\n prefix = images_dir if images_dir is None else images_dir + \"/\"\n with tempfile.NamedTemporaryFile(suffix=\".fits\", prefix=prefix, delete=False) as tf:\n\n exptime = initial_exptime\n\n for step in range(max_steps):\n\n # Check if exposure time is within valid range\n if (exptime == max_exptime) or (exptime == min_exptime):\n break\n\n # Get an image\n cutout = camera.get_cutout(exptime, tf.name, cutout_size, keep_file=False, **kwargs)\n cutout = cutout.astype(\"float32\")\n if bias is not None:\n cutout -= bias\n\n # Measure average counts\n normalised_counts = np.median(cutout) / saturated_counts\n\n camera.logger.debug(f\"Normalised counts for {exptime} exposure on {camera}:\"\n f\" {normalised_counts}\")\n\n # Check if tolerance condition is met\n if tolerance:\n if abs(normalised_counts - target) < tolerance:\n break\n\n # Update exposure time\n exptime = exptime * target / normalised_counts\n if max_exptime is not None:\n exptime = min(exptime, max_exptime)\n if min_exptime is not None:\n exptime = max(exptime, min_exptime)\n\n camera.logger.info(f\"Tuned exposure time for {camera}: {exptime}\")\n\n return exptime", "def exptime(et=0.02):\n if et < 0.02:\n et = 0.02\n logger.error('Exposure time less than 0.02 seconds specified, using 0.02.')\n print camera.exptime(et)\n camera.status.update()", "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def exposuretime(self) -> ErrorValue:\n return ErrorValue(self._data['ExpTime'], self._data.setdefault('ExpTimeError',0.0))", "def set_exposure(self, exposure):\n self.logger.info(f'Setting exposure to {exposure}')\n self._driver.ExposureTime.SetValue(exposure)", "def estimated_intensity(self, events, intensity_track_step, end_time=None):\n if end_time is None:\n end_time = max(map(max, events))\n\n simu = self._corresponding_simu()\n if intensity_track_step is not None:\n simu.track_intensity(intensity_track_step)\n\n simu.set_timestamps(events, end_time)\n return simu.tracked_intensity, simu.intensity_tracked_times", "def averageTime(self):\n \n pass", "def runPass(self, exposure_range, rate):\n r = rospy.Rate(rate)\n for i, exposure in enumerate(exposure_range):\n if rospy.is_shutdown():\n break\n\n self.current_exposure = exposure\n self.client.update_configuration(\n {\"exposure\": self.current_exposure})\n r.sleep()\n\n finished = (i >= (len(exposure_range)-1))\n if finished:\n optimal_exposure = max(self.scores, key=self.scores.get)\n self.reset()\n return optimal_exposure # an optimal exposure has been found\n else:\n return -1", "def set_exposure_times(self, exposure_time=None, duration=None,\n start_time=None, mid_time=None, 
end_time=None):\n import time, datetime\n # Modified Julian date of the \"zero epoch\" of the time library (1/1/70)\n MJD_ZEROPOINT = 40587.0\n # Number of seconds per day.\n SECONDS_PER_DAY = 86400.0\n if hasattr(self, 'meta') and hasattr(self.meta, 'exposure'):\n if exposure_time is not None:\n self.meta.exposure.exposure_time = exposure_time\n if duration is not None:\n self.meta.exposure.duration = duration\n elif exposure_time is not None:\n self.meta.exposure.duration = exposure_time\n \n if start_time == 'NOW':\n start_time = MJD_ZEROPOINT + (time.time()/SECONDS_PER_DAY)\n if start_time is not None:\n self.meta.exposure.start_time = float(start_time)\n \n if mid_time == 'NOW':\n mid_time = MJD_ZEROPOINT + (time.time()/SECONDS_PER_DAY)\n if mid_time is not None:\n self.meta.exposure.mid_time = float(mid_time)\n \n if end_time == 'NOW':\n end_time = time.time()\n elif self.meta.exposure.start_time is not None and \\\n self.meta.exposure.duration is not None and end_time is None:\n # Set the end time to start_time + duration\n end_time = self.meta.exposure.start_time + \\\n (self.meta.exposure.duration/SECONDS_PER_DAY)\n if end_time is not None:\n self.meta.exposure.end_time = float(end_time)\n else:\n strg = \"Exposure metadata attributes missing from data model\"\n raise AttributeError(strg)", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def stabilize_intensity(which_cam, cam, verbose=False):\n L = 0.5 # Correction Rate\n mags = np.ones(12) ### !\n ntraps = len(mags)\n iteration = 0\n while iteration < 5:\n iteration += 1\n print(\"Iteration \", iteration)\n\n im = cam.latest_frame()\n try:\n trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)\n except (AttributeError, ValueError) as e:\n print(\"No Bueno, error occurred during image analysis:\\n\", e)\n break\n\n mean_power = trap_powers.mean()\n rel_dif = 100 * trap_powers.std() / mean_power\n print(f'Relative Power Difference: {rel_dif:.2f} %')\n if rel_dif < 0.8:\n print(\"WOW\")\n break\n\n deltaP = [mean_power - P for P in trap_powers]\n dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]\n mags = np.add(mags, dmags)\n print(\"Magnitudes: \", mags)\n break\n # self._update_magnitudes(mags)\n _ = analyze_image(im, ntraps, verbose=verbose)", "def move_average(source, target, tau=0.005):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def setExposureTime(self, cmd, expTime):\n\n pass", "def autoExposure(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tframes = kargs.get('frames', 4)\n\t\tstart = kargs.get('start', -10)\n\t\tend = kargs.get('start', -3)\n\t\t\n\t\tmax = 0\n\t\tv = start\n\t\tprint 'Auto Exposition starting...'\n\t\t\n\t\tfor i in range(start, end):\n\t\t\tthis.setProp('exposure', i)\n\t\t\tfor j in range(frames): this.getFrame()\n\t\t\t\n\t\t\te = 
imEntropy(this.frame)\n\t\t\tif e > max:\n\t\t\t\tmax = e\n\t\t\t\tv = i\n\t\t\n\t\tthis.setProp('exposure', v)\n\t\tfor j in range(frames): this.getFrame()\n\t\tprint 'Exposure Calibrated: %i / Entropy: %.4f' % (v, max)", "def avg_inference_time(self, avg_inference_time):\n\n self._avg_inference_time = avg_inference_time", "def endexposureloop(self):\n self.max_exposures = self.current_exposure", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n # Pass/fail thresholds\n MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames\n MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas\n MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req, fmt = its.objects.get_fastest_manual_capture_settings(props)\n caps = cam.do_capture([req]*50, [fmt])\n\n # Print out the millisecond delta between the start of each exposure\n tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]\n deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]\n deltas_ms = [d/1000000.0 for d in deltas]\n avg = sum(deltas_ms) / len(deltas_ms)\n var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg\n range0 = min(deltas_ms) - avg\n range1 = max(deltas_ms) - avg\n print \"Average:\", avg\n print \"Variance:\", var\n print \"Jitter range:\", range0, \"to\", range1\n\n # Draw a plot.\n pylab.plot(range(len(deltas_ms)), deltas_ms)\n matplotlib.pyplot.savefig(\"%s_deltas.png\" % (NAME))\n\n # Test for pass/fail.\n assert(avg > MIN_AVG_FRAME_DELTA)\n assert(var < MAX_VAR_FRAME_DELTA)\n assert(abs(range0) < MAX_FRAME_DELTA_JITTER)\n assert(abs(range1) < MAX_FRAME_DELTA_JITTER)", "def update_application_average(self, time: int) -> None:\n # TODO p5: write test\n last_applied = time - self.talp - self.tav\n\n if self.exp < 1. / self.cfg.beta:\n # average of values seen so far\n self.tav += last_applied / self.exp\n else:\n self.tav += self.cfg.beta * last_applied\n\n self.talp = time", "def take_image(self, shutter='normal', exptime=0.0,\n readout=2.0, save_as=\"\", timeout=None):\n\n s = time.time()\n parameter_list = []\n readout_time = 5\n exptime_ms = 0\n\n print(self.opt.getParameter('TimeStamps'), 'timestamp')\n # 1. Set the shutter state\n shutter_return = self._set_shutter(shutter)\n if shutter_return:\n parameter_list += shutter_return\n else:\n return {'elaptime': time.time()-s,\n 'error': \"Error setting shutter state\"}\n\n # 2. Convert exposure time to ms`\n try:\n exptime_ms = int(float(exptime) * 1000)\n logger.info(\"Converting exposure time %(exptime)ss\"\n \" to %(exptime_ms)s\"\n \"milliseconds\", {'exptime': exptime,\n 'exptime_ms': exptime_ms})\n parameter_list.append(['ExposureTime', exptime_ms])\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error setting exposure time\", exc_info=True)\n\n # 3. Set the readout speed\n logger.info(\"Setting readout speed to: %s\", readout)\n if readout not in self.AdcSpeed_States:\n logger.error(\"Readout speed '%s' is not valid\", readout)\n return {'elaptime': time.time()-s,\n 'error': \"%s not in AdcSpeed states\" % readout}\n parameter_list.append(['AdcSpeed', readout])\n\n # 4. 
Set parameters and get readout time\n try:\n logger.info(\"Sending configuration to camera\")\n readout_time = self._set_parameters(parameter_list)\n r = int(readout_time) / 1000\n logger.info(\"Expected readout time=%ss\", r)\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error setting parameters\", exc_info=True)\n\n # 5. Set the timeout return for the camera\n if not timeout:\n timeout = int(int(readout_time) + exptime_ms + 10000)\n else:\n timeout = 10000000\n\n # 6. Get the exposure start time to use for the naming convention\n start_time = datetime.datetime.utcnow()\n self.lastExposed = start_time\n logger.info(\"Starting %(camPrefix)s exposure\",\n {'camPrefix': self.camPrefix})\n try:\n data = self.opt.readNFrames(N=1, timeout=timeout)[0][0]\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Unable to get camera data\", exc_info=True)\n return {'elaptime': -1*(time.time()-s),\n 'error': \"Failed to gather data from camera\",\n 'send_alert': True}\n\n logger.info(\"Readout completed\")\n logger.debug(\"Took: %s\", time.time() - s)\n\n if not save_as:\n start_exp_time = start_time.strftime(\"%Y%m%d_%H_%M_%S\")\n # Now make sure the utdate directory exists\n if not os.path.exists(os.path.join(self.outputDir,\n start_exp_time[:8])):\n logger.info(\"Making directory: %s\", os.path.join(self.outputDir,\n start_exp_time[:8]))\n\n os.mkdir(os.path.join(self.outputDir, start_exp_time[:8]))\n\n save_as = os.path.join(self.outputDir, start_exp_time[:8], self.camPrefix+start_exp_time+'.fits')\n\n try:\n datetimestr = start_time.isoformat()\n datestr, timestr = datetimestr.split('T')\n hdu = fits.PrimaryHDU(data, uint=False)\n hdu.scale('int16', bzero=32768)\n hdu.header.set(\"EXPTIME\", float(exptime), \"Exposure Time in seconds\")\n hdu.header.set(\"ADCSPEED\", readout, \"Readout speed in MHz\")\n hdu.header.set(\"TEMP\", self.opt.getParameter(\"SensorTemperatureReading\"),\n \"Detector temp in deg C\")\n hdu.header.set(\"GAIN_SET\", 2, \"Gain mode\")\n hdu.header.set(\"ADC\", 1, \"ADC Quality\")\n hdu.header.set(\"MODEL\", 22, \"Instrument Mode Number\")\n hdu.header.set(\"INTERFC\", \"USB\", \"Instrument Interface\")\n hdu.header.set(\"SNSR_NM\", \"E2V 2048 x 2048 (CCD 42-40)(B)\", \"Sensor Name\")\n hdu.header.set(\"SER_NO\", self.serialNumber, \"Serial Number\")\n hdu.header.set(\"TELESCOP\", self.telescope, \"Telescope ID\")\n hdu.header.set(\"GAIN\", self.gain, \"Gain\")\n hdu.header.set(\"CAM_NAME\", \"%s Cam\" % self.camPrefix.upper(), \"Camera Name\")\n hdu.header.set(\"INSTRUME\", \"SEDM-P60\", \"Camera Name\")\n hdu.header.set(\"UTC\", start_time.isoformat(), \"UT-Shutter Open\")\n hdu.header.set(\"END_SHUT\", datetime.datetime.utcnow().isoformat(), \"Shutter Close Time\")\n hdu.header.set(\"OBSDATE\", datestr, \"UT Start Date\")\n hdu.header.set(\"OBSTIME\", timestr, \"UT Start Time\")\n hdu.header.set(\"CRPIX1\", self.crpix1, \"Center X pixel\")\n hdu.header.set(\"CRPIX2\", self.crpix2, \"Center Y pixel\")\n hdu.header.set(\"CDELT1\", self.cdelt1, self.cdelt1_comment)\n hdu.header.set(\"CDELT2\", self.cdelt2, self.cdelt2_comment)\n hdu.header.set(\"CTYPE1\", self.ctype1)\n hdu.header.set(\"CTYPE2\", self.ctype2)\n hdu.writeto(save_as, output_verify=\"fix\", )\n logger.info(\"%s created\", save_as)\n if self.send_to_remote:\n ret = self.transfer.send(save_as)\n if 'data' in ret:\n save_as = ret['data']\n return {'elaptime': time.time()-s, 'data': save_as}\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error writing data to 
disk\", exc_info=True)\n return {'elaptime': -1*(time.time()-s),\n 'error': 'Error writing file to disk:' % str(e)}", "def Exposure(self, time):\r\n IS_EXPOSURE_CMD_SET_EXPOSURE = 12 #there is a whole list to implement\r\n TIME = DOUBLE(time)\r\n nSizeOfParam = 8\r\n CALL('Exposure', self, \r\n UINT(IS_EXPOSURE_CMD_SET_EXPOSURE), \r\n byref(TIME), \r\n UINT(nSizeOfParam))", "def target_temperature_step(self):\n return PRECISION_HALVES", "def intensity(self, value: int, /) -> None:", "def timeWarp(destExposure, srcExposure, warpingControl):\n startTime = time.time()\n for nIter in range(1, MaxIter + 1):\n goodPix = afwMath.warpExposure(\n destExposure, srcExposure, warpingControl)\n endTime = time.time()\n if endTime - startTime > MaxTime:\n break\n\n return (endTime - startTime, nIter, goodPix)", "def settemp(t=-10):\n print camera.SetTemperature(t)\n camera.status.update()", "def __getTimeEvolution(self, target):\n omega = np.log(self._eigs[target]) / self.timeScales['training']['dt']\n van = np.exp(np.multiply(*np.meshgrid(omega, self._getTimeScale())))\n timeEvol = (van * self._amplitudes[target]).T\n return timeEvol", "def findStarAndMoveToTarget(self, exposureLengthSeconds, targetPixelX, targetPixelY, tolerancePixels, maxIterations=5):\n\n for iteration in range(1, maxIterations+1):\n print \"Attempting to roughly center brightest star (iteration %d of %d)...\" % (iteration, maxIterations)\n\n print \" Exposing guider to find star\"\n self.camera.GuiderExpose(exposureLengthSeconds)\n\n # Poll the guider until the image is ready\n while self.camera.GuiderRunning:\n time.sleep(0.1) # Wait 100 milliseconds before asking again\n\n print \" Exposure complete\"\n print \" Guide star found at pixel (%.2f, %.2f)\" % (self.camera.GuiderXStarPosition, self.camera.GuiderYStarPosition)\n\n # Calculate how far off we are in X and Y pixels\n targetErrorXPixels = self.camera.GuiderXStarPosition - targetPixelX\n targetErrorYPixels = self.camera.GuiderYStarPosition - targetPixelY\n\n print \" Pointing error: X = %.2f pixels, Y = %.2f pixels\" % (targetErrorXPixels, targetErrorYPixels)\n\n # Are we close enough to consider the procedure complete?\n if targetErrorXPixels < tolerancePixels and targetErrorYPixels < tolerancePixels:\n print \"Star is on target (within %.2f-pixel tolerance)\" % tolerancePixels\n return True\n\n\n # Need to make an adjustment. 
Transform error from pixel \n # coordinates to guider coordinates\n guiderAngleRads = math.radians(self.camera.GuiderAngle)\n\n guiderErrorX = targetErrorXPixels*math.cos(guiderAngleRads) - targetErrorYPixels*math.sin(guiderAngleRads)\n guiderErrorY = targetErrorXPixels*math.sin(guiderAngleRads) + targetErrorYPixels*math.cos(guiderAngleRads)\n \n # GuiderXSpeed and GuiderYSpeed are in pixels per second\n guideDurationX = float(guiderErrorX) / self.camera.GuiderXSpeed\n guideDurationY = float(guiderErrorY) / self.camera.GuiderYSpeed\n\n # Convert +/- guide durations into direction code and positive duration\n\n if guideDurationX > 0:\n xSign = \"+\"\n xDirection = 0 # Positive X direction\n else:\n xSign = \"-\"\n xDirection = 1 # Negative X direction\n guideDurationX = -guideDurationX\n\n if guideDurationY > 0:\n ySign = \"+\"\n yDirection = 2 # Positive Y direction\n else:\n ySign = \"-\"\n yDirection = 3 # Negative Y direction\n guideDurationY = -guideDurationY\n\n\n # Make the X guider adjustment if necessary\n if guideDurationX > 0:\n print \" Moving in %sX for %f sec...\" % (xSign, guideDurationX)\n self.camera.GuiderMove(xDirection, guideDurationX)\n while self.camera.GuiderMoving:\n time.sleep(0.1)\n\n # Make the Y guider adjustment if necessary\n if guideDurationY > 0:\n print \" Moving in %sY for %f sec...\" % (ySign, guideDurationY)\n self.camera.GuiderMove(yDirection, guideDurationY)\n while self.camera.GuiderMoving:\n time.sleep(0.1)\n\n print \" Guide adjustment finished\"\n # Loop again to see if we are on target\n\n print \"DID NOT CONVERGE AFTER %d ITERATIONS\" % maxIterations\n return False", "def intensity(self) -> int:", "def qc_illumina(args):\n clarity_epp.qc.illumina.set_avg_q30(lims, args.process_id)", "def mapRetime(ti, timelineTime):\n return ti.sourceIn() + int((timelineTime - ti.timelineIn()) * ti.playbackSpeed())" ]
[ "0.71642697", "0.6080449", "0.60105157", "0.5899675", "0.5737851", "0.57146853", "0.5641477", "0.5489508", "0.54747295", "0.5439164", "0.5424886", "0.5414795", "0.5378947", "0.5367175", "0.5364558", "0.53452235", "0.53417873", "0.5340514", "0.53373754", "0.52857375", "0.52779037", "0.5256163", "0.5228843", "0.5216824", "0.52120304", "0.5202668", "0.5139264", "0.51229846", "0.51160884", "0.511025" ]
0.703297
1
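The exposure-tuning document above is a bisection search on exposure time driven by the measured mean image intensity. A minimal sketch of that search with a mock sensor standing in for the camera; the linear response model and the single iteration cap are assumptions (the record's own loop bound and warning threshold use different numbers):

```python
def find_exposure(measure_mean, target=100, margin=5, lo=0.01, hi=80.0, max_iter=20):
    """Bisect exposure time (ms) until the measured mean intensity is within margin of target."""
    mid = (lo + hi) / 2.0
    for _ in range(max_iter):
        intensity = measure_mean(mid)
        if abs(intensity - target) <= margin:
            break
        if intensity > target:
            hi = mid   # image too bright: shorten the exposure
        else:
            lo = mid   # image too dark: lengthen the exposure
        mid = (lo + hi) / 2.0
    return mid

def mock_sensor(exposure_ms):
    """Mock camera: mean intensity grows roughly linearly with exposure, clipped at 255."""
    return min(255.0, 3.2 * exposure_ms)

print(find_exposure(mock_sensor))  # ~30 ms; mock mean intensity lands within 5 of the 100 target
```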
A function that creates the user paddle
def create_paddle(self, pos): self.shape("square") self.penup() self.color("blue") self.shapesize(stretch_wid=1, stretch_len=4) self.setpos(pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPaddle(self):\n return Paddle()", "def new_game():\r\n \r\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\r\n global score1, score2 # these are ints\r\n \r\n # initialise the positons, velocity of paddles, and scores of players.\r\n paddle1_pos = HEIGHT / 2\r\n paddle2_pos = HEIGHT / 2\r\n \r\n paddle1_vel = 0\r\n paddle2_vel = 0\r\n \r\n score1 = 0\r\n score2 = 0\r\n \r\n # spawn the ball in the centre\r\n if random.randrange(0,2):\r\n spawn_ball(RIGHT)\r\n else:\r\n spawn_ball(LEFT)", "def __init__(self, x, y):\r\n super(paddle, self).__init__(image=paddle.paddle2, x=x, y=y)\r\n self.points=games.Text(value=0, size=50, color=color.white, top=5, right=games.screen.width-5)\r\n games.screen.add(self.points)", "def drawPaddle(self, view):\n self._paddle.draw(view)", "def __init__(self, screen_Size, p1_Type, p2_Type, action_Space):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Initialize game objects\r\n self.paddle_1 = Paddle(screen_Size, p1_Type, 1, action_Space)\r\n self.paddle_2 = Paddle(screen_Size, p2_Type, 2, action_Space)\r\n self.ball = Ball(screen_Size, self.paddle_1.Width)", "def paddle_init(paddle):\n global paddle1_vel, paddle2_vel\n if paddle == RIGHT:\t# player Two paddle (right)\n x1 = WIDTH - PAD_WIDTH\n x2 = WIDTH\n paddle2_vel = 0\n else:\t\t\t\t# player One paddle (left)\n x1 = 0\n x2 = PAD_WIDTH\n paddle1_vel = 0\n y1 = HEIGHT / 2 - HALF_PAD_HEIGHT\n y2 = HEIGHT / 2 + HALF_PAD_HEIGHT\n return [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]", "def __init__(self):\n self.center = Point()\n #x coordinate is set in these amount of pixels to leave a slight gap between the screen and paddle just like in real pong video games\n self.center.x = SCREEN_WIDTH - 10\n #when game starts, paddle is placed on the middle of screen's right edge\n self.center.y = SCREEN_HEIGHT / 2", "def show_paddle(self, screen, fgColor):\r\n if self.player_Num == 1:\r\n pygame.draw.rect(screen, fgColor, pygame.Rect((0, self.y, self.Width, self.Height)))\r\n elif self.player_Num == 2:\r\n pygame.draw.rect(screen, fgColor, pygame.Rect((self.screen_Width-self.Width, self.y, self.Width, self.Height)))", "def __init__(self, screen_Size, paddle_Width):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Setup x,y limits for ball position\r\n self.left_x = paddle_Width\r\n self.right_x = self.screen_Width - paddle_Width\r\n self.top_y = self.Radius\r\n self.bot_y = self.screen_Height - self.Radius\r\n\r\n self.x = self.screen_Width//2\r\n self.y = np.random.randint(self.Radius, self.screen_Height-self.Radius)\r\n\r\n self.vx = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n self.vy = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n\r\n # Ralley counter to see game progress\r\n self.rallies = 0", "def __init__(self):\n \n self._wall = BrickWall() \n self._paddle = GRectangle(\n x=GAME_WIDTH/2 - PADDLE_WIDTH/2,\n y=PADDLE_OFFSET,\n width=PADDLE_WIDTH,\n height=PADDLE_HEIGHT,\n fillcolor = PADDLE_COLOR)\n self._clickdist = 0\n self._ball = Ball() \n self._last = None\n self._tries = 2\n self._lostlife = False", "def newPlayer():\r\n pass", "def hit_paddle(self):\n pass\n\n #Implement if collision with paddle is detected\n\n #Add randomness to how ball direction will change and return value", "def _step_their_paddle(self):\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > 
self.bottom_bound:\n self.paddle_l.down()", "def generate_bottonUp(self):\n posx = self.posx + self.sizex - 2*self.sizebotx \n posy = self.posy \n return Boton(posx, posy, self.sizebotx , self.sizeboty, color=(229, 57, 57),color_h=(186, 44, 44),text_color=(0,0,0), press_color = (150, 34, 34), font_size=5)", "def shorten_paddle_exec(self):\n if self.shorten_paddle_count == 0 and self.glitch_count == 1:\n self.window.remove(self.paddle)\n self.paddle = GRect(self.paddle_width-20, self.paddle_height, x=(self.window_width - self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n self.paddle.color = 'magenta'\n self.paddle.filled = True\n self.paddle.fill_color = 'magenta'\n self.window.add(self.paddle)\n self.glitch_count += 1\n elif 0 < self.shorten_paddle_count <= 5:\n pass\n elif self.shorten_paddle_count > 5:\n self.window.remove(self.paddle)\n self.paddle = GRect(self.paddle_width, self.paddle_height, x=(self.window_width - self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n self.paddle.color = 'black'\n self.paddle.filled = True\n self.paddle.fill_color = 'black'\n self.window.add(self.paddle)\n self.shorten_paddle_count = 0\n self.shorten_paddle_exist = False\n self.shorten_paddle_start = False\n self.glitch_count = 1", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def __init__(self, myCanvas, color, paddleW, paddleH, yAxisPos):\n self.canvas = myCanvas\n self.id = myCanvas.create_rectangle(0, 0, paddleW, paddleH, fill=color)\n\n # Getting height and width of current window\n self.canvas_width = self.canvas.winfo_width()\n self.canvas_height = self.canvas.winfo_height()\n\n # Horizontal Scroll\n self.x = 0\n\n # Centering from width and setting height as per yAxisPos\n self.canvas.move(self.id,\n (self.canvas_width//2) - paddleW // 2,\n ((int(self.canvas_height * yAxisPos)) - (paddleH//2)))\n\n # Binding Arrow Keys\n self.canvas.bind_all('<KeyPress-Left>', self.turn_left)\n self.canvas.bind_all('<KeyPress-Right>', self.turn_right)", "def updatePaddle(self, selfinput):\n assert isinstance(selfinput,GInput)\n position = 0\n \n if selfinput.is_key_down('right'):\n position = 5\n if selfinput.is_key_down('left'):\n position = -5\n \n self._paddle.move(position)", "def __set_paddle_position(self):\n self.__window.remove(self.__paddle)\n self.__window.add(self.__paddle, (self.__window.width - self.__paddle.width) / 2,\n self.__window.height - self.__paddle_offset)", "def new_game(cls, user):\n game = Game(user=user,\n game_state=\".........\",\n game_over=False)\n game.put()\n return game", "def reset_paddle(self):\r\n self.y = self.screen_Height // 2\r\n self.vy = 0", "def draw_horizontal_paddle(self):\n pygame.draw.rect(self.screen, self.color, self.top_rect)\n pygame.draw.rect(self.screen, self.color, self.bot_rect)", "def paddle_reset_position(self, mouse):\n if (0 + self.paddle.width / 2) <= mouse.x <= (self.window.width - self.paddle.width / 2):\n self.paddle_x = mouse.x - self.paddle.width / 2\n self.window.add(self.paddle, self.paddle_x, self.paddle_y)", "def draw(canvas):\r\n \r\n global score1, score2, strike_counter, paddle1_pos, paddle2_pos, ball_pos, ball_vel\r\n \r\n # draw mid line and gutters\r\n canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\r\n canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\r\n canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\r\n \r\n # update ball's position\r\n ball_pos[0] += ball_vel[0]\r\n ball_pos[1] += 
ball_vel[1]\r\n \r\n # update ball's vertical velocity\r\n if ball_pos[1] <= BALL_RADIUS or ball_pos[1] >= HEIGHT - BALL_RADIUS :\r\n ball_vel[1] = -ball_vel[1]\r\n \r\n # draw ball\r\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\r\n \r\n # update paddle's vertical position, keep paddle on the screen\r\n if paddle1_pos - HALF_PAD_HEIGHT + paddle1_vel < 0 :\r\n paddle1_pos = HALF_PAD_HEIGHT\r\n elif HEIGHT - (paddle1_pos + HALF_PAD_HEIGHT + paddle1_vel) < 0 :\r\n paddle1_pos = HEIGHT - HALF_PAD_HEIGHT\r\n else:\r\n paddle1_pos += paddle1_vel\r\n \r\n if (paddle2_pos - HALF_PAD_HEIGHT + paddle2_vel < 0):\r\n paddle2_pos = HALF_PAD_HEIGHT\r\n elif HEIGHT - (paddle2_pos + HALF_PAD_HEIGHT + paddle2_vel) < 0 :\r\n paddle2_pos = HEIGHT - HALF_PAD_HEIGHT\r\n else:\r\n paddle2_pos += paddle2_vel\r\n \r\n paddle1_left = 0\r\n paddle1_right = PAD_WIDTH\r\n paddle1_top = paddle1_pos - HALF_PAD_HEIGHT\r\n paddle1_bottom = paddle1_pos + HALF_PAD_HEIGHT\r\n paddle2_left = WIDTH - PAD_WIDTH\r\n paddle2_right = WIDTH\r\n paddle2_top = paddle2_pos - HALF_PAD_HEIGHT\r\n paddle2_bottom = paddle2_pos + HALF_PAD_HEIGHT\r\n \r\n # draw paddles\r\n canvas.draw_polygon([[paddle1_left,paddle1_top], [paddle1_right, paddle1_top], [paddle1_right, paddle1_bottom], [paddle1_left,paddle1_bottom]], 0.1, \"Black\", \"White\") \r\n canvas.draw_polygon([[paddle2_left,paddle2_top], [paddle2_right, paddle2_top], [paddle2_right, paddle2_bottom], [paddle2_left,paddle2_bottom]], 0.1, \"Black\", \"White\") \r\n\r\n # determine whether paddle and ball collide,\r\n # if collide, increase the ball's speed\r\n # otherwise, increment the score of the other player\r\n # and respawn a ball in the centre.\r\n if ball_pos[0] <= (paddle1_right+BALL_RADIUS) :\r\n if (ball_pos[1] >= paddle1_top) and (ball_pos[1] <= paddle1_bottom) :\r\n ball_vel[0] = -ball_vel[0]\r\n strike_counter += 1\r\n ball_vel[0] = ball_vel[0] * (1 + ADD_DIFFICULTY)\r\n ball_vel[1] = ball_vel[1] * (1 + ADD_DIFFICULTY)\r\n else:\r\n spawn_ball(RIGHT)\r\n score2 += 1\r\n \r\n elif (ball_pos[0] >= (paddle2_left-BALL_RADIUS)) :\r\n if (ball_pos[1] >= paddle2_top) and (ball_pos[1] <= paddle2_bottom) : \r\n ball_vel[0] = -ball_vel[0]\r\n strike_counter += 1\r\n ball_vel[0] = ball_vel[0] * (1 + ADD_DIFFICULTY)\r\n ball_vel[1] = ball_vel[1] * (1 + ADD_DIFFICULTY)\r\n else:\r\n spawn_ball(LEFT)\r\n score1 += 1\r\n \r\n # draw scores\r\n canvas.draw_text(str(score1), [0.22*WIDTH, 0.2*HEIGHT], 50, \"White\")\r\n canvas.draw_text(str(score2), [0.75*WIDTH, 0.2*HEIGHT], 50, \"White\")", "def __init__(self,number):\n\n self.number=number\n self.score=0\n self.positionx=400\n self.amount=0\n\n # Placement for the paddles\n if self.number==1:\n self.positiony=1150\n if self.number==2:\n self.positiony=50", "def _movePaddle(self):\n self._click()\n self._game.updatePaddle(self._touch)\n self._last = self._touch", "def create_player():\n\n\t#TODO : Ajout d'une BDD des différents joueurs avec des scores et vérifier la présence des joueurs choisis dans cette BDD pour charger les scores\n\n\tactivator = ''\n\tinhibitor = ''\n\n\tprint(\"\\nEntrez le pseudo du joueur\",colors.GREEN + \"'Activator' : \" + colors.STOP, end = \"\")\n\tactivator = input()\n\n\tprint(\"\\nEntrez le pseudo du joueur\", colors.RED + \"'Inhibitor' : \"+colors.STOP, end = \"\")\n\tinhibitor = input()\n\n\t# Default usernames if not defined by users\n\tif len(activator) == 0:\n\t\tactivator = 'Activator'\n\n\tif len(inhibitor) == 0:\n\t\tinhibitor = 'Inhibitor'\n\n\t# Attribute to each player the 
status he chose\n\tData.current_player['Activator'] = activator\n\tData.current_player['Inhibitor'] = inhibitor\n\n\treturn activator, inhibitor", "def generate_bottonDown(self):\n posx = self.posx + self.sizex - 2*self.sizebotx \n posy = self.posy + self.sizeboty \n return Boton(posx, posy, self.sizebotx, self.sizeboty, color=(124, 117, 221), color_h=(95, 90, 173), text_color=(0,0,0), press_color = (36, 26, 117), font_size=5)", "def Board_create(objPoints, dictionary, ids):\n pass", "def create_player(id_player: str):\n id_player = str(id_player)\n last_name = input(\"Last name of the player : \")\n first_name = input(\"First name of the player : \")\n birthday = input(\"Birthday of the player : \")\n sex = input(\"Sex of the player : \")\n elo = int(input(\"Elo of the player: \"))\n\n if not Player.get(id_player):\n Player(id_player, last_name, first_name, birthday, sex, elo)\n else:\n raise Exception(f\"The ID {id_player} already exists : {Player.get(id_player)}\")" ]
[ "0.6745972", "0.64317685", "0.6356615", "0.6160553", "0.6098694", "0.6085133", "0.60210156", "0.5916166", "0.5859541", "0.58264154", "0.58034784", "0.578718", "0.57084256", "0.57044506", "0.56902385", "0.5576419", "0.55714655", "0.5560821", "0.5558044", "0.5557991", "0.5550492", "0.552842", "0.55130637", "0.5512384", "0.5500373", "0.5494773", "0.54493105", "0.54059017", "0.5397973", "0.53834236" ]
0.70745236
0
Extract the CSS declaration around the given position.
def _extract_css_declaration(self, ac, styleClassifier, trg, is_for_calltip=False): DEBUG = DebugStatus #DEBUG = True #PERF: Use accessor.gen_chars_and_styles() if possible. try: ac.resetToPosition(trg.pos) p, ch, style = ac.getPrevPosCharStyle() if not styleClassifier.is_operator(style, ac): if DEBUG: print "Current ch is not an operator, so getting the " \ "preceeding one, p: %d, ch: %r, style: %d" % \ (p, ch, style, ) p, ch, style = ac.getPrevPosCharStyle( ignore_styles=styleClassifier.ignore_styles) except IndexError: # This occurs when already at the end of the buffer, so we reset to # the last buffer position then ac.resetToPosition(trg.pos - 1) p, ch, style = ac.getCurrentPosCharStyle() if DEBUG: print """------ _extract_css_declaration -----""" print " _extract_css_declaration:: Trg.pos: %d" % (trg.pos) #ac._debug = True print " _extract_css_declaration:: pos: %r" % (p) print " _extract_css_declaration:: ch: %r" % (ch) print " _extract_css_declaration:: style: %r" % (style) ac.dump() # Walk back to ':' operator. num_close_parenthesis = 0 min_pos = max(0, trg.pos - 200) # Lookback up to 200 chars in total while p >= min_pos: #print "ch: %r, style: %d" % (ch, style, ) if ch == ':' and styleClassifier.is_operator(style, ac): break elif num_close_parenthesis > 0: if ch == "(": num_close_parenthesis -= 1 if DEBUG: print "Found matching open paren," \ " num_close_parenthesis now: %d" % ( num_close_parenthesis) elif DEBUG: print "Ignoring everything inside the parenthesis" elif ch == "(" and (styleClassifier.is_operator(style) or styleClassifier.is_value(style)): if DEBUG: print "Already inside a paren, no cpln's then." #XXX SCSS and Less support arithmetic expressions return (None, None, None) elif ch == ")" and (styleClassifier.is_operator(style) or styleClassifier.is_value(style)): num_close_parenthesis += 1 if DEBUG: print "Found close paren, need to skip over contents," \ " num_close_parenthesis: %d" % ( num_close_parenthesis) elif styleClassifier.is_operator(style): if ch not in ":,%": if DEBUG: print "%s: couldn't find ':' operator, found invalid " \ "operator: %d %r %d" % (trg.name, p, ch, style) #TODO: SCSS and Less support arithmetic expressions return (None, None, None) elif styleClassifier.is_string(style): # Used to skip over string items in property values if DEBUG: print "Found string style, ignoring it" elif not (styleClassifier.is_value(style) or styleClassifier.is_default(style)): # old CSS lexer: everything betwee ":" and ';' used to be a value. if DEBUG: print "%s: couldn't find ':' operator, found invalid " \ "style: pcs: %d %r %d" % (trg.name, p, ch, style) return (None, None, None) p, ch, style = ac.getPrevPosCharStyle( ignore_styles=styleClassifier.ignore_styles) else: if DEBUG: print "%s: couldn't find ':' operator within 200 chars, " \ "giving up" % (trg.name) return (None, None, None) if DEBUG: print " _extract_css_declaration:: Found ':' at pos: %d" % (p) # Parse out the property name. 
colan_pos = p p, ch, style = ac.getPrecedingPosCharStyle(style, ignore_styles=styleClassifier.ignore_styles, max_look_back=150) if style not in styleClassifier.identifier_styles: if DEBUG: print " _extract_css_declaration:: No identifier style found" \ " before ':', found style %d instead" % (style) return (None, None, None) p, property = ac.getTextBackWithStyle(style) property = property.strip() if is_for_calltip: # We have all the info we need if DEBUG: print " _extract_css_declaration:: Returning property: %r" % ( property) return (property, '', []) # Walk forward parsing the value information, ends when we hit a ";" or # have gone ahead a maximum of 200 chars. ac.resetToPosition(colan_pos) prev_pos, prev_ch, prev_style = ac.getCurrentPosCharStyle() from_pos = prev_pos p = colan_pos # Value info, list of tuples (pos, text) value_info = [] max_p = p + 200 try: while p < max_p: p, ch, style = ac.getNextPosCharStyle(max_look_ahead=100, ignore_styles=styleClassifier.comment_styles) if p is None or not styleClassifier.is_css_style(style): # Went past max_look_ahead, just use what we've got then if DEBUG: print "%s: css value reached max length or end of " \ "document: trg.pos %d" % (trg.name, trg.pos) value_info.append((from_pos, ac.text_range(from_pos, p))) break # Sass test if ch == "\n" and self.lang == "Sass" and styleClassifier.is_default(style): value_info.append((from_pos, ac.text_range(from_pos, p))) break if ch in WHITESPACE or styleClassifier.is_string(style): if not prev_ch in WHITESPACE and not styleClassifier.is_string(prev_style): value_info.append((from_pos, ac.text_range(from_pos, p))) from_pos = p+1 elif styleClassifier.is_operator(style): if ch in ";{}": value_info.append((from_pos, ac.text_range(from_pos, p))) break # Other chars should be okay to collect elif not styleClassifier.is_value(style) and \ style not in styleClassifier.ignore_styles: if DEBUG: print "%s: invalid style found: pos %d, style: %d" % ( trg.name, trg.pos, style) return (None, None, None) prev_pos, prev_ch, prev_style = p, ch, style else: if DEBUG: print "%s: css value too long: trg.pos %d" % (trg.name, trg.pos) return (None, None, None) except IndexError: if DEBUG: print "ran out of buffer" # Work out the values and the current value current_value = None values = [] trg_pos = trg.pos for p, value in value_info: if value and _isident_first_char(value[0]): if DEBUG: print "Is a valid value, p: %d, value: %r" % (p, value, ) values.append(value) if current_value is None and trg_pos >= p and \ trg_pos <= p + len(value): current_value = value if DEBUG: print " _extract_css_declaration:: Returning property: %r, " \ "current_value: %r, values: %r" % (property, current_value, values) return (property, current_value, values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_position(self, position):", "def elements_position(self, selector):\n attr, pattern, val = self.parser_selector(selector, attr='identifier')\n\n strip = lambda v: v.strip()\n\n if pattern:\n val = locals()[pattern](val)\n\n\n def identifier(query):\n return id(query) or name(query)\n\n def name(query):\n return css(\"*[name='%s']\" % query)\n\n def id(query):\n return css('#' + query)\n\n def link(query):\n return xpath(u\"//a[text()='%s']\" % query.replace(\"\\'\", \"\\\\'\"))\n\n def css(query):\n result = []\n for ele in self.main_frame.findAllElements(query):\n if not ele.isNull():\n result.append(ele.geometry().center())\n return result\n\n def xpath(query):\n positions = self.evaluate(u\"\"\"\n function GetAbsoluteLocationEx(element)\n {\n if ( arguments.length != 1 || element == null )\n {\n return null;\n }\n var elmt = element;\n var offsetTop = elmt.offsetTop;\n var offsetLeft = elmt.offsetLeft;\n var offsetWidth = elmt.offsetWidth;\n var offsetHeight = elmt.offsetHeight;\n while( elmt = elmt.offsetParent )\n {\n // add this judge\n if ( elmt.style.position == 'absolute' || elmt.style.position == 'relative'\n || ( elmt.style.overflow != 'visible' && elmt.style.overflow != '' ) )\n {\n break;\n }\n offsetTop += elmt.offsetTop;\n offsetLeft += elmt.offsetLeft;\n }\n return { absoluteTop: offsetTop, absoluteLeft: offsetLeft,\n offsetWidth: offsetWidth, offsetHeight: offsetHeight };\n }\n result=[];\n for (var r = document.evaluate('%s', document, null, 5, null), n; n = r.iterateNext();) {\n pos=GetAbsoluteLocationEx(n)\n result.push([pos.absoluteLeft+pos.offsetWidth/2.0,pos.absoluteTop+pos.offsetHeight/2.0]);\n }\n result\n \"\"\" % query.replace(\"\\'\", \"\\\\'\"))\n\n return map(lambda x: QPoint(*tuple(x)), positions)\n\n return locals()[attr](val)", "def findCSS(self, query):\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_element_by_css_selector(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to locate CSS id {}\\n{}\".format(query, e))\n\t\t\treturn -1", "def _extract_from_arn(arn, position):\n\n return re.findall(\"(.*?):\", arn)[position]", "def find_css(self, selector):\n return self.page.q(css=self._bounded_selector(selector))", "def declaration(self) -> str:\n if self._declaration is None:\n # First try to utilize the clang comment's version as it is assumed\n # to be the more correct.\n self._declaration = self.get_soup_declaration()\n\n if self._declaration is None:\n # soup failed so fall back to manual parsing\n self._declaration = self.get_parsed_declaration()\n\n return self._declaration", "def extract_position_from_link(link):\n response = requests.get(link)\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n mydivs = soup.find_all(\"div\", {\"id\": \"meta\"})\n if len(mydivs) != 1:\n return \"unknown\"\n\n pars = mydivs[0].find_all(\"p\")\n if len(mydivs) < 1:\n return \"unknown\"\n\n for par in pars:\n if \"Position:\" in par.text:\n start_idx = par.text.find(\"Position:\")\n strings = par.text[start_idx:17].split()\n if len(strings) < 2:\n return \"unknown\"\n position = strings[1]\n return position\n return \"unknown\"", "def extract_var(node):\n if (node[\"nodeType\"] == \"VariableDeclarationStatement\"):\n for var_decl in node[\"declarations\"]:\n if(var_decl[\"nodeType\"] == \"VariableDeclaration\"):\n return var_decl\n else:\n return node", "def get_pos(term):\n # pylint: disable=invalid-name\n # Invalid variable name \"Position\"\n Position = collections.namedtuple('Position', 
('row', 'column'))\n\n pos = Position(*term.get_location(timeout=5.0))\n\n if -1 in pos:\n print('stdin: not a human', file=sys.stderr)\n exit(2)\n\n return pos", "def get_definition(self):\r\n # TODO: Should probably check that this is either a reference or\r\n # declaration prior to issuing the lookup.\r\n return conf.lib.clang_getCursorDefinition(self)", "def updatePosition(char, position):\n line, col = position\n return (line + 1, 1) if (char == '\\n') else (line, col + 1)", "def get_position():\n\n return character['Position']", "def _position_to_offset(self, position: Position) -> int:\n return self._line_offsets[position.line] + position.character", "def get_position(self, pos):\n element = self.head\n count = 1\n\n if pos == 1:\n return element\n elif pos > 1 and self.head:\n while count < pos:\n element = element.next\n count += 1\n if element is None:\n break\n return element # if (element is not None) else (f\"Position {pos} does not exist.\") # conditional expression\n # else:\n # return None\n return None", "def css_find(css, wait_time=30):\r\n wait_for_present(css_selector=css, timeout=wait_time)\r\n return world.browser.find_by_css(css)", "def getPos(self):\n pos = [None,None]\n try:\n for i in self.itemType.find('parameters'):\n paramType = i.find('type').text.strip()\n if paramType.startswith('position-x'):\n pos[0] = round(float(self.params[i.find('name').text]))\n if paramType.startswith('position-y'):\n pos[1] = round(float(self.params[i.find('name').text]))\n except:\n pos = [-1,-1]\n return pos", "def get_position(filestring, position):\n lines = filestring.split(\"\\n\")\n line_number, place, count = 0, 0, 0\n #print \"Number of lines: \", len(lines)\n \n while line_number < len(lines):\n line = lines[line_number]\n new_count = count + len(line) #+ 1 # +1 nes dar newline pridedame\n if position <= new_count:\n place = position - count\n break\n count = new_count # +1 nes dar newline pridedame\n line_number += 1\n \n print \"\\n\".join([\"%s:%s\" % ((\"===> \" if i==line_number else \"\") + str(i), lines[i]) for i in xrange(len(lines))])\n return (line_number, place)", "def get_column(board, position):\n col = (position-1) % 3\n return [board[x] for x in (col, col+3, col+6)]", "def get_declaration(self, type_):\n return self.__apply_sequence(type_)", "def position(self):\n return (self.__position)", "def position(self):\n return self._position", "def css(self):\n css = urllib2.urlopen(self.cssfonturl)\n return css.read()", "def findParameter(self, pos):\n text = self.text()\n comma_pos = text.find(',', pos)\n if comma_pos == -1:\n comma_pos = len(text)\n left_comma = text.rfind(',', 0, comma_pos) + 1\n left_eq = text.rfind('=', 0, comma_pos) + 1\n left_delim = max(left_comma, left_eq)\n start = left_delim\n length = comma_pos - left_delim\n return start, length", "def from_position(tu, file, line, column):\r\n return conf.lib.clang_getLocation(tu, file, line, column)", "def get_gradient(expression, position):\n\t# initialize the output\n\tlength = len(position)\n\tgradient = np.zeros(length)\n\t# get the variables\n\tvariables = list(expression.free_symbols)\n\t# construct the evaluation list\n\tvalue = [(variables[k], position[k]) for k in range(length)]\n\tfor k in range(length):\n\t\tgradient[k] = expression.diff(variables[k]).subs(value)\n\treturn gradient", "def C0004(self, position):\n # type: (IrPosition) -> Dict[str, Any]\n return locals()", "def start_at(self) -> global___Statement.Declaration:", "def get_coords_from_position(position, file):\n line_counter 
= 1\n column_counter = 1\n try:\n with open(file, 'r') as source:\n string = source.read()\n except:\n #unable to open file -> 3\n error.ThrowError(3)\n i = 0\n j = position\n while j > 0:\n if string[i] == '\\n':\n line_counter += 1\n column_counter = 1\n else:\n column_counter += 1\n i += 1\n j -= 1\n return Coords(line_counter, column_counter, position)", "def set_position():\n\n global character\n return character['Position']", "def getPixel(self, position):\n (x,y,z) = position\n if z<0 or z>=self.length:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_SIZE)\n err, value = mambaCore.MB_GetPixel(self.seq[z].mbIm, x, y)\n mamba.raiseExceptionOnError(err)\n return value" ]
[ "0.5486392", "0.5393296", "0.5300908", "0.5157982", "0.5035882", "0.49647793", "0.49503165", "0.4924343", "0.49232784", "0.4919716", "0.49102503", "0.4879096", "0.48680133", "0.48434153", "0.47953552", "0.4792464", "0.47892663", "0.47704002", "0.4770048", "0.4767887", "0.4747903", "0.4736203", "0.47347254", "0.47319543", "0.47294357", "0.47227025", "0.471387", "0.47103706", "0.4710186", "0.46957806" ]
0.6803759
0
Register language support with the Manager.
def register(mgr): mgr.set_lang_info(lang, silvercity_lexer=CSSLexer(), buf_class=CSSBuffer, langintel_class=CSSLangIntel, is_cpln_lang=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_game_language_subscriber(event):\n request = event.request\n # TODO: look up game language from a cookie or something\n en = db.get_by_identifier_query(db.t.Language, u'en').first()\n request.tmpl_context.game_language = en", "def setLanguage(self, translator: ghidra.program.util.LanguageTranslator, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def translate(self, language=None):", "def register(cls, L):\r\n ...", "def set_language(self, lang):\n self.lang = lang", "def do_lang(self, lang):\n\n self.lang = lang\n print(\"Set language to %s\" % lang)", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "def enableVocolaTakesLanguages(self):\n key = \"VocolaTakesLanguages\"\n self.userregnl.set(key, 1)", "def initialise_languages():\n is_language_folder = r\"^[^\\\\\\.]*\" # Cannot have backslash or dot.\n language_folder_path = os.path.join(definitions.ROOT_DIR, \"languages\")\n\n for root, dirs, files in os.walk(language_folder_path):\n for name in files:\n if name.startswith(\"_NEW_\") or name.startswith(\"_CHANGED_\"):\n # Files that are auto-generated will not be added.\n continue\n\n full_dir = os.path.join(root, name)\n relative_dir = full_dir.replace(language_folder_path + \"\\\\\", \"\")\n\n match = re.match(is_language_folder, relative_dir)\n language_id = match.group(0)\n\n language = None\n if language_id != \"cache\":\n language = definitions.LANGUAGES[language_id]\n else:\n language = definitions.CACHE_LANGUAGE\n\n if relative_dir == \"{id}\\\\commands.json\".format(id=language_id):\n # Take the commands.\n definitions.COMMANDS.add_command_localisation(full_dir, language_id)\n\n elif relative_dir == \"{id}\\\\languages.json\".format(id=language_id):\n # Take the languages.\n language.add_languages(full_dir)\n\n elif relative_dir == \"{id}\\\\meta.json\".format(id=language_id):\n # Take the metadata.\n language.add_meta(full_dir)\n\n elif relative_dir == \"{id}\\\\permissions.json\".format(id=language_id):\n # Take the permissions.\n language.add_permission_names(full_dir)\n\n elif relative_dir == \"{id}\\\\units.json\".format(id=language_id):\n language.add_units(full_dir)\n\n else:\n # Take the keys\n language.add_keys_from_path(full_dir)\n\n for name in dirs:\n full_dir = os.path.join(root, name)\n relative_dir = full_dir.replace(language_folder_path + \"\\\\\", \"\")\n\n match = re.fullmatch(is_language_folder, relative_dir)\n if match is not None:\n if name != \"cache\":\n definitions.LANGUAGES[name] = Language(name)\n else:\n definitions.CACHE_LANGUAGE = Language(name)", "def add_languages_to_db():\n\n async def insert_language(language: str) -> None:\n await Language.create(name=language)\n\n async def add_languages() -> None:\n await Tortoise.init(config=TORTOISE_ORM)\n async with anyio.create_task_group() as tg:\n for item in get_all_lexers():\n tg.start_soon(insert_language, item[0])\n await Tortoise.close_connections()\n\n anyio.run(add_languages)\n click.secho('languages inserted!', fg='green')", "def handle_translation_registrations(*args, **kwargs):\n from modeltranslation.settings import ENABLE_REGISTRATIONS\n\n if not ENABLE_REGISTRATIONS:\n # If the user really wants to disable this, they can, possibly at their\n # own expense. 
This is generally only required in cases where other\n # apps generate import errors and requires extra work on the user's\n # part to make things work.\n return\n\n # Trigger autodiscover, causing any TranslationOption initialization\n # code to execute.\n autodiscover()", "def use_en(self):\n pass", "def languages(self, languages):\n self._languages = languages", "def on_language_changed(self, locale_code: str):\n self.localisationsettings.formats.on_language_changed(locale_code) # XXX: notify\n self.localisationsettings.keyboard.on_language_changed(locale_code) # XXX: notify\n self.translate_to(locale_code)\n self.mainwindow.current_language = localization.language_from_locale(locale_code)", "def set_lang(lang, graceful_fail = False, **kwargs):\r\n registry = pylons.request.environ['paste.registry']\r\n if not lang:\r\n registry.replace(pylons.translator, NullTranslations())\r\n else:\r\n translator = _get_translator(lang, graceful_fail = graceful_fail, **kwargs)\r\n registry.replace(pylons.translator, translator)", "def set_lang(self, lang: LangEnum) -> None:\n self._logger.debug(\"running\")\n self._base_strings = strings[lang]\n self._logger.debug(\"done\")", "def includeme(config):\r\n config.add_translation_dirs('faapp:locale', )\r\n config.add_subscriber('faapp.locale.add_renderer_globals', 'pyramid.events.BeforeRender')\r\n config.add_subscriber('faapp.locale.add_localizer', 'pyramid.events.NewRequest')", "def setup_site_languages(context):\n portal = context.getSite()\n ltool = portal.portal_languages\n \n defaultLanguage = bc.default_language\n supportedLanguages = list(bc.zope_i18n_allowed_languages.split())\n ltool.manage_setLanguageSettings(defaultLanguage, supportedLanguages,\n setUseCombinedLanguageCodes=True,\n setCookieN=True, setRequestN=True)\n logger.info(\"Site languages enabled.\")", "def _save_lang(self):\n for combobox, (option, _default) in list(self.comboboxes.items()):\n if option == 'interface_language':\n data = combobox.itemData(combobox.currentIndex())\n value = from_qvariant(data, to_text_string)\n break\n save_lang_conf(value)\n self.set_option('interface_language', value)", "def add_localizer(event):\n def auto_translate(string):\n \"\"\" Use the message factory to translate strings.\"\"\"\n return localizer.translate(MessageFactory(string))\n\n def gettext_translate(string):\n \"\"\" Translate untranslated strings with FormEncode.\"\"\"\n # Try default translation first\n translation = localizer.old_translate(i18n.TranslationString(string))\n if translation == string:\n # translation failed then use FormEncode\n translation = formencode_api._stdtrans(string)\n return translation\n\n request = event.request\n localizer = i18n.get_localizer(request)\n request.localizer = localizer\n request.translate = auto_translate\n\n if not hasattr(localizer, \"old_translate\"):\n localizer.old_translate = localizer.translate\n locale_name = i18n.get_locale_name(request)\n formencode_api.set_stdtranslation(languages=[locale_name])\n localizer.translate = gettext_translate", "def includeme(config):\n\n import patches\n config.add_translation_dirs('kotti_multilingual:locale')\n config.scan(__name__)", "def list_languages(self):\n known = [ob.capitalize() for ob in self.caller.languages.known_languages]\n known += [\"Arvani\"]\n self.msg(\"{wYou can currently speak:{n %s\" % \", \".join(known))\n self.msg(\n \"You can learn %s additional languages.\"\n % self.caller.languages.additional_languages\n )", "def __updateLanguages(self):\n 
self.__ensureTranslationEngineReady()\n if self.__translationEngine is not None:\n supportedCodes = self.__translationEngine.supportedLanguages()\n enabledCodes = self.__plugin.getPreferences(\"EnabledLanguages\")\n \n # 1. save current selections\n origLanguage = self.origLanguageComboBox.itemData(\n self.origLanguageComboBox.currentIndex())\n \n # 2. reload the original language combo box\n self.origLanguageComboBox.blockSignals(True)\n self.origLanguageComboBox.clear()\n for code in enabledCodes:\n if code in supportedCodes:\n language = self.__languages.getLanguage(code)\n if language:\n icon = self.__languages.getLanguageIcon(code)\n self.origLanguageComboBox.addItem(\n icon, language, code)\n self.origLanguageComboBox.model().sort(0)\n origIndex = self.origLanguageComboBox.findData(origLanguage)\n if origIndex == -1:\n origIndex = 0\n self.origLanguageComboBox.blockSignals(False)\n self.origLanguageComboBox.setCurrentIndex(origIndex)", "def register_linter(linter_class, name, attrs):\n if name:\n name = name.lower()\n linter_class.name = name\n languages[name] = linter_class\n\n if not name.startswith('embedded'):\n linter_settings = settings.get('linters', {})\n linter_class.lint_settings = linter_settings.get(name, {})\n\n # The sublime plugin API is not available until plugin_loaded is executed\n if plugin_is_loaded:\n load_settings(force=True)\n\n # If a linter is reloaded, we have to reassign linters to all views\n from . import linter\n\n for view in views.values():\n linter.Linter.assign(view, reassign=True)\n\n printf('{} linter reloaded'.format(linter_class.__name__))\n else:\n printf('{} linter loaded'.format(linter_class.__name__))", "def SetLanguage(self, language):\n try:\n newDict = guicmd.CommandInterface.MessageHandler.GetLanguageDict(language)\n if newDict:\n self.languageDict = newDict\n self.language = language\n except:\n pass", "def __init__(self, language=None):\n self.language = language\n self.translations = {}", "def languages(self, languages):\n\n self._languages = languages", "def __init__(self, *args, **kwargs):\n _gdi_.PyLocale_swiginit(self,_gdi_.new_PyLocale(*args, **kwargs))\n PyLocale._setCallbackInfo(self, self, PyLocale)", "def language(self, language: str):\n self._language = language", "def init(lang):\n pass" ]
[ "0.63181674", "0.6169716", "0.59083724", "0.5867996", "0.58526134", "0.57024425", "0.57021755", "0.5701746", "0.56978625", "0.5690859", "0.5625662", "0.5609462", "0.55977046", "0.5581769", "0.5573173", "0.5572357", "0.5570256", "0.5531588", "0.5516688", "0.5481292", "0.5476093", "0.5473661", "0.5445829", "0.54260486", "0.54163456", "0.5410942", "0.5406855", "0.5396798", "0.5387474", "0.53487647" ]
0.71172
0
Call after notifier timeout or processed event to check if the program should terminate.
def check_main_stop(notifier): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_terminate(self):\n pass", "def test_terminate_run(self):\n pass", "def handle_termination(self):\n pass", "def check_termination(self) -> bool:\n return self.terminate", "def __exit__(self, exc_type, exc_value, traceback):\n if self.returncode is None and self.proc.poll() is None:\n self.proc.terminate()", "def _termination_handler(self, signum, frame):\n print '[i] Shutting down...'\n self.sensor.close()\n sys.exit(1)", "def should_terminate(self):\n return False", "def do_exit(self, args) :\r\n\r\n self.__Logger.warn(\"stopping the timer loop\")\r\n\r\n self.cmds[\"SimulatorStartup\"] = True\r\n self.cmds[\"SimulatorShutdown\"] = True\r\n\r\n\r\n return True", "def terminate(self):\n return", "def terminate(self):\n return", "def terminate(self):\n self._running = False", "def __before_termination__(self, sig):\n print(\"Ahhhh! I'm going to be killed. My pid:{}, signal received:{}\".format(self.pid, sig ) )", "def _CheckForIdleQuit(self):\n timeout = time.time() + self.idle_timeout_secs\n while time.time() < timeout:\n if self._shutdown_requested_event.is_set():\n # An external source called shutdown()\n return\n elif self._rpc_received_event.is_set():\n logging.debug('Resetting the idle timeout')\n timeout = time.time() + self.idle_timeout_secs\n self._rpc_received_event.clear()\n time.sleep(1)\n # We timed out, kill the server\n logging.warning('Shutting down the server due to the idle timeout')\n self.shutdown()", "def daemonControlRun(self):\n if not self._mainLoop():\n self._logGeneral(\"done-error\").error(\"process exited with error\")\n else:\n self._logGeneral(\"done-ok\").notice(\"process terminated\")", "def gracefully_terminate(self):\n self.running = False", "def stopped_check(self, timeout=None):", "def wait_for_termination(self):\n self.server.wait_for_termination()", "def _defunctdog(self):\n self._is_running.wait()\n while self._is_running.is_set() and not self._ask_to_stop.is_set():\n self.return_code = self._popen.poll()\n if self.return_code is not None:\n break\n time.sleep(self._check_interval)\n self._log(\"debug\", \"defunctdog loop end: return code {0}\".format(self.return_code))\n if self.return_code is None: # If loop end by ask to stop\n self._stop_process() # Really stop the thread\n self.return_code = self._popen.poll()\n self._log(\"debug\", \"after process stop: return code {0}\".format(self.return_code))\n else:\n self._log(\"raw\", \"ended itself with {0} code\".format(self.return_code))\n self._process_ended()", "def signal_handler(self):\n\t\twith open(\"/dev/null\") as sys.stderr:\n\t\t\tself._thread_executor.shutdown(wait=False)\n\t\t\t[task.cancel() for task in Task.all_tasks() if task is not Task.current_task()]\n\t\t\tself._event_loop.stop()\n\t\t\tself._dump_test_parser_log()", "def quitme(self, evt=None):\n if evt:\n self.dbgprint(\"too much for testing: so-long\")\n sys.exit()", "def terminate(self):", "def do_exit(self):\n self._loop = False\n print('exiting')\n return True", "def terminate():\n sys.exit()", "def __exit__(self, *ex_info):\n if self.device:\n self._device_ctx.__exit__(*ex_info)\n\n stdout('')\n stdout('Finished {0} in {1:0.1f}s'.format(self.name, self.timer_elapsed('script')))", "def terminate(self):\n self.terminated = True", "def atexit(self):\n self.stop_listen()\n for driver in self.drivers.values():\n driver.stop()\n if hasattr(driver, \"atexit\"):\n driver.atexit()\n try:\n self.processor_thread.join()\n except AttributeError:\n pass", "async def shutdown_gracefully(self) -> None:", "async def 
shutdown_gracefully(self) -> None:", "def exit(self) -> None:\n self.on_exit(None)", "def terminate(self):\n self.stop_timer()\n self.terminate_event.set()\n self.log.info(self.name + \" timer terminated\")" ]
[ "0.6717264", "0.66706556", "0.6639872", "0.6634525", "0.6587442", "0.65862644", "0.6508719", "0.64766693", "0.64387584", "0.64387584", "0.64321905", "0.64121026", "0.6358221", "0.6338427", "0.6337538", "0.63262683", "0.6324838", "0.6317632", "0.6312474", "0.6300536", "0.6280848", "0.62792486", "0.6267463", "0.6253468", "0.62511444", "0.6197552", "0.6188772", "0.6188772", "0.61781365", "0.61732364" ]
0.72217506
0
Always returns the widget, creating one if no current instance exists
def widget(self): if self.widget_instance is None: self.widget_instance = self.widget_class(settings=self.settings) return self.widget_instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_widget(self):\n\t\treturn None", "def get_widget(self):\r\n return None", "def getWidget(self):\n if self.__widget is None:\n self.__widget = self.createWidget(self.__parent)\n hooks = self.getHooks()\n if hooks is not None:\n hooks.viewWidgetCreated(self, self.__widget)\n return self.__widget", "def get_widget(self, request=None, as_instance=False):\n raise NotImplementedError", "def create_widget(self):\n pass", "def __call__(self, *args, **kwargs):\n if not self.instance:\n self.instance = super().__call__(*args, **kwargs)\n return self.instance", "def getWidget(self):", "def _widget_item(self):\n try:\n res = self.__widget_item\n except AttributeError:\n res = self.__widget_item = QWidgetItem(self.widget)\n return res", "def get_instance(self):\n try:\n return self._instance\n except AttributeError:\n self._instance = self._decorated()\n return self._instance", "def widget(self):\n return self.containedWidget", "def createWidget(self, class_name, parent=None, name=''):\n\n if parent is None and self.baseinstance:\n # supposed to create the top-level widget, return the base\n # instance instead\n return self.baseinstance\n\n else:\n\n # For some reason, Line is not in the list of available\n # widgets, but works fine, so we have to special case it here.\n if class_name in self.availableWidgets() or class_name == 'Line':\n # create a new widget for child widgets\n widget = QUiLoader.createWidget(self, class_name, parent, name)\n\n else:\n # If not in the list of availableWidgets, must be a custom\n # widget. This will raise KeyError if the user has not\n # supplied the relevant class_name in the dictionary or if\n # customWidgets is empty.\n try:\n widget = self.customWidgets[class_name](parent)\n except KeyError as error:\n raise Exception(\n f'No custom widget {class_name} '\n 'found in customWidgets'\n ) from error\n\n if self.baseinstance:\n # set an attribute for the new child widget on the base\n # instance, just like PyQt4.uic.loadUi does.\n setattr(self.baseinstance, name, widget)\n\n return widget", "def createWidget(self, class_name, parent=None, name=''):\n\n if class_name is QMainWindow.__name__:\n return self.window\n\n if parent is None and self.window:\n return self.window\n else:\n if class_name in self.availableWidgets():\n widget = QUiLoader.createWidget(self, class_name, parent, name)\n widget.show()\n else:\n try:\n widget = self.customWidgets[class_name](parent)\n except (TypeError, KeyError) as e:\n raise Exception(class_name, 'was not found are you sure it was promoted?')\n\n if self.window:\n setattr(self.window, name, widget)\n\n return widget", "def _get_toolkit_widget(self):\n try:\n res = self.parent.toolkit_widget\n except AttributeError:\n res = None\n return res", "def returnDocker(self):\r\n # Ensure there's a widget to return\r\n if self.widget:\r\n self.widgetDocker.setWidget(self.widget)\r\n self.widget = None\r\n self.widgetDocker = None", "def widget(self, p_int): # real signature unknown; restored from __doc__\n return QWidget", "def toolkit_widget(self):\n return self.widget", "def widget(self) -> tk.Frame:\r\n return self.main_frame", "def widget(self, class_id, name):\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n return form.findChild(class_id, name)", "def as_widget(self):\n return self._as_widget", "def createWidget(self, parent):\n raise NotImplementedError()", "def get_widget(self, name):\n return self.params[name].widget", "def __get__(self, instance, owner):\n return self", "def 
_get_instance(self):", "def _get_instance(self):", "def createWidget(self, QWidget): # real signature unknown; restored from __doc__\n pass", "def get_object(self):\n if getattr(self, 'current_instance', None):\n ret = self.current_instance\n else:\n ret = super().get_object()\n return ret", "def create_widget(self):\n item = QNodeItem(self)\n self.widget = item", "def widget(self) -> ttk.Treeview:\r\n return self.wrapper", "def MultiWidget_getWidget(self, idx):\n # This monkey patch sets the widgets ignoreContext, ignoreRequest and\n # context based on self.ignoreContext, self.ignoreRequest and self.context\n valueType = self.field.value_type\n widget = zope.component.getMultiAdapter((valueType, self.request),\n interfaces.IFieldWidget)\n self.setName(widget, idx)\n widget.mode = self.mode\n widget.ignoreContext = self.ignoreContext\n widget.ignoreRequest = self.ignoreRequest\n widget.context = self.context\n #set widget.form (objectwidget needs this)\n if interfaces.IFormAware.providedBy(self):\n widget.form = self.form\n zope.interface.alsoProvides(\n widget, interfaces.IFormAware)\n widget.update()\n return widget", "def instance(self):\n return self.__instance" ]
[ "0.7231976", "0.7222826", "0.71262175", "0.6965019", "0.68230003", "0.6606461", "0.659004", "0.6489969", "0.64566183", "0.641995", "0.6392658", "0.6347045", "0.6330786", "0.6318223", "0.63119763", "0.61843294", "0.6175768", "0.6164428", "0.61581355", "0.6142356", "0.6125436", "0.6047191", "0.6045252", "0.6045252", "0.6039104", "0.6034491", "0.60341173", "0.600573", "0.59664553", "0.5951697" ]
0.780208
0
Store the widget settings in this flow and remove it from the passed settings layout.
def deselect_widget(self, settings): if self.widget_instance: self.settings = self.widget_instance.get_settings() self.widget_instance = None current_setting_item = settings.takeAt(0) if current_setting_item: current_setting_item.widget().deleteLater() del current_setting_item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_layout(self, layout: Layout):\n self.layouts.pop(layout, None)", "def _clear_gui_settings(self):\n self._settings.clear()\n self._settings.sync()\n self.gui_settings_clear_button.setEnabled(False)\n self.cleared = True", "def remove_layout(self, layout):\n\n #we can get 'layout' or 'layout (variant)'\n (layout, variant) = parse_layout_variant(layout)\n\n layouts_variants = zip(self._rec.layouts, self._rec.variants)\n\n if not (layout, variant) in layouts_variants:\n msg = \"'%s (%s)' not in the list of added layouts\" % (layout,\n variant)\n raise XklWrapperError(msg)\n\n idx = layouts_variants.index((layout, variant))\n # pylint: disable=unsubscriptable-object\n new_layouts = self._rec.layouts[:idx] + self._rec.layouts[(idx + 1):]\n # pylint: disable=unsubscriptable-object\n new_variants = self._rec.variants[:idx] + self._rec.variants[(idx + 1):]\n\n self._rec.set_layouts(new_layouts)\n self._rec.set_variants(new_variants)\n\n if not self._rec.activate(self._engine):\n raise XklWrapperError(\"Failed to remove layout '%s (%s)'\" % (layout,\n variant))", "def customize_settings(recorder: dict, uploaded_settings: dict, loaded: bool) -> dict:\n\n with st.expander(\"Settings\", loaded):\n checked = [_ for _ in recorder[\"workflow\"] if not recorder[\"workflow\"][_]]\n checked_ = []\n for _ in checked:\n if _ in WORKFLOW_DICT:\n checked_.extend(WORKFLOW_DICT[_])\n\n exclude = [\"experiment\", \"workflow\"] + checked_\n\n for key in SETTINGS_TEMPLATE.keys():\n if key not in exclude:\n\n group = SETTINGS_TEMPLATE[key]\n # Check if different than default\n if loaded:\n changed = (\n sum(\n [\n uploaded_settings[key][element]\n != group[element][\"default\"]\n for element in group\n ]\n )\n > 0\n )\n else:\n changed = False\n\n if st.checkbox(key, changed):\n for element in group:\n override = None\n if changed:\n if (\n uploaded_settings[key][element]\n != group[element][\"default\"]\n ):\n override = uploaded_settings[key][element]\n\n recorder = widget_from_setting(\n recorder, key, group, element, override, indent=True\n )\n\n return recorder", "def _onSettings(self, event):\n dialog = sc.SettingsDialog(self)\n if dialog.ShowModal() == wx.ID_OK:\n dialog.saveSettings()\n dialog.Destroy()", "def clear(self, layout):\n panels = layout.layout_content.get('panels')\n\n for panel in panels:\n for layout_descriptor in panel.get('descriptors'):\n descriptor = get_object_or_404(Descriptor, name=layout_descriptor.get('name'))\n\n # values are loaded on demand (displaying the panel or opening the dropdown)\n\n acc_value = self.entity.descriptors.get(descriptor.name)\n\n if DescriptorFormatTypeManager.has_external(descriptor.format):\n self.own_list.append((descriptor.format, acc_value, None))\n\n # no more values\n self._descriptors = {}", "def delete_widgets_from(layout):\n for i in reversed(range(layout.count())):\n widgetToRemove = layout.itemAt(i).widget()\n # remove it from the layout list\n layout.removeWidget(widgetToRemove)\n # remove it from the gui\n widgetToRemove.setParent(None)", "def clearSetting(self, name: unicode) -> None:\n ...", "def del_layout(layout): # FIXME delete it\n for i in reversed(range(layout.count())):\n if layout.itemAt(i).widget() is not None:\n layout.itemAt(i).widget().setParent(None)\n elif layout.itemAt(i).layout() is not None:\n del_layout(layout.itemAt(i).layout())\n layout.itemAt(i).layout().setParent(None)\n else:\n layout.removeItem(layout.itemAt(i))", "def removeExistWidget(self, layout):\n for index in range(layout.count()):\n if 
layout.itemAt(index).widget():\n layout.itemAt(index).widget().deleteLater()", "def delsetting(name):\r\n if '__delattr__' in settings.__class__.__dict__:\r\n delattr(settings, name)\r\n else:\r\n delattr(settings._wrapped, name)", "def set_dash_layout_settings(self, values=None, user_info=None):\n if not user_info:\n user = users.get_current_user()\n if not user:\n return\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n except Exception as err:\n logging.exception(err)\n pass\n if user_info:\n if type(values) is not dict:\n # Assign values to the default admin template.\n values = {\n \"nav\": [\"app_management\", \"appscale_management\",\n \"debugging_monitoring\"],\n \"panel\": [\"app_console\", \"upload_app\", \"cloud_stats\",\n \"database_stats\",\n \"memcache_stats\"]\n }\n layout_settings = values\n lookup_dict = self.build_dict(user_info=user_info)\n layout_settings['nav'] = [{key: lookup_dict.get(key)} for key in\n layout_settings.get('nav') if\n key in lookup_dict]\n\n layout_settings['panel'] = [{key: lookup_dict.get(key)} for key in\n layout_settings.get('panel') if\n key in lookup_dict and (\n lookup_dict.get(key).get(\n 'is_admin_panel') ==\n user_info.is_user_cloud_admin\n or not lookup_dict.get(key).get(\n 'is_admin_panel'))]\n user_info.dash_layout_settings = layout_settings\n user_info.put()\n return user_info.dash_layout_settings\n return", "def updateSettingsUI(self):\n\n pass", "def __clear_layout(self):\r\n\r\n # Test if layout is empty\r\n if self.__layout.count():\r\n for i in reversed(range(self.__layout.count())):\r\n widget = self.__layout.takeAt(i).widget()\r\n if widget is not None:\r\n widget.setParent(None)", "def clear(self, disconnect=False):\n if self.params:\n if disconnect:\n try:\n self.value_changed.disconnect()\n except TypeError: # no signals connected\n pass\n for name in self.params:\n path=(self.display_table_root,name)\n self.display_table.remove_handler(path)\n self.display_table.remove_indicator_handler(path)\n self.params={}\n utils.clean_layout(self.formLayout,delete_layout=True)\n self.formLayout = QtWidgets.QGridLayout(self)\n self.formLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\n self.formLayout.setContentsMargins(5,5,5,5)\n self.formLayout.setObjectName(_fromUtf8(\"formLayout\"))\n self._update_cache_values()", "def clear_settings(self):\n\n for entry in range(1, 11):\n getattr(self.dlg, \"uTextDomain{0}\".format(entry)).setText(\"\")\n getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).setText(\"\")", "def clear_settings(site_name): # untested - do I need/want this?\n return update_settings(site_name, {})", "def clear_widgets(self):\n self.json_progress = None\n self.progress_message_bar = None\n self.json_progress_message_bar = None\n if self.progress_message_bar_widget:\n self.iface.messageBar().popWidget(self.progress_message_bar_widget)\n self.progress_message_bar_widget = None\n if self.json_progress_message_bar_widget:\n self.iface.messageBar().popWidget(self.json_progress_message_bar_widget)\n self.json_progress_message_bar_widget = None", "def set_layout(layout):\r\n # Get active window and set reference to active view\r\n window = sublime.active_window()\r\n previous_active = window.active_view()\r\n\r\n # Do not set layout when disabled\r\n if get_value(S.KEY_DISABLE_LAYOUT):\r\n S.RESTORE_LAYOUT = window.get_layout()\r\n set_window_value('restore_layout', S.RESTORE_LAYOUT)\r\n S.RESTORE_INDEX = H.new_dictionary()\r\n set_window_value('restore_index', S.RESTORE_INDEX)\r\n 
return\r\n\r\n # Show debug layout\r\n if layout == 'debug':\r\n debug_layout = get_value(S.KEY_DEBUG_LAYOUT, S.LAYOUT_DEBUG)\r\n if window.get_layout() != debug_layout:\r\n # Save current layout\r\n S.RESTORE_LAYOUT = window.get_layout()\r\n set_window_value('restore_layout', S.RESTORE_LAYOUT)\r\n # Remember view indexes\r\n S.RESTORE_INDEX = H.new_dictionary()\r\n for view in window.views():\r\n view_id = \"%d\" % view.id()\r\n group, index = window.get_view_index(view)\r\n S.RESTORE_INDEX[view_id] = { \"group\": group, \"index\": index }\r\n set_window_value('restore_index', S.RESTORE_INDEX)\r\n # Set debug layout\r\n window.set_layout(S.LAYOUT_NORMAL)\r\n window.set_layout(debug_layout)\r\n # Show previous (single) layout\r\n else:\r\n # Get previous layout configuration\r\n if S.RESTORE_LAYOUT is None:\r\n S.RESTORE_LAYOUT = get_window_value('restore_layout', S.LAYOUT_NORMAL)\r\n if S.RESTORE_INDEX is None:\r\n S.RESTORE_INDEX = get_window_value('restore_index', {})\r\n # Restore layout\r\n window.set_layout(S.LAYOUT_NORMAL)\r\n window.set_layout(S.RESTORE_LAYOUT)\r\n for view in window.views():\r\n view_id = \"%d\" % view.id()\r\n # Set view indexes\r\n if view_id in H.dictionary_keys(S.RESTORE_INDEX):\r\n v = S.RESTORE_INDEX[view_id]\r\n window.set_view_index(view, v[\"group\"], v[\"index\"])\r\n\r\n # Restore focus to previous active view\r\n if not previous_active is None:\r\n window.focus_view(previous_active)", "def cleanWorkspace(self):\n self.window.labelMessage.setText(\"\")\n\n if self.inspectinoAnalyzer:\n del self.analyzerWidget\n self.inspectinoAnalyzer = False\n\n for index in reversed(range(self.window.layoutDepthermInpesction.count())):\n layoutItem = self.window.layoutDepthermInpesction.itemAt(index)\n widgetToRemove = layoutItem.widget()\n print(\"found widget: \" + str(widgetToRemove))\n widgetToRemove.setParent(None)\n self.window.layoutDepthermInpesction.removeWidget(widgetToRemove)", "def update_layouts(self):\n self.layouttreestore.clear()\n layouts = self.config.list_layouts()\n for layout in sorted(layouts, key=str.lower):\n if layout != \"default\":\n self.layouttreestore.append([layout])\n else:\n self.layouttreestore.prepend([layout])", "def widgetstosettings(self):\n print \"in widgets to settings\"\n self.ABsettings[\"intensity_range\"]=(self.spansliderInt.lowerValue,self.spansliderInt.upperValue)\n self.ABsettings[\"rgb\"]=self.colorBox.getRGB\n self.ABsettings[\"visible\"]=self.abEnabledCB.isChecked()\n self.ABsettings[\"zrange\"]=(self.spansliderZ.lowerValue,self.spansliderZ.upperValue)\n self.ABsettings[\"Antibody\"]=self.ab\n for button in self.radiobuttons:\n if button.isChecked():\n self.ABsettings[\"selected_DAPI_channel\"]=str(button.objectName())[:-3]\n print \"Dapi channel setting is \", self.ABsettings[\"selected_DAPI_channel\"]", "def undo_settings(self):\r\n cF.undo_settings()", "def settings(self, settings):\n\n self._settings = settings", "def build_settings(self, settings):\n settings.add_json_panel('Makesmith Settings', self.config, data=self.json)", "def save_settings(self, plugin_settings, instance_settings):\n instance_settings.set_value(\"output_directory\", self.output_directory)\n instance_settings.set_value(\"labels\", self.labels)\n if self._sub:\n instance_settings.set_value(\"topic_name\", self._sub.name)", "def clear_new_talk_fields(self):\r\n self.newTalkWidget.talkDetailsWidget.titleLineEdit.clear()\r\n self.newTalkWidget.talkDetailsWidget.presenterLineEdit.clear()\r\n 
self.newTalkWidget.talkDetailsWidget.descriptionTextEdit.clear()\r\n self.newTalkWidget.talkDetailsWidget.categoryLineEdit.clear()", "def load_from_settings(self):\n for param, value in self.settings['swan'].items():\n # Some settings do not have a GUI element, continue if encountered\n if param not in self.input_elements.keys():\n continue\n\n # Check if parameter is not empty before filling in\n if self.validate_parameter(value):\n self.input_elements[param].set_value(value)\n\n # Validate\n self.validate(check_empty=False)", "def save_settings(self):\r\n self.QtSettings.beginGroup(\"MainWindow\")\r\n self.QtSettings.setValue(\"geometry\",self.saveGeometry())\r\n self.QtSettings.setValue(\"state\",self.saveState())\r\n self.QtSettings.endGroup()\r\n \r\n #save element content\r\n self.QtSettings.beginGroup(\"Settings\")\r\n pyguitools.gui_save(self.ui,self.QtSettings)\r\n self.QtSettings.endGroup()", "def use(self, layout):\n self._wid.setLayout(layout)\n return layout" ]
[ "0.6029419", "0.5720786", "0.5576734", "0.545593", "0.5422754", "0.5388249", "0.5365285", "0.53313774", "0.5319288", "0.5231639", "0.5218475", "0.52031684", "0.5201091", "0.51799446", "0.517654", "0.51620686", "0.51608", "0.51577705", "0.51485753", "0.5138839", "0.5133913", "0.5101269", "0.5096345", "0.5087866", "0.50700897", "0.50647247", "0.50509506", "0.5050832", "0.5047619", "0.5029834" ]
0.6526072
0
Checks if a given migration script name has already been executed against this database.
def check_migration(self, migration: str) -> bool: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_exists(self):\n\n return os.path.isfile(os.path.join(self.scripts_dir, self.python_name))", "def is_migrated_before():\n\n global migration_sign\n if os.path.exists(migration_sign):\n return True\n else:\n return False", "def _is_non_real_command_found(self, script_data):\n is_valid = True\n depends_on_commands = script_data.get('depends_on')\n if depends_on_commands:\n for command in depends_on_commands:\n if command != 'test-module':\n if command.endswith('dev') or command.endswith('copy'):\n error_message, error_code = Errors.invalid_command_name_in_script(script_data.get('name'),\n command)\n if self.handle_error(error_message, error_code, file_path=\"id_set.json\"):\n return not is_valid\n return is_valid", "def _needs_migrations(self, table: str, current_level: int) -> bool:\n sql_dir = self.migrations / table\n if not sql_dir.exists():\n if current_level > 0: # Where did the previous migrations go?\n raise MigrationException(f\"{table} already has {current_level}, but directory is missing\")\n else: # No migrations? That's ok\n return False\n\n # TODO return paths to migrations that need to be applied\n # (would avoid listing files)\n for migration in sorted(sql_dir.iterdir()):\n level = int(migration.name.split('_')[0])\n if level > current_level: # Not yet applied\n return True # Need to apply it later\n return False # Up to date!", "def _is_already_installed(script_dir):\n\t#first check the bin folder\n\tif SCRIPT_NAME in os.listdir(script_dir):\n\t\treturn True\n\tfor name in EXEC_NAMES:\n\t\tif name in os.listdir(\"/bin/\"):\n\t\t\treturn True\n\t\n\treturn False", "def __check_if_task_exists(self, server_id):\n if server_id in self.__migrating_tasks.keys():\n return True\n return False", "def find_new_scripts(script_order, already_ran_scripts):\n\n for sql_script_name in script_order:\n\n log.debug(\"Checking sql_script_name {0}...\".format(\n sql_script_name))\n\n absolute_path = pkg_resources.resource_filename(\n \"rtapebbletest\", \"database-change-scripts/{0}\".format(sql_script_name))\n\n just_the_file = os.path.basename(absolute_path)\n\n if just_the_file not in already_ran_scripts:\n\n log.debug(\"Looks like we should run {0}...\".format(just_the_file))\n\n yield absolute_path", "def ifAlreadyDone(self, cxRepo, schemaRepo, schema, tablename):\n logging.debug(f\"\"\"check if {schema}.{tablename} has been analyzed\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name from {schemaRepo}.tablediff where lower\n (table_name) = lower('{tablename}') and schema1 = '{schema}' and\n server1_status = 'ready' and server1_status = 'ready' and result in\n ('ready', 'init')\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n row = curs.fetchone()\n if row is None:\n return 1\n else:\n return 0", "def has_command_with_name(self, command_name):\n return command_name in self.commands", "def check(self):\n # validate contents still to do - for now just check if it exists\n return os.path.exists(self.getDefaultDatabaseConnectionParameter()['path'])", "def isScriptHashScript(pkScript):\n return extractScriptHash(pkScript) is not None", "def _check_file(self, name):\n self.assertTrue(os.path.exists(name), \"Could not find table %s.\" % name)", "def needs_migrations(self):\n # TODO(majklk): also check models etc.\n if len(self.widgets) > 0:\n return True\n return False", "def script_exists(self, *hashes):\n return self._execute([b'SCRIPT', b'EXISTS'] + list(hashes))", "def need_completion_refresh(queries):\n for query in sqlparse.split(queries):\n 
try:\n first_token = query.split()[0]\n if first_token.lower() in ('alter', 'create', 'use', '\\\\r',\n '\\\\u', 'connect', 'drop'):\n return True\n except Exception:\n return False", "def _check_and_apply_migrations(self) -> None:\n from hathor.transaction.storage.exceptions import OutOfOrderMigrationError, PartialMigrationError\n db_is_empty = self.is_empty()\n self.log.debug('step through all migrations', count=len(self._migrations))\n migrations_to_run = []\n # XXX: this is used to ensure migrations don't advance out of order\n previous_migration_state = MigrationState.COMPLETED\n for migration in self._migrations:\n migration_name = migration.get_db_name()\n self.log.debug('step migration', migration=migration_name)\n\n # short-cut to avoid running migrations on empty database\n if migration.skip_empty_db() and db_is_empty:\n self.log.debug('migration is new, but does not need to run on an empty database',\n migration=migration_name)\n self.set_migration_state(migration_name, MigrationState.COMPLETED)\n continue\n\n # get the migration state to decide whether to run, skip or error\n migration_state = self.get_migration_state(migration_name)\n\n if migration_state > previous_migration_state:\n raise OutOfOrderMigrationError(f'{migration_name} ran after a migration that wasn\\'t advanced')\n previous_migration_state = migration_state\n\n should_run_migration: bool\n if migration_state is MigrationState.NOT_STARTED:\n self.log.debug('migration is new, will run', migration=migration_name)\n should_run_migration = True\n elif migration_state is MigrationState.STARTED:\n self.log.warn('this migration was started before, but it is not marked as COMPLETED or ERROR, '\n 'it will run again but might fail', migration=migration_name)\n should_run_migration = True\n elif migration_state is MigrationState.COMPLETED:\n self.log.debug('migration is already complete', migration=migration_name)\n should_run_migration = False\n elif migration_state is MigrationState.ERROR:\n self.log.error('this migration was run before but resulted in an error, the database will need to be '\n 'either manually fixed or discarded', migration=migration_name)\n raise PartialMigrationError(f'Migration error state previously: {migration_name}')\n else:\n raise ValueError(f'Unexcepted migration state: {migration_state!r}')\n\n # run if needed, updating the state along the way\n if should_run_migration:\n migrations_to_run.append(migration)\n self.log.debug('stepped through all migrations')\n if migrations_to_run:\n self.log.info('there are migrations that need to be applied')\n migrations_to_run_count = len(migrations_to_run)\n for i, migration in enumerate(migrations_to_run):\n migration_name = migration.get_db_name()\n self.log.info(f'running migration {i+1} out of {migrations_to_run_count}', migration=migration_name)\n self.set_migration_state(migration_name, MigrationState.STARTED)\n try:\n migration.run(self)\n # XXX: we catch \"any\" exception because just we want to mark the state as \"ERROR\"\n except Exception as exc:\n self.set_migration_state(migration_name, MigrationState.ERROR)\n raise PartialMigrationError(f'Migration error state: {migration_name}') from exc\n else:\n self.set_migration_state(migration_name, MigrationState.COMPLETED)\n if migrations_to_run:\n self.log.info('all migrations have been applied')", "def test_unique(self):\n leading_digits = re.compile(r'^\\d+')\n seen_numbers = set()\n path = self._migrations_path()\n for filename in listdir(path):\n match = leading_digits.match(filename)\n if 
match:\n number = match.group()\n if number in seen_numbers:\n self.fail('There is more than one migration #%s in %s.' %\n (number, path))\n seen_numbers.add(number)", "def __contains__(self, item):\n if \".\" not in item:\n # Normalize to full name\n item = \"%s.%s\" % (self._object.profile.name, item)\n return script_loader.has_script(item)", "def is_already_import_dll(self, dll_name):\n for descriptor in self.import_entries:\n if descriptor.dll == dll_name:\n return True\n return False", "def check_missing_migrations():\n from django.db.migrations.autodetector import MigrationAutodetector\n from django.db.migrations.loader import MigrationLoader\n from django.db.migrations.questioner import (\n NonInteractiveMigrationQuestioner as Questioner,\n )\n from django.db.migrations.state import ProjectState\n\n loader = MigrationLoader(None, ignore_no_migrations=True)\n conflicts = loader.detect_conflicts()\n if conflicts:\n raise Exception(\n \"Migration conflicts detected. Please fix your migrations.\"\n )\n questioner = Questioner(dry_run=True, specified_apps=None)\n autodetector = MigrationAutodetector(\n loader.project_state(),\n ProjectState.from_apps(apps),\n questioner,\n )\n changes = autodetector.changes(\n graph=loader.graph,\n trim_to_apps=None,\n convert_apps=None,\n migration_name=None,\n )\n if changes:\n raise Exception(\n \"Migration changes detected. \"\n \"Please update or add to the migration file as appropriate\"\n )\n print(\"Migration-checker detected no problems.\")", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"write_qiime_train_db.py\", get_files)", "def has_hookscript ( self ):\n return self.hook_script_ref is not None", "def db_upgrade():\n generate_migration_file()\n dbu_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n\n for time_step in [_.strip('.sql') for _ in migration_files()]:\n decide = MySQLScheme.fetch_one(REVISION_EXISTS,\n **{\"args\": {'revision': time_step}})\n if not decide:\n MySQLScheme.commit(getattr(dbu_query, f\"upgrade_{time_step}\").sql)\n LOGGER.info(f\"successful migration: {time_step}\")\n else:\n LOGGER.info(f'migration already exists: {time_step}')", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)", "def db_exists(self):\n \n with self.connection:\n c = self.connection.cursor()\n c.execute(\"SELECT EXISTS(SELECT 1 FROM sqlite_master WHERE name=?)\", [PUBLICATIONS_TABLE])\n exists = c.fetchone()\n if(exists[0] == 1):\n return True\n else:\n return False", "def command_exists(name, path=None):\n if path is None:\n path = sys.path\n\n for prefix in path:\n filename = os.path.join(prefix, name)\n is_executable = os.access(filename, os.X_OK)\n is_file = os.path.isfile(filename)\n if is_executable and is_file:\n return True\n\n return False", "def database_exists (name, parent=None):\n return get_database(name, parent) is not None", "def needs_patch():\n return (IndexName is not None and\n hasattr(BaseDatabaseSchemaEditor, '_create_unique_sql'))", "def validate_setup_teardown_query_file(**kwargs):\n qfilename = kwargs[\"query_filename\"]\n basename = os.path.basename(qfilename)\n check_str = False\n if kwargs[\"check_which\"] == 'setup':\n check_str = 
basename.lower().find('setup') > -1\n elif kwargs[\"check_which\"] == 'teardown':\n check_str = basename.lower().find('teardown') > -1\n else:\n raise TypeError('Unsupported `check_which` parameter.')\n return_val = True\n if not qfilename.endswith(\".sql\"):\n logging.warning(\n \"Query filename \"\n + qfilename\n + ' is invalid - does not end in \".sql\". Skipping'\n )\n return_val = False\n elif not check_str:\n quiet = True if 'quiet' in kwargs and kwargs['quiet'] else False\n if not quiet:\n logging.warning(\n \"Query filename \"\n + qfilename\n + ' does not match \"setup\" or \"teardown\". Skipping'\n )\n return_val = False\n return return_val" ]
[ "0.614944", "0.6089585", "0.60354674", "0.60084146", "0.59065694", "0.5823774", "0.5776442", "0.5734036", "0.5726742", "0.57071155", "0.57049215", "0.56707036", "0.56460166", "0.5644467", "0.56159735", "0.5583992", "0.5568545", "0.5549832", "0.55248946", "0.5498679", "0.5462118", "0.54240805", "0.54109293", "0.53914607", "0.5380997", "0.5367283", "0.5348112", "0.53409314", "0.5340682", "0.53346455" ]
0.62848705
0
Insert the given migration into the _MigrationsRun table.
def update_migrations_run(self, migration: str): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_migrations(self, migrations):\n with self.internal_db.begin() as conn:\n for migration in migrations:\n conn.execute(\n \"INSERT INTO migration (name) \" \"VALUES ('%s');\" % migration\n )", "def run_migration(self, migration: str):\n # read the migration script\n f = open(migration, 'r')\n sql = f.read()\n\n # run the migration script\n cursor = self.cnxn.cursor()\n cursor.execute(sql)\n self.cnxn.commit()\n\n # update the _MigrationsRun table\n self.update_migrations_run(migration)", "def run_migration(self):\n step = \"Migrating Database\"\n try:\n self.slacker.send_thread_reply(step)\n self.kuber.run_migration(tag=self.tag, source=config.APP_MIGRATOR_SOURCE)\n self.migration_completed = True\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "def insert_action(self, run_id: str, action: RunAction) -> None:\n insert = sqlalchemy.insert(action_table).values(\n _convert_action_to_sql_values(run_id=run_id, action=action),\n )\n\n with self._sql_engine.begin() as transaction:\n try:\n transaction.execute(insert)\n except sqlalchemy.exc.IntegrityError as e:\n raise RunNotFoundError(run_id=run_id) from e\n\n self._clear_caches()", "def migration():", "def run_migrations(self, migrations):\n for migration in migrations:\n name = migration[\"name\"]\n migration[\"script\"] = self.get_sql_script(name)\n\n if self.dry_run:\n for migration in migrations:\n print(f'---------------- {migration[\"name\"]} ----------------')\n print(migration[\"script\"])\n return\n\n if not self.accept_all and not self.prompt_for_migrations(migrations):\n return\n\n applied_migrations = []\n with self.target_db.begin() as conn:\n for migration in migrations:\n name = migration[\"name\"]\n script = migration[\"script\"]\n if self.apply_migrations:\n print(f\"Applying {name}\")\n conn.execute(script)\n applied_migrations.append(name)\n if self.register:\n self.register_migrations(applied_migrations)", "def _create_migration(self, table: TableSchema, alter_reqs: List[AlterRequest]) -> None:\n print(f\"Creating migration for {table['name']}...\")\n statements = []\n for request in alter_reqs:\n sql = request.sql\n if len(request.input_needed) == 0:\n print(f\"{request.description} - auto\")\n else:\n print(f\"{request.description} - input needed\")\n\n # If user input was needed, replace it in all SQL statements provided\n for key, reason in request.input_needed.items():\n value = input(f\" {reason}: \")\n for i, line in enumerate(sql):\n sql[i] = line.replace(key, value)\n\n statements.extend(sql)\n\n sql_dir = self.migrations / table['name']\n sql_dir.mkdir(exist_ok=True)\n level = len(list(sql_dir.iterdir()))\n print(f\"Migrations generated for {table['name']}.\")\n description = input(f\"{sql_dir}/{level}_\")\n with open(sql_dir / f'{level}_{description}.sql', 'w') as f:\n f.write(';\\n'.join(statements))", "def db_initialise():\n generate_migration_file()\n if not MySQLScheme.fetch_one(IS_MIGRATION_TABLE,\n **{\"args\": {'schema': SCHEMA}}):\n with open(MIGRATION_FILE, 'r') as init_sql:\n data = init_sql.read()\n\n if f\"CREATE TABLE IF NOT EXISTS {MIGRATION_TABLE}\" not in data:\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_MIGRATION_UP.format(f\"upgrade-{when}\", when,\n MIGRATION_TABLE)\n down = MYSQL_MIGRATION_DOWN.format(f\"downgrade-{when}\",\n MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: \"\n f\"{os.path.join('migrations', 
sql_file)}\")\n else:\n when = re.findall('[0-9]+', data)[0]\n\n generate_migration_file()\n dbi_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n MySQLScheme.commit(getattr(dbi_query, f\"upgrade_{when}\").sql)\n LOGGER.info(f\"initial successful migration: {when}\")", "def db_upgrade():\n generate_migration_file()\n dbu_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n\n for time_step in [_.strip('.sql') for _ in migration_files()]:\n decide = MySQLScheme.fetch_one(REVISION_EXISTS,\n **{\"args\": {'revision': time_step}})\n if not decide:\n MySQLScheme.commit(getattr(dbu_query, f\"upgrade_{time_step}\").sql)\n LOGGER.info(f\"successful migration: {time_step}\")\n else:\n LOGGER.info(f'migration already exists: {time_step}')", "def register(migration):\n MIGRATIONS[migration.__name__] = migration", "def process_migration(self, migration):\n migrations_to_run = self.get_migrations_to_run(migration)\n self.run_migrations(migrations_to_run)", "def add_new_migrant(self, migrant_dict:dict):\n\t\t_id = self.migrants.insert_one(migrant_dict)\n\t\tself.matcher.add_migrant(migrant_dict)\n\t\treturn str(_id.inserted_id)", "def insert_vm_migration(vm, hostname):\n IMPL.insert_vm_migration(vm, hostname)", "def db_migrate():\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_UP.format(f\"upgrade-{when}\", when, MIGRATION_TABLE)\n down = MYSQL_DOWN.format(f\"downgrade-{when}\", when, MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: {os.path.join('migrations', sql_file)}\")", "def _run_migrations(self, current_migration_version: int):\n logger.debug(\"Checking for necessary database migrations...\")\n\n while current_migration_version < latest_migration_version:\n next_migration_version = current_migration_version + 1\n logger.info(\n f\"Migrating the database from v{current_migration_version} to v{next_migration_version}...\",\n )\n\n migration = importlib.import_module(f\".migrations.{str(next_migration_version).rjust(3, '0')}\", \"middleman\")\n # noinspection PyUnresolvedReferences\n migration.migrate(self)\n\n # Update the stored migration version\n self._execute(\"UPDATE migration_version SET version = ?\", (next_migration_version,))\n\n logger.info(f\"Database migrated to v{next_migration_version}\")\n current_migration_version += 1", "async def _migrate(db, configs, migration_name):\n for test_config in configs:\n try:\n await TestConfig(**test_config).save_to_db(db)\n except Exception as exc:\n log.error(f\"Migration {migration_name} has failed\")\n else:\n db.Migrations.insert_one({migration_name: True})", "def post_migrations(self):", "def run_migration(env, upgrade_type):\n pass", "def insert_db():\n populate_tables()", "async def migrate(self):\n # Controlla se ci sono tabelle nel db\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(\"\"\"SELECT COUNT(DISTINCT table_name) as c\n FROM information_schema.columns\n WHERE table_schema = %s\"\"\", (conn.db,))\n db_empty = (await cur.fetchone())[\"c\"] <= 0\n\n # Se ci sono tabelle, prova a leggere `db_version`\n if not db_empty:\n await cur.execute(\"SELECT db_version FROM db_version LIMIT 1\")\n db_version_in_db = await cur.fetchone()\n db_version = 0 if db_version_in_db is None else db_version_in_db[\"db_version\"]\n else:\n db_version = 0\n\n # Prendi la lista di file sql e py da eseguire\n new_migrations = [x for x in self.migrations if x.id > 
db_version]\n\n # Controlla se ci sono migration da eseguire\n if not new_migrations:\n self.logger.info(\"No new migrations. The database is already up to date!\")\n return\n\n # Esegui migrations\n self.logger.info(\"Current db version: @{}\".format(db_version))\n db_version += 1\n current_migration = self.get_migration(db_version)\n while current_migration is not None:\n self.logger.info(\"Executing {}\".format(current_migration.file_name))\n\n if current_migration.type == \"sql\":\n # Leggi ed esegui file sql\n with open(\n os.path.join(os.path.dirname(__file__), \"migrations/{}\".format(current_migration.file_name)), \"r\"\n ) as f:\n data = f.read()\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(data)\n await conn.commit()\n elif current_migration.type == \"py\":\n # Importa modulo py\n module = importlib.import_module(\"migrator.migrations.{}\".format(current_migration.file_name[:-3]))\n migr = getattr(module, \"do\")\n await migr()\n\n # Migration eseguita, aggiorna `db_version`\n self.logger.info(\"Migration {} executed with no errors\".format(current_migration.file_name))\n await self.save_db_version(db_version)\n\n # Vai alla prossima migration\n db_version += 1\n current_migration = self.get_migration(db_version)\n self.logger.info(\"All migrations executed correctly\")", "def migrate(cls)->None:\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS votes(\n id serial PRIMARY KEY,\n question integer,\n user_id integer,\n value integer\n )\"\"\")\n database.connection.commit()", "def create(self, migration_name):\n # original version vith timestamp version format\n # timestamp = strftime(\"%Y%m%d%H%M%S\", localtime())\n next_ver = self.get_next_version()\n file_name = \"%s_%s%s\" % (next_ver, migration_name, Migration.MIGRATION_FILES_EXTENSION)\n if not Migration.is_file_name_valid(file_name):\n raise Exception(\n \"invalid migration name ('%s'); it should contain only letters, numbers and/or underscores\"\n % file_name)\n\n new_file_name = os.path.join(self.__migrations_dir, file_name)\n\n try:\n f = codecs.open(new_file_name, \"w\", \"utf-8\")\n f.write(Migration.TEMPLATE)\n f.close()\n except IOError:\n raise Exception(\"could not create file ('%s')\" % new_file_name)\n\n migration = Migration(new_file_name)\n self.__migrations.append(migration)\n return migration", "def _check_and_apply_migrations(self) -> None:\n from hathor.transaction.storage.exceptions import OutOfOrderMigrationError, PartialMigrationError\n db_is_empty = self.is_empty()\n self.log.debug('step through all migrations', count=len(self._migrations))\n migrations_to_run = []\n # XXX: this is used to ensure migrations don't advance out of order\n previous_migration_state = MigrationState.COMPLETED\n for migration in self._migrations:\n migration_name = migration.get_db_name()\n self.log.debug('step migration', migration=migration_name)\n\n # short-cut to avoid running migrations on empty database\n if migration.skip_empty_db() and db_is_empty:\n self.log.debug('migration is new, but does not need to run on an empty database',\n migration=migration_name)\n self.set_migration_state(migration_name, MigrationState.COMPLETED)\n continue\n\n # get the migration state to decide whether to run, skip or error\n migration_state = self.get_migration_state(migration_name)\n\n if migration_state > previous_migration_state:\n raise OutOfOrderMigrationError(f'{migration_name} ran after a migration that wasn\\'t advanced')\n previous_migration_state = migration_state\n\n 
should_run_migration: bool\n if migration_state is MigrationState.NOT_STARTED:\n self.log.debug('migration is new, will run', migration=migration_name)\n should_run_migration = True\n elif migration_state is MigrationState.STARTED:\n self.log.warn('this migration was started before, but it is not marked as COMPLETED or ERROR, '\n 'it will run again but might fail', migration=migration_name)\n should_run_migration = True\n elif migration_state is MigrationState.COMPLETED:\n self.log.debug('migration is already complete', migration=migration_name)\n should_run_migration = False\n elif migration_state is MigrationState.ERROR:\n self.log.error('this migration was run before but resulted in an error, the database will need to be '\n 'either manually fixed or discarded', migration=migration_name)\n raise PartialMigrationError(f'Migration error state previously: {migration_name}')\n else:\n raise ValueError(f'Unexcepted migration state: {migration_state!r}')\n\n # run if needed, updating the state along the way\n if should_run_migration:\n migrations_to_run.append(migration)\n self.log.debug('stepped through all migrations')\n if migrations_to_run:\n self.log.info('there are migrations that need to be applied')\n migrations_to_run_count = len(migrations_to_run)\n for i, migration in enumerate(migrations_to_run):\n migration_name = migration.get_db_name()\n self.log.info(f'running migration {i+1} out of {migrations_to_run_count}', migration=migration_name)\n self.set_migration_state(migration_name, MigrationState.STARTED)\n try:\n migration.run(self)\n # XXX: we catch \"any\" exception because just we want to mark the state as \"ERROR\"\n except Exception as exc:\n self.set_migration_state(migration_name, MigrationState.ERROR)\n raise PartialMigrationError(f'Migration error state: {migration_name}') from exc\n else:\n self.set_migration_state(migration_name, MigrationState.COMPLETED)\n if migrations_to_run:\n self.log.info('all migrations have been applied')", "def migrate(self):\n\tpass", "def test_startMigration(self):\n source = MockContentStore()\n destination = MockContentStore(store=self.store)\n result = self.manager.migrate(source, destination)\n self.assertEquals(result.ran, 1)\n self.assertEquals(source.migrationDestination, destination)\n self.assertEquals(IMigration(self.store), result)", "def migrate():\n if apply_migrations():\n click.echo(OK)\n else:\n sys.exit(1)", "def _migrate(self):\n with self.engine.begin() as conn:\n context = alembic.migration.MigrationContext.configure(conn)\n current_rev = context.get_current_revision()\n self.log.debug('Current migration revision: %s' % current_rev)\n\n config = alembic.config.Config()\n config.set_main_option(\"script_location\",\n \"zuul:driver/sql/alembic\")\n config.set_main_option(\"sqlalchemy.url\",\n self.connection_config.get('dburi'))\n\n # Alembic lets us add arbitrary data in the tag argument. 
We can\n # leverage that to tell the upgrade scripts about the table prefix.\n tag = {'table_prefix': self.table_prefix}\n alembic.command.upgrade(config, 'head', tag=tag)", "def insertIntoStepLog(self, data: Dict) -> int:\n step_payload = {\n **data,\n **{\n \"step_name\": \"Data Loader\",\n \"step_end_ts\": str(datetime.datetime.now()),\n \"upsert_by\": \"DLoaderMS\",\n \"upsert_ts\": str(datetime.datetime.now()),\n },\n }\n\n insertQuery = \"\"\"\n INSERT INTO file_process_step_log\n (file_process_id,\n step_name,\n step_status,\n step_status_detail,\n step_start_ts,\n step_end_ts,\n upsert_by,\n upsert_ts)\n VALUES ( '{file_process_id}',\n '{step_name}',\n '{step_status}',\n '{step_status_detail}',\n timestamp '{step_start_ts}',\n timestamp '{step_end_ts}',\n '{upsert_by}',\n timestamp '{upsert_ts}' ) \n RETURNING step_id\n \"\"\"\n cursor = self.engine.cursor()\n try:\n cursor.execute(insertQuery.format(**step_payload))\n step_id = cursor.fetchone()[0]\n return step_id\n except Exception as e:\n raise DLoaderException(\n \"Failed while inserting data into audit table {0}\".format(e)\n )\n finally:\n cursor.close()", "def insert_into_table(self, conn, insert_into_table_sql):\n try:\n c = conn.cursor()\n c.execute(insert_into_table_sql)\n conn.commit()\n\n except Error as e:\n print(e)", "def migrate(cls):\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS meetups(\n id serial PRIMARY KEY,\n topic varchar,\n happening_date varchar,\n tags varchar,\n location varchar,\n images varchar,\n body varchar\n )\"\"\")\n database.connection.commit()" ]
[ "0.6739179", "0.6299863", "0.62400484", "0.5966218", "0.5836835", "0.5790835", "0.5692351", "0.5618229", "0.5618169", "0.5581772", "0.55558246", "0.5523925", "0.5503373", "0.5392908", "0.53832996", "0.5312781", "0.53054965", "0.5258299", "0.52449775", "0.5219013", "0.5194695", "0.51940376", "0.51725656", "0.5171055", "0.51541024", "0.5145083", "0.51373917", "0.5134939", "0.5128541", "0.5089806" ]
0.6500243
1
Assert that ret is as expected
def verify_ret(self, ret, expected_ret): assert ret == expected_ret, ( "Function should return: " + ret_vals_dictionary[expected_ret] + ".\nInstead returned: " + ret_vals_dictionary[ret] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assertion_passed(self, func):", "def test_unit(self):\n self.assertTrue(return_true())", "def test_should_be_ok(self):\n self.assertTrue(True)", "def assertReturns(rtype):\n def check_returns(func):\n @wraps(func)\n def wrapped_func(*args, **kwds):\n retval = func(*args, **kwds)\n assert isinstance(retval, rtype), \\\n f\"Retval `{retval}` of function `{func.__name__}` is expected to be of type `{rtype}`\"\n return retval\n return wrapped_func\n return check_returns", "def test_integration(self):\n self.assertTrue(return_true())", "def check_result(context, expected):\n assert context.result == expected, \"Wrong result: {r} != {e}\".format(\n r=context.result, e=expected\n )", "def test_expect_ok(self) -> None:\n assert Ok(2).expect(\"err\") == 2", "def testReturn(self):\n\t\tx = BaseAction('x')\n\t\tself.failUnless(x.record() == x)", "def the_response_should_be_result(result):\n assert web_app.check_response(result)", "def validate_Assert(result, _dummy_condition):\n return result", "def test_is_ok(self) -> None:\n assert Ok(1).is_ok()\n assert not Err(1).is_ok()", "def testExpectAndReturn(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.expectAndReturn(x.g(8, 9), 5)\n\t\tc.replay()\n\t\tself.failUnless(x.g(8, 9) == 5)\n\t\tc.verify()", "def test_failed():\n assert False", "def testHasReturns(self):\n concise = build_code([], [], [], concise=True)\n full = build_code([], [], [], concise=False)\n self.assertNotIn('return', concise)\n self.assertIn('return', full)", "def expected_value(expected, actual):\n assert expected == actual", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def func_case(self):\n test.success(\"\")", "def test_wip(self):\n self.assertTrue(not return_true())", "def test_api_response_data(self):", "def _assertEqual(self, func, test_input, test_output, result):\n diff = '\\n'.join(list(self.differ.compare([test_output], [result])))\n msg = ('Expected %s to translate %r to %r, but got %r\\n%s' %\n (func, test_input, test_output, result, diff))\n self.assertEqual(test_output, result, msg)", "def assert_same(result, expect):\n assert sorted(result) == sorted(expect)", "def assertion_failed(self, func, exception):", "def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. expected={} but actual={}'.format(expected, actual)", "def test_compare(self):", "def assertion_errored(self, func, exception):", "def test_ok(self, start: Result[int, int], exp: Option[int]) -> None:\n assert start.ok() == exp", "def verify():", "def test_xfailed_but_passed():\n pass", "def test_if(self):", "def test_always_succeed():\n assert True" ]
[ "0.71568924", "0.70153856", "0.6888419", "0.68762743", "0.6791112", "0.6758023", "0.67320305", "0.6685891", "0.6594677", "0.65407693", "0.6522941", "0.64063513", "0.640348", "0.6390991", "0.6390034", "0.63731635", "0.63705343", "0.6368727", "0.63404477", "0.63290125", "0.6324123", "0.6304696", "0.6298735", "0.6283247", "0.6273552", "0.6260456", "0.6254094", "0.62244266", "0.62188876", "0.62177545" ]
0.8016156
0
Tests digest data mechs
def test_digest_data(self, mech, data): ret, digested_data = c_digest(self.h_session, data, mech) self.verify_ret(ret, CKR_OK) assert len(digested_data) > 0, "The digested data should have a length" assert data != digested_data, "Digested data should not be the same as the original string"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def digest(self):\n pass", "def digest(self):\n pass", "def digest(self):\n pass", "def test_str_with_data(self):\n data = b'\\x00\\x01\\x02\\x03'\n digest_value = DigestValue(data)\n self._test_str(digest_value, str(data))", "def test_010(self):\n calculator = checksum.get_checksum_calculator_by_dataone_designator('SHA-1')\n calculator.update('test')\n self.assertTrue(calculator.hexdigest())", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def test_030(self):\n for i in range(10):\n flo = StringIO.StringIO('ateststring')\n c1 = checksum.generate_from_flo(flo)\n c2 = dataoneTypes_v1.CreateFromDocument(c1.toxml())\n c = d1_common.checksum.get_checksum_calculator_by_dataone_designator(\n c2.algorithm\n )\n c.update('ateststring')\n self.assertEquals(c.hexdigest(), c2.value())", "def test_dig_sig(self):\n\n for using in [HashTypes.SHA1, HashTypes.SHA2, ]:\n self.do_test_dig_sig(using)", "async def test_get_digest(\n config_digest: str,\n config_digest_signed: str,\n image_config: ImageConfig,\n image_config_signed: ImageConfig,\n):\n assert image_config.get_digest() == config_digest\n assert image_config_signed.get_digest() == config_digest_signed", "def test_040(self):\n for i in range(10):\n c1 = checksum.generate_from_string('ateststring')\n c2 = dataoneTypes_v1.CreateFromDocument(c1.toxml())\n c = d1_common.checksum.get_checksum_calculator_by_dataone_designator(\n c2.algorithm\n )\n c.update('ateststring')\n self.assertEquals(c.hexdigest(), c2.value())", "def test_hash_string(self):\n self.assertEqual(hexlify(self._hashdigest(pubkey_sha)), sample_ripe)", "def test_str_with_no_data(self):\n data = b''\n digest_value = DigestValue(data)\n self._test_str(digest_value, str(data))", "def test_digest_matches_standard_library_md5(self):\n test_string = \"a short test string\"\n standard_md5 = hashlib.md5()\n md5 = PersistableMD5()\n\n standard_md5.update(test_string)\n md5.update(test_string)\n\n self.assertEqual(md5.digest(), standard_md5.digest())", "def do_test_dig_sig(self, hashtype):\n\n if hashtype == HashTypes.SHA1:\n sha = hashes.SHA1\n elif hashtype == HashTypes.SHA2:\n sha = hashes.SHA256\n sk_priv = rsa.generate_private_key(\n public_exponent=65537,\n key_size=1024, # cheap key for testing\n backend=default_backend())\n sk_ = sk_priv.public_key()\n\n print(\"WARNING: cannot use hashlib's sha code with pyca cryptography\")\n print(\"WARNING: pyca cryptography does not support sha3/keccak\")\n\n signer = sk_priv.signer(\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n\n count = 64 + self.rng.next_int16(192) # [64..256)\n data = bytes(self.rng.some_bytes(count))\n\n signer.update(data)\n signature = signer.finalize() # a binary value; bytes\n\n # BEGIN interlude: conversion to/from base64, w/ 76-byte lines\n b64sig = base64.encodebytes(signature).decode('utf-8')\n sig2 = base64.decodebytes(b64sig.encode('utf-8'))\n self.assertEqual(sig2, signature)\n # END interlude ---------------------------------------------\n\n verifier = sk_.verifier(\n signature,\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n verifier.update(data)\n\n try:\n 
verifier.verify()\n # digital signature verification succeeded\n except InvalidSignature:\n self.fail(\"dig sig verification unexpectedly failed\")\n\n # twiddle a random byte in data array to make verification fail\n data2 = bytearray(data)\n which = self.rng.next_int16(count)\n data2[which] = 0xff & ~data2[which]\n data3 = bytes(data2)\n\n verifier = sk_.verifier(\n signature, # same digital signature\n padding.PSS(\n mgf=padding.MGF1(sha()),\n salt_length=padding.PSS.MAX_LENGTH),\n sha())\n verifier.update(data3)\n\n try:\n verifier.verify()\n self.fail(\"expected verification of modified message to fail\")\n\n except InvalidSignature:\n pass # digital signature verification failed", "def test_get_key_digest_with_integer_key(self):\n\n digest = self.as_connection.get_key_digest(\"test\", \"demo\", 1)\n\n assert isinstance(digest, bytearray)", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def test_011(self):\n self.assertRaises(\n Exception, checksum.get_checksum_calculator_by_dataone_designator,\n 'SHA-224-bogus'\n )", "def test_create_with_args(self):\n hashing_algorithm = HashingAlgorithmEnum.MD5\n digest_value = b'\\x00\\x01\\x02\\x03'\n key_format_type = KeyFormatTypeEnum.PKCS_1\n digest = Digest.create(hashing_algorithm, digest_value,\n key_format_type)\n\n self._test_create(digest, hashing_algorithm, digest_value,\n key_format_type)", "def test_collect(self) -> None:\n for algorithm, expected in {\n 'md5': ('698d51a19d8a121ce581499d7b701668',\n '8980c988edc2c78cc43ccb718c06efd5',\n '53fd88c84ff8a285eb6e0a687e55b8c7'),\n 'sha1': ('6216f8a75fd5bb3d5f22b6f9958cdede3fc086c2',\n '42eda1b5dcb3586bccfb1c69f22f923145271d97',\n '2eb2f7be4e883ebe52034281d818c91e1cf16256'),\n 'sha256': ('f6e0a1e2ac41945a9aa7ff8a8aaa0cebc12a3bcc981a929ad5cf810a090e11ae',\n '25235f0fcab8767b7b5ac6568786fbc4f7d5d83468f0626bf07c3dbeed391a7a',\n 'f8d3d0729bf2427e2e81007588356332e7e8c4133fae4bceb173b93f33411d17'),\n }.items():\n # if the current platform does not support the algorithm we're looking at,\n # skip the test steps for that algorithm, but display a warning to the user\n if algorithm not in ALLOWED_HASH_FORMATS:\n warnings.warn(\"Missing hash algorithm {} on this platform, cannot test with it\".format(algorithm), ResourceWarning)\n else:\n hs = functools.partial(hash_signature, hash_format=algorithm)\n s = list(map(hs, ('111', '222', '333')))\n\n assert expected[0] == hash_collect(s[0:1], hash_format=algorithm)\n assert expected[1] == hash_collect(s[0:2], hash_format=algorithm)\n assert expected[2] == hash_collect(s, hash_format=algorithm)", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass" ]
[ "0.67761403", "0.67761403", "0.67761403", "0.6729145", "0.64079225", "0.63451827", "0.63451827", "0.63451827", "0.63451827", "0.63451827", "0.63451827", "0.6271026", "0.62410635", "0.6239406", "0.6196442", "0.61466837", "0.6110236", "0.6104734", "0.6050555", "0.60361606", "0.6026625", "0.60151744", "0.5998457", "0.59975225", "0.5986482", "0.5986482", "0.5986482", "0.5986482", "0.5986482", "0.5986482" ]
0.8185931
0
Checks if the acceptance ratio or maximum iterations is reached
def single_iteration_condition(args): return np.logical_and( np.greater(args[-3], acceptance_ratio), np.less(args[-2], max_iteration))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False", "def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False", "def checkConvergence(self, iteration):\n threshold = abs(0.05*self.overBestVal)\n stdev = np.std(np.array([particle.bestXYZ[2] for particle in self.swarm]))\n if self.overBestVal==self.prevBestVal:\n self.bestStreak+=1\n else:\n self.bestStreak=0\n if stdev<=threshold:\n exitFlag = 0 #set this convergence pattern as exit flag 0\n print('Converged: All points converged to same position; std was less than threshold')\n elif self.bestStreak>=50:\n exitFlag = 1 #set this convergence patter as exit flag 1\n print('Converged: Best value did not increase %d times in a row' %50)\n elif iteration>=800:\n exitFlag = 2 #sets no convergence as exit flag 2\n print('Did not converge, exceeded iteration threshold')\n else:\n exitFlag = None\n return [stdev <= threshold or self.bestStreak>=50 or iteration>=800, exitFlag]", "def reached_convergence(self, delta):\n num_evaluations = len(self.evaluations)\n if num_evaluations < 4:\n return False\n\n if self.best_eval_actions is not None and \\\n (len(self.best_eval_actions) - self.evaluations[num_evaluations - 1]) > delta + 2:\n return False\n\n diff1 = abs(self.evaluations[num_evaluations - 4] - self.evaluations[num_evaluations - 3])\n diff2 = abs(self.evaluations[num_evaluations - 4] - self.evaluations[num_evaluations - 2])\n diff3 = abs(self.evaluations[num_evaluations - 4] - self.evaluations[num_evaluations - 1])\n diff4 = abs(self.evaluations[num_evaluations - 3] - self.evaluations[num_evaluations - 2])\n diff5 = abs(self.evaluations[num_evaluations - 3] - self.evaluations[num_evaluations - 1])\n diff6 = abs(self.evaluations[num_evaluations - 2] - self.evaluations[num_evaluations - 1])\n\n actions = 0\n for agent in self.agents:\n actions += len(agent.actions)\n\n # num of (all possible) actions ~= num of states\n eval_steps = min(actions, 100)\n\n if (self.evaluations[num_evaluations - 1] < eval_steps) and\\\n (self.evaluations[num_evaluations - 2] < eval_steps) and\\\n (self.evaluations[num_evaluations - 3] < eval_steps) and \\\n (self.evaluations[num_evaluations - 4] < eval_steps):\n\n if diff1 < delta and diff2 < delta and diff3 < delta and diff4 < delta and diff5 < delta and diff6 < delta:\n return True\n\n return False", "def _exceeded_maximum_iteration(self) -> bool:\n if self.iteration >= self._maxiter:\n logger.warning(\n f\"Reached the maximum number of iterations \"\n f\"*{self._maxiter}*. 
Did not converge\"\n )\n return True\n\n else:\n return False", "def _CheckConvergence(self):\n self.is_converged = True\n self.are_converged[0] = (abs(self.delta_e) < self.conv_delta_e)\n self.are_converged[1] = (self.grad_rms < self.conv_grad_rms)\n self.are_converged[2] = (self.grad_max < self.conv_grad_max)\n self.are_converged[3] = (self.disp_rms < self.conv_disp_rms)\n self.are_converged[4] = (self.disp_max < self.conv_disp_max)\n for i in range(5):\n if self.must_converge[i] and not self.are_converged[i]:\n self.is_converged = False", "def _should_continue(self):\n # should_continue = self.iter < 20\n # self.iter += 1\n # return should_continue\n if self.iter > self.max_iter:\n return False\n elif self.prev_elbo is None:\n self.prev_elbo = self._get_elbo()\n return True\n elbo = self._get_elbo()\n improvement = (elbo - self.prev_elbo) / self.prev_elbo\n self.prev_elbo = elbo\n self.iter += 1\n return self.epsilon < improvement", "def perform_strategy(self, counter):\r\n if counter < self.percent * len(self.envelopes): # in the first self.percent percent\r\n self.curr_max = max(self.curr_max, self.envelopes[counter].money)\r\n return\r\n return self.envelopes[counter].money > self.curr_max", "def perform_strategy(self, counter):\r\n if counter == 0: # reset. allows for multiple runs with the same instance\r\n self.maxes_counter = 0\r\n self.curr_max = -inf\r\n m = self.envelopes[counter].money\r\n if m >= self.curr_max:\r\n self.maxes_counter += 1\r\n self.curr_max = m\r\n return self.maxes_counter == self.N", "def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False", "def step_solution(self):\n import time, random\n time.sleep(1.0)\n print '(step_solution) Implement me!'\n return True if random.random() < 0.25 else False", "def _validate_iterations(self, proposal):\n iterations = proposal[\"value\"]\n if iterations <= 0:\n raise traitlets.TraitError(\"iterations must be greater than 0.\")\n return iterations", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def is_exhausted(self):\n return random.random() < 0.5", "def isPossibleSubsumer(self):\n if self.action_cnt > cons.theta_sub and self.error < cons.err_sub: #self.prediction < cons.err_sub: (why does it work?)\n return True\n return False", "def check_performance(self):\n self.lg.debug('Checking performance.')\n avg_up = (sum(self.results_up)) / len(self.results_up)\n avg_down = (sum(self.results_down)) / len(self.results_down)\n if (\n avg_up < self.tolerance * self.up or\n avg_down < self.tolerance * self.down\n ):\n self.bad_performance = True\n else:\n self.bad_performance = False", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False", "def finished(self):\n hit_max_evals = len(self.rounds) >= self.max_evals\n\n if len(self.rounds) < self.conv_check_iters:\n hit_conv = False\n else:\n last_rounds = 
self.rounds[-self.conv_check_iters:]\n a = zip(*last_rounds)[1]\n a_sd = np.std(a, axis=0)\n hit_conv = (a_sd < self.conv_action_eps).all()\n\n hit_max_time = self.duration > self.max_time\n\n return hit_max_evals or hit_conv or hit_max_time", "def checkGoal(self):\n # -- It is not included for simplifity --#\n if self.reward_cumulative != None:\n x = round((abs(self.reward_cumulative) - abs(round(self.reward_cumulative))) * 100);\n rem_goal = x % 25\n rem_timeout = x % 20\n if rem_goal == 0 and x != 0:\n self.is_goal = True\n else:\n self.is_goal = False\n\n if rem_timeout == 0 and x != 0:\n self.is_timeout = True\n else:\n self.is_timeout = False", "def compute_mc_acceptance(self):\n if self.steps > 0:\n self.move_viability = \\\n (1. * self.viablesteps) / self.steps\n if self.viablesteps > 0:\n self.acceptance = float(self.acceptedsteps)/float(self.viablesteps)\n else:\n self.acceptance = 0.0\n else:\n self.move_viability = 0.0\n self.acceptance = 0.0", "def single_acceptance_condition(args):\n return np.logical_and(\n np.less(args[-2], 1),\n np.less(args[-1], max_acceptance))", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n print('Bad Parameter: N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False", "def single_iteration(args):\n def single_acceptance_condition(args):\n \"\"\"checks proposal has been accepted or max iterations reached\n\n Parameters\n ----------\n args : tuple\n see loop variable in `single_iteration`\n\n Returns\n -------\n bool:\n True if proposal not accepted and number of attempts to get\n an accepted proposal not yet reached\n \"\"\"\n return np.logical_and(\n np.less(args[-2], 1),\n np.less(args[-1], max_acceptance))\n\n def single_acceptance(args):\n \"\"\"Draws a proposal, simulates and compresses, checks distance\n\n A new proposal is drawn from a truncated multivariate normal\n distribution whose mean is centred on the parameter to move and\n the covariance is set by the population. From this proposed\n parameter value a simulation is made and compressed and the\n distance from the target is calculated. 
If this distance is\n less than the current position then the proposal is accepted.\n\n Parameters\n ----------\n args : tuple\n see loop variable in `single_iteration`\n\n Returns\n -------\n bool:\n True if proposal not accepted and number of attempts to get\n an accepted proposal not yet reached\n\n Todo\n ----\n Parallel sampling is currently commented out\n \"\"\"\n (rng, loc, scale, summ, dis, draws, accepted,\n acceptance_counter) = args\n rng, key = jax.random.split(rng)\n proposed, summaries = self.get_samples(\n key, None, dist=tmvn(\n loc, scale, self.prior.low, self.prior.high,\n max_counter=max_samples))\n distances = np.squeeze(\n self.distance_measure(\n np.expand_dims(summaries, 0),\n target,\n F))\n # if n_parallel_simulations is not None:\n # min_distance_index = np.argmin(distances)\n # min_distance = distances[min_distance_index]\n # closer = np.less(min_distance, ϵ)\n # loc = jax.lax.cond(\n # closer,\n # lambda _ : proposed[min_distance_index],\n # lambda _ : loc,\n # None)\n # summ = jax.lax.cond(\n # closer,\n # lambda _ : summaries[min_distance_index],\n # lambda _ : summ,\n # None)\n # dis = jax.lax.cond(\n # closer,\n # lambda _ : distances[min_distance_index],\n # lambda _ : dis,\n # None)\n # iteration_draws = n_parallel_simulations \\\n # - np.isinf(distances).sum()\n # draws += iteration_draws\n # accepted = closer.sum()\n # else:\n closer = np.less(distances, np.min(dis))\n loc = jax.lax.cond(\n closer,\n lambda _: proposed,\n lambda _: loc,\n None)\n summ = jax.lax.cond(\n closer,\n lambda _: summaries,\n lambda _: summ,\n None)\n dis = jax.lax.cond(\n closer,\n lambda _: distances,\n lambda _: dis,\n None)\n iteration_draws = 1 - np.isinf(distances).sum()\n draws += iteration_draws\n accepted = closer.sum()\n return (rng, loc, scale, summ, dis, draws, accepted,\n acceptance_counter + 1)\n\n (rng, samples, summaries, distances, weighting, acceptance_reached,\n iteration_counter, total_draws) = args\n n_to_move = samples[ϵ_ind:].shape[0]\n cov = self.w_cov(samples, weighting)\n scale = np.linalg.cholesky(cov)\n rng, *keys = jax.random.split(rng, num=n_to_move + 1)\n\n results = jax.vmap(\n lambda key, loc, scale, summaries, distances, draws, accepted,\n acceptance_counter: jax.lax.while_loop(\n single_acceptance_condition,\n single_acceptance,\n (key, loc, scale, summaries, distances, draws, accepted,\n acceptance_counter)))(\n np.array(keys),\n samples[ϵ_ind:],\n np.repeat(np.expand_dims(scale, 0), n_to_move, axis=0),\n summaries[ϵ_ind:],\n distances[ϵ_ind:],\n np.zeros(n_to_move, dtype=np.int32),\n np.zeros(n_to_move, dtype=np.int32),\n np.zeros(n_to_move))\n\n weighting = jax.vmap(\n lambda proposed: (\n self.prior.prob(proposed)\n / (np.sum(weighting * tfd.MultivariateNormalTriL(\n loc=proposed,\n scale_tril=np.repeat(\n np.expand_dims(scale, 0),\n samples.shape[0],\n axis=0)).prob(proposed)))))(\n np.vstack([samples[:ϵ_ind], results[1]]))\n samples = jax.ops.index_update(\n samples,\n jax.ops.index[ϵ_ind:, :],\n results[1])\n summaries = jax.ops.index_update(\n summaries,\n jax.ops.index[ϵ_ind:, :],\n results[3])\n distances = jax.ops.index_update(\n distances,\n jax.ops.index[ϵ_ind:],\n results[4])\n sample_indices = np.argsort(distances)\n samples = samples[sample_indices]\n summaries = summaries[sample_indices]\n distances = distances[sample_indices]\n weighting = weighting[sample_indices]\n acceptance_reached = results[-2].sum() / results[-3].sum()\n return (rng, samples, summaries, distances, weighting,\n acceptance_reached, iteration_counter + 
1,\n total_draws + results[-3].sum())", "def check_criteria(self, _sender, **msg):\n msg = msg['iter_msg']\n if len(_sender.performance_history) == _sender.patience:\n # Value / threshold based methods:\n # Check the latest value of the performance history against a\n # threshold calculated based on the performance history\n msg.should_stop = \\\n check_should_stop(\n mode=_sender.mode,\n performance_history=_sender.performance_history)", "def acceptance_fraction(self):\n return float(self._accepted / self.num_iterations)", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def no_improvement_termination(population, num_generations, num_evaluations, args):\r\n max_generations = args.setdefault('max_generations', 10)\r\n previous_best = args.setdefault('previous_best', None)\r\n current_best = max(population).fitness\r\n if previous_best is None or previous_best != current_best:\r\n args['previous_best'] = current_best\r\n args['generation_count'] = 0\r\n return False\r\n else:\r\n if args['generation_count'] >= max_generations:\r\n return True\r\n else:\r\n args['generation_count'] += 1\r\n return False" ]
[ "0.6803158", "0.6803158", "0.6721316", "0.6673236", "0.649509", "0.64930516", "0.6422876", "0.63096845", "0.6304908", "0.6214944", "0.61827135", "0.61529905", "0.613734", "0.6118663", "0.6102597", "0.60772574", "0.60533637", "0.6036281", "0.60237837", "0.6006274", "0.59957045", "0.5990638", "0.5987594", "0.5987239", "0.5973464", "0.5972304", "0.59638906", "0.59609467", "0.5958243", "0.58917856" ]
0.716327
0
Fills containers and calculates distances All parameters and summaries of simulations made at those parameters (and their distance from the summary of the observed data) are kept in containers for ease of use. This function sets the ``all`` attribute of these containers with the passed summaries and parameters (and distances if provided or it calculates the distances). These are concatenated to existing values unless ``replace = True`` in which the existing values are removed and overwritten with the passed summaries, parameters and distances.
def set_samples(self, parameters, summaries, distances=None, replace=False): if distances is None: distances = jax.vmap( lambda target, F: self.distance_measure( summaries, target, F))(self.target_summaries, self.F) if (self.parameters.all is None) or (replace): self.parameters.all = parameters self.summaries.all = summaries self.distances.all = distances else: self.parameters.all = np.concatenate( [self.parameters.all, parameters], axis=1) self.summaries.all = np.concatenate( [self.summaries.all, summaries], axis=1) self.distances.all = np.concatenate( [self.distances.all, distances], axis=1) self.parameters.size = self.parameters.all.shape[0] self.summaries.size = self.summaries.all.shape[0] self.distances.size = self.distances.all.shape[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fillstation(self, stanames, all=None, plot=None, summary=None, From=None, To=None, by=None,\n how='mean', variables=None, distance=None, sort_cor=True, constant=True, cor_lim=None):\n\n if all == True:\n stations = self.network.getsta([], all=True).values()\n else:\n stations = self.network.getsta(stanames)\n\n for station in stations:\n staname = station.getpara('stanames')\n\n if variables == None:\n newdataframe = station.getData(reindex=True, From=From, To=To, by=by,\n how=how) # Dataframe which stock the new data of the stations\n newdataframe['U m/s'] = station.getData('U m/s', reindex=True, From=From, To=To, by=by, how=how)\n newdataframe['V m/s'] = station.getData('V m/s', reindex=True, From=From, To=To, by=by, how=how)\n newdataframe['Ua g/kg'] = station.getData('Ua g/kg', reindex=True, From=From, To=To, by=by, how=how)\n newdataframe['Theta C'] = station.getData('Theta C', reindex=True, From=From, To=To, by=by, how=how)\n variables_name = newdataframe.columns\n else:\n newdataframe = station.getData(var=variables, reindex=True, From=From, To=To, by=by,\n how=how) # Dataframe which stock the new data of the stations\n variables_name = variables\n # select and sort nearest stations\n selections, selectionsnames = self.__getpredictors_distance(staname, distance)\n\n for var in variables_name:\n print(\"I\" * 30)\n print(\"variable -> \" + var)\n\n try:\n selections, params = self.__sort_predictors_by_corr(station, selections, var, From, To, by, how,\n constant=constant,\n selectionsnames=selectionsnames,\n sort_cor=sort_cor, cor_lim=cor_lim)\n\n selections_iter = iter(selections)\n params_iter = iter(params)\n # print newdataframe\n idxmissing = newdataframe[var][\n newdataframe[var].isnull() == True].index # slect where their is missing data\n\n while len(idxmissing) > 0:\n print(\"Their is [\" + str(len(idxmissing)) + \"] events missing\")\n\n try: # Try if their is still other stations to fill with\n selection = selections_iter.next()\n param = params_iter.next()\n except StopIteration:\n print(\"NO MORE SELECTED STATIONS\")\n break\n\n try:\n Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled\n X1 = selection[0].getData(var, From=From, To=To, by=by,\n how=how) # stations variable used to fill\n X2 = selection[1].getData(var, From=From, To=To, by=by,\n how=how) # stations variable used to fill\n\n select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()\n\n if constant:\n newdata = param[0] + param[1] * select['X1'] + param[2] * select[\n 'X2'] # reconstruct the data\n else:\n newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data\n\n newdataframe.loc[idxmissing, var] = newdata.loc[idxmissing, var]\n idxmissing = newdataframe[var][\n newdataframe[var].isnull() == True].index # slect where their is missing data\n\n\n except KeyError:\n print(\"&\" * 60)\n print('Selected stations did not fill any events')\n except ValueError:\n print('The variable ' + var + \"Does not exist or no data to do the multilinear regression \")\n\n if plot == True:\n df = pd.concat([Y, X1, X2, newdata, newdataframe[var]],\n keys=['Y', 'X1', 'X2', 'estimated data', 'Estimated replaced'], axis=1,\n join='outer')\n self.plotcomparison(df)\n\n print(\"Their is [\" + str(len(idxmissing)) + \"] FINALLY events missing\")\n # Recalculate the wind direction and speed from the U an V components\n\n try:\n speed, dir = cart2pol(newdataframe['U m/s'], newdataframe['V m/s'])\n newdataframe['Dm G'] = dir\n newdataframe['Sm m/s'] = 
speed\n except ValueError:\n print\n 'No wind found in the dataframe'\n except KeyError:\n print('No wind found in the dataframe')\n\n self.newdataframes[staname] = newdataframe", "def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)", "def addAll(self,*args, **kwargs):\n pass", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()", "def run_all(params, mock=False):\n start = datetime.datetime.now()\n g_places = {}\n\n bounds = params[\"bounds\"]\n i = 0\n for lat, lng in get_circle_centers([bounds[\"lower\"][\"lat\"], bounds[\"lower\"][\"lng\"]], # southwest\n [bounds[\"upper\"][\"lat\"], bounds[\"upper\"][\"lng\"]], # northeast\n params[\"radius\"]):\n if not mock:\n logging.info(f\"Fetching places for {lat}, {lng}\")\n sleep(0.5)\n # all places found in the current circle (using the nearly API)\n circle_places = get_radar(params, {\n \"pos\": (lat, lng),\n \"res\": 0\n })\n logging.info(f\"{len(circle_places)} places found for {lat}, {lng}\")\n\n # add the places found in this circle to all places for the given bounding box\n g_places.update(circle_places)\n\n i += 1\n\n if mock:\n logging.info(f\"Mock run finished with {i} circles\")\n\n logging.info(\"Finished in: {}\".format(str(datetime.datetime.now() - start)))\n\n return g_places", "def build_all_analysis(self, matrix_handler, trajectory_handler):\n distance_matrix = matrix_handler.distance_matrix\n\n self.all_possible_analysis = {}\n\n # Pure queries\n self.all_possible_analysis[\"Details\"] = Analysis(\"Details\", self.analysis_function_details)\n self.all_possible_analysis[\"NumClusters\"] = Analysis(\"Number of clusters\", self.analysis_function_num_clusters)\n self.all_possible_analysis[\"NumClusteredElems\"] = Analysis(\"Number of clustered elements\", self.analysis_function_total_elements)\n self.all_possible_analysis[\"MeanClusterSize\"] = Analysis(\"Mean cluster size\", self.analysis_function_mean_cluster_size)\n self.all_possible_analysis[\"PercentInTop4\"] = Analysis(\"Percent in top 4 clusters\", self.analysis_function_top_4)\n self.all_possible_analysis[\"PercentInTop\"] = Analysis(\"Percent in top cluster\", self.analysis_function_top_percent)\n self.all_possible_analysis[\"ClustersTo90\"] = Analysis(\"Clusters to 90\", self.analysis_function_num_clusters_to_percent, 90)\n self.all_possible_analysis[\"NoiseLevel\"] = Analysis(\"Noise level\", self.analysis_function_noise_level, distance_matrix.row_length)\n\n # Evaluators\n self.all_possible_analysis[\"MirrorCohesion\"] = Analysis(\"MirrorCohesion\", self.evaluate_with_calculator,\n {\"class\":MirrorCohesionCalculator,\"matrix\":distance_matrix})\n\n self.all_possible_analysis[\"Cohesion\"] = Analysis(\"Cohesion\", self.evaluate_with_calculator,\n {\"class\":CohesionCalculator,\"matrix\":distance_matrix})\n\n self.all_possible_analysis[\"Separation\"] = Analysis(\"Separation\", self.evaluate_with_calculator,\n {\"class\":SeparationCalculator,\"matrix\":distance_matrix})\n 
self.all_possible_analysis[\"MinimumMeanSeparation\"] = Analysis(\"MinimumMeanSeparation\", self.evaluate_with_calculator,\n {\"class\":MeanMinimumDistanceCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Silhouette\"] = Analysis(\"Silhouette\", self.evaluate_with_calculator,\n {\"class\":SilhouetteCoefficientCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Calinski-Harabasz\"] = Analysis(\"Calinski-Harabasz\", self.evaluate_with_calculator,\n {\"class\":CalinskiHarabaszCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Dunn\"] = Analysis(\"Dunn\", self.evaluate_with_calculator,\n {\"class\":DunnCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Davies-Bouldin\"] = Analysis(\"Davies-Bouldin\", self.evaluate_with_calculator,\n {\"class\":DaviesBouldinCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"GaussianSeparation\"] = Analysis(\"GaussianSeparation\", self.evaluate_with_calculator,\n {\"class\":GaussianSeparationCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"Compactness\"] = Analysis(\"Compactness\", self.evaluate_with_calculator,\n {\"class\":CompactnessCalculator,\"matrix\":distance_matrix})\n\n # Cython\n self.all_possible_analysis[\"CythonMirrorCohesion\"] = Analysis(\"CythonMirrorCohesion\", self.evaluate_with_calculator,\n {\"class\":CythonMirrorCohesionCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"CythonMinimumMeanSeparation\"] = Analysis(\"CythonMinimumMeanSeparation\", self.evaluate_with_calculator,\n {\"class\":CythonMeanMinimumDistanceCalculator,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"CythonSilhouette\"] = Analysis(\"CythonSilhouette\", self.evaluate_with_calculator,\n {\"class\":CythonSilhouetteCoefficientCalculator,\"matrix\":distance_matrix})\n\n # Graph\n self.all_possible_analysis[\"RatioCut\"] = Analysis(\"RatioCut\", self.evaluate_with_calculator,\n {\"class\":RatioCut,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"NCut\"] = Analysis(\"NCut\", self.evaluate_with_calculator,\n {\"class\":NCut,\"matrix\":distance_matrix})\n self.all_possible_analysis[\"NormNCut\"] = Analysis(\"NormNCut\", self.analysis_function_norm_n_cut,distance_matrix)\n self.all_possible_analysis[\"MinMaxCut\"] = Analysis(\"MinMaxCut\", self.evaluate_with_calculator,\n {\"class\":MinMaxCut,\"matrix\":distance_matrix})\n\n # Cython & Graph\n self.all_possible_analysis[\"CythonNormNCut\"] = Analysis(\"CythonNormNCut\", self.analysis_function_cython_norm_n_cut,distance_matrix)\n\n # PCA\n self.all_possible_analysis[\"PCAanalysis\"] = Analysis(\"PCAanalysis\", self.analysis_function_pca, trajectory_handler)", "def disease_stats_update_aggregates(records=None, all=False):\n\n if not records:\n return\n\n # Test to see which date format we have based on how we were called\n if isinstance(records, str):\n from_json = True\n from dateutil.parser import parse\n records = json.loads(records)\n elif isinstance(records[0][\"date\"],\n (datetime.date, datetime.datetime)):\n from_json = False\n else:\n from_json = True\n from dateutil.parser import parse\n\n db = current.db\n #s3db = current.s3db\n atable = db.disease_stats_aggregate\n\n if not all:\n # Read the database to get all the relevant records\n # @ToDo: Complete this\n #dtable = s3db.disease_stats_data\n return\n\n # For each location/parameter pair, create a time-aggregate summing all\n # the data so far\n\n now = current.request.now\n\n # Assemble raw data\n 
earliest_period = now.date()\n locations = {}\n parameters = []\n pappend = parameters.append\n for record in records:\n location_id = record[\"location_id\"]\n if location_id not in locations:\n locations[location_id] = {}\n parameter_id = record[\"parameter_id\"]\n if parameter_id not in parameters:\n pappend(parameter_id)\n if parameter_id not in locations[location_id]:\n locations[location_id][parameter_id] = {}\n if from_json:\n date = parse(record[\"date\"]) # produces a datetime\n date = date.date()\n else:\n date = record[\"date\"]\n if date < earliest_period:\n earliest_period = date\n locations[location_id][parameter_id][date] = record[\"value\"]\n\n # Full range of dates\n # 1 per day from the start of the data to the present day\n from dateutil.rrule import rrule, DAILY\n dates = rrule(DAILY, dtstart=earliest_period, until=now)\n dates = [d.date() for d in dates]\n\n # Add the sums\n insert = atable.insert\n lfield = atable.location_id\n pfield = atable.parameter_id\n dfield = atable.date\n ifield = atable.id\n _q = (atable.agg_type == 1)\n for location_id in locations:\n location = locations[location_id]\n query = _q & (lfield == location_id)\n for parameter_id in location:\n parameter = location[parameter_id]\n q = query & (pfield == parameter_id)\n for d in dates:\n values = []\n vappend = values.append\n for date in parameter:\n if date <= d:\n vappend(parameter[date])\n values_sum = sum(values)\n exists = db(q & (dfield == d)).select(ifield,\n limitby=(0, 1))\n if exists:\n db(ifield == exists.first().id).update(sum = values_sum)\n else:\n insert(agg_type = 1, # Time\n location_id = location_id,\n parameter_id = parameter_id,\n date = d,\n sum = values_sum,\n )\n\n # For each location/parameter pair, build a location-aggregate for all\n # ancestors, by level (immediate parents first).\n # Ensure that we don't duplicate builds\n # Do this for all dates between the changed date and the current date\n\n # Get all the ancestors\n # Read all the Paths\n # NB Currently we're assuming that all Paths have been built correctly\n gtable = db.gis_location\n ifield = gtable.id\n location_ids = set(locations.keys())\n paths = db(ifield.belongs(location_ids)).select(gtable.path)\n paths = [p.path.split(\"/\") for p in paths]\n # Convert list of lists to flattened list & remove duplicates\n import itertools\n ancestors = tuple(itertools.chain.from_iterable(paths))\n # Remove locations which we already have data for\n ancestors = [a for a in ancestors if a not in location_ids]\n\n # Get all the children for each ancestor (immediate children not descendants)\n pfield = gtable.parent\n query = (gtable.deleted == False) & \\\n (pfield.belongs(ancestors))\n all_children = db(query).select(ifield, pfield)\n\n # Read the levels\n query = (gtable.id.belongs(ancestors)) & \\\n (gtable.level.belongs((\"L4\", \"L3\", \"L2\", \"L1\"))) # L0?\n rows = db(query).select(gtable.id,\n gtable.level,\n # Build the lowest level first\n # FIXME this ordering makes no real sense when building async\n # with multiple workers; in that case, the entire child\n # cascade must happen synchronously inside each top-level\n # build\n orderby = ~gtable.level,\n )\n\n run_async = current.s3task.run_async\n from gluon.serializers import json as jsons\n\n dates = jsons(dates)\n for row in rows:\n location_id = row.id\n children = [c.id for c in all_children if c.parent == location_id]\n children = json.dumps(children)\n for parameter_id in parameters:\n run_async(\"disease_stats_update_location_aggregates\",\n args = 
[location_id, children, parameter_id, dates],\n timeout = 1800 # 30m\n )", "def combine_all(self):\n if self._train_only:\n return\n\n combined = copy.deepcopy(self.train)\n\n # relabel pids in gallery (query shares the same scope)\n g_pids = set()\n for items in self.gallery:\n pid = items[1]\n if pid in self._junk_pids:\n continue\n g_pids.add(pid)\n pid2label = {pid: i for i, pid in enumerate(g_pids)}\n\n def _combine_data(data):\n for img_path, pid, camid, dsetid in data:\n if pid in self._junk_pids:\n continue\n pid = pid2label[pid] + self.num_train_pids\n combined.append((img_path, pid, camid, dsetid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def build_summary(self, summary_dict):\n for key, value in summary_dict.items():\n tf.summary.scalar(key, value)\n merged_op = tf.summary.merge_all()\n return merged_op", "def datacenters_some(self, datacenters_some):\n\n self._datacenters_some = datacenters_some", "def rebuild_all(self, vector=None):\n if not hasattr(self, \"positions\"):\n self.load_data()\n\n if vector is not None and not isinstance(vector, np.ndarray):\n raise ValueError(\"`vector` must be a 1-d numpy array\")\n\n if vector is not None:\n assert vector.shape == self.params.shape, \\\n \"Incorrect `vector` shape. Is {}, but should be {}\".format(\n vector.shape, self.params.shape\n )\n # Copy to break references and avoid later manipulation by RNG\n self.sample = (self.rng.next() if vector is None else vector).copy()\n self.rebuild_technosphere_matrix(self.tech_sample)\n self.rebuild_biosphere_matrix(self.bio_sample)\n if self.lcia:\n self.rebuild_characterization_matrix(self.cf_sample)\n if self.weighting:\n self.weighting_value = self.weighting_sample\n\n if self.presamples:\n self.presamples.update_matrices()", "def add_all(self, *values):\n for value in values:\n self.add(value)", "def test_update_summary_tables(self, mock_sum, mock_tag_sum, mock_vol_tag_sum, mock_delete, mock_cluster_populate):\n start_date = self.dh.today\n end_date = start_date\n\n start_date_str = start_date.strftime(\"%Y-%m-%d\")\n end_date_str = end_date.strftime(\"%Y-%m-%d\")\n\n self.updater.update_summary_tables(start_date_str, end_date_str)\n mock_delete.assert_called_with(self.ocp_provider.uuid, start_date.date(), end_date.date())\n mock_sum.assert_called()\n mock_tag_sum.assert_called()\n mock_vol_tag_sum.assert_called()", "def set_all_values(self, values):\n return self.display_table.set_all_values(values,root=self.display_table_root,include=self.params)", "def all_measurements(candidate, godata):\n measurements = OrderedDict()\n measurements.update(concept_measurements(candidate, godata))\n measurements.update(evidence_measurements(candidate))\n measurements.update(bias_measurements(candidate))\n return measurements", "def updateAllParams(self):\n try:\n self.sigTreeStateChanged.disconnect(self.updateSystem)\n reconnect = True\n except TypeError:\n reconnect = False\n try:\n with self.treeChangeBlocker():\n for param in self:\n constraints = self.system._vars[param.name()][3]\n if 'f' in constraints:\n fixed = param['fixed']\n else:\n fixed = None\n\n\n if fixed is True:\n self.updateParam(param, 'fixed')\n else:\n try: # value is auto-generated\n val = getattr(self.system, param.name())\n if param.type() == 'str':\n param.setValue(repr(val))\n else:\n param.setValue(val)\n param.setReadonly(True)\n if fixed is False:\n self.updateParam(param, 'autoFixable')\n else:\n self.updateParam(param, 
'auto')\n\n except RuntimeError: \n if fixed is not None: # no value, fixable\n self.updateParam(param, 'incomplete')\n else:\n self.updateParam(param, 'unconstrained')\n\n finally:\n if reconnect:\n self.sigTreeStateChanged.connect(self.updateSystem)", "def updateParameters(self, parameters):\r\n if parameters[0].altered or parameters[1].altered:\r\n in_layer_value = parameters[0].valueAsText\r\n in_spacing_value = parameters[1].valueAsText\r\n if in_layer_value is not None and in_spacing_value is not None:\r\n parameters[5].value = in_layer_value + \"_densified_\" + str(int(in_spacing_value)) + \"m\"\r\n \r\n if parameters[2].altered:\r\n with arcpy.da.SearchCursor(parameters[0].valueAsText, parameters[2].valueAsText) as g_rows:\r\n parameters[3].filter.list = sorted(list(set([row[0] for row in g_rows])))\r\n with arcpy.da.SearchCursor(parameters[0].valueAsText, parameters[2].valueAsText) as l_rows:\r\n parameters[4].filter.list = sorted(list(set([row[0] for row in l_rows])))\r\n return", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def addAll(self, *args):\n pass", "def build_summaries(self):\n\n # Loss summary.\n tf.summary.scalar('loss', self.loss)\n\n merged = tf.summary.merge_all()\n self.summary_op = merged\n tf.logging.info('summary op set')", "def build_summaries(self):\n\n # Loss summary.\n tf.summary.scalar('loss', self.loss)\n\n merged = tf.summary.merge_all()\n self.summary_op = merged\n tf.logging.info('summary op set')", "def docker(all_, command) -> None:\n if all_ is not None:\n docker_manager = DockerManager()\n getattr(docker_manager, f\"{all_}_all\")()\n else:\n if len(command):\n DockerManager.execute_command(command)", "def produce_test_all(root_dir):\n test = 'testImnamesSe.csv'\n all_bboxes = 'bboxesDF.csv'\n test_lab = 'testLabeledDF.csv'\n\n if test in os.listdir(root_dir) and all_bboxes in os.listdir(root_dir) \\\n and test_lab in os.listdir(root_dir):\n test_imnames = pd.read_csv(osp.join(root_dir, test),\n header=None, squeeze=True)\n all_bboxes = pd.read_csv(osp.join(root_dir, all_bboxes))\n test_lab = pd.read_csv(osp.join(root_dir, test_lab))\n else:\n _, test_imnames = process_pool_mat(root_dir)\n all_bboxes, _ = process_images_mat(root_dir)\n test_lab, _ = process_test_mat(root_dir)\n\n test_all = all_bboxes[all_bboxes['imname'].isin(test_imnames)]\n test_all.index = range(test_all.shape[0])\n test_all = pd.merge(test_all, test_lab, how='outer')\n test_all['pid'] = test_all['pid'].fillna(-1)\n test_all['is_query'] = test_all['is_query'].fillna(0)\n test_all['pid'] = test_all['pid'].values.astype(np.int32)\n test_all['is_query'] = test_all['is_query'].values.astype(np.int32)\n\n test_all.to_csv(osp.join(root_dir, 'testAllDF.csv'), index=False)\n\n return test_all", "def ScheduleAll(self):\n \n if self._scheduled:\n\n if self.verbose:\n\n print \"All objects are already scheduled\"\n\n return\n \n else:\n\n if self.verbose:\n\n print \"Scheduling all simulation objects\"\n\n\n\n if len(self._inputs) > 0:\n \n if self.verbose:\n\n print \"\\tScheduling inputs:\"\n \n for i in self._inputs:\n\n if self.verbose:\n\n print \"\\t\\tScheduling input '%s'\" % i.GetName()\n\n self.AddSchedulee(i, 'input')\n\n\n if len(self._solver_collection.solvers) > 0:\n \n if self.verbose:\n\n print \"\\tScheduling solvers:\"\n\n for s in self._solver_collection.solvers:\n\n if self.verbose:\n\n print \"\\t\\tScheduling solver '%s'\" % s.GetName()\n\n self.AddSchedulee(s, 'solver')\n\n\n # If we have an event distributor set we\n # schedule it here. 
The rsnet example in perl\n # has it sheduled after the heccer solvers\n # so i'll do the same.\n if not self._event_distributor is None:\n\n if self.verbose:\n\n print \"\\tScheduling event distributor '%s'\" % self._event_distributor.GetName()\n\n self.AddSchedulee(self._event_distributor, 'event_distributor')\n \n\n\n if len(self._outputs) > 0:\n\n if self.verbose:\n\n print \"\\tScheduling outputs:\"\n\n for o in self._outputs:\n\n if self.verbose:\n\n print \"\\t\\tScheduling output '%s'\" % o.GetName()\n \n self.AddSchedulee(o, 'output')\n\n self._scheduled = True", "def do_calculate_all(self, **kwargs):\n _return = False\n\n # Calculate all Allocations, skipping the top node in the tree.\n for _node in self.tree.all_nodes()[1:]:\n if _node.identifier != 0:\n self.do_calculate(_node.identifier, **kwargs)\n\n return _return", "def setConfigAll(self,*args,**kwargs):\n return self.configAll(True,*args,**kwargs)", "def summarize(self, locuslen):\n # First, calculate the mean of the parameter estimates from each\n # of the replicates\n hot_means = []\n for r_t in zip(*self.hot_params):\n v = [x for x in r_t if not math.isnan(x)]\n hot_means.append(sum(v)/len(v))\n cold_means = []\n for r_t in zip(*self.cold_params):\n v = [x for x in r_t if not math.isnan(x)]\n cold_means.append(sum(v)/len(v))\n bfgs_means = []\n for r_t in zip(*self.opt_params):\n v = [x for x in r_t if not math.isnan(x)]\n bfgs_means.append(sum(v)/len(v))\n theta_mean = sum(self.theta) / len(self.theta)\n # Then, convert the parameters into meaningful values\n # the theta estimate is 4*Na*u*L\n anc_ne = theta_mean / (4 * 3e-9 * locuslen)\n # Then, the parameters are scaled by that. Population sizes are scaled\n # by theta (4Na), and times and migration rates are given in units of\n # 2N.\n scaled_params = []\n for name, val in zip(self.params['Names'], bfgs_means):\n if name.startswith('N'):\n scaled_params.append(val * anc_ne)\n elif name.startswith('m'):\n scaled_params.append(val /(anc_ne * 2))\n elif name.startswith('T'):\n scaled_params.append(val * anc_ne * 2)\n else:\n scaled_params.append(val)\n # Write these values into the class data\n self.hot_mean = hot_means\n self.cold_mean = cold_means\n self.bfgs_mean = bfgs_means\n self.theta_mean = theta_mean\n self.Na = anc_ne\n self.scaled_params = scaled_params\n return", "def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n self.state[p][\"sum\"] = torch.full_like(\n p,\n group[\"initial_accumulator_value\"],\n memory_format=torch.preserve_format,\n device=\"cpu\",\n ).to(p.device)", "def build_summary(self):\n assert self.mode==\"train\"\n\n for var in tf.trainable_variables():\n with tf.name_scope(var.name[:var.name.find(\":\")]):\n with tf.name_scope(\"values\"):\n self.variable_summary(var)\n\n for g, var in zip(self.gs, self.g_vars):\n with tf.name_scope(var.name[:var.name.find(\":\")]):\n with tf.name_scope(\"gradients\"):\n self.variable_summary(g)\n\n with tf.name_scope(\"cross_entropies\"):\n self.variable_summary(self.cross_entropies)\n\n with tf.name_scope(\"attention\"):\n self.variable_summary(self.sum_alpha) \n\n with tf.name_scope(\"scores\"):\n self.variable_summary(self.scores) \n\n tf.summary.scalar(\"num_correct_words\", self.num_correct_words)\n\n tf.summary.scalar(\"cross_entropy_loss\", self.cross_entropy_loss)\n tf.summary.scalar(\"attention_loss\", self.attention_loss)\n tf.summary.scalar(\"l2_loss\", self.l2_loss)\n tf.summary.scalar(\"loss\", self.loss)\n \n self.summary = tf.summary.merge_all()" ]
[ "0.56989044", "0.50126624", "0.49523053", "0.4929002", "0.4917026", "0.4851263", "0.48271608", "0.4822689", "0.4796284", "0.47489873", "0.47026354", "0.46610367", "0.4643256", "0.46427402", "0.4603709", "0.45961693", "0.45687324", "0.45621866", "0.45471042", "0.4514141", "0.4512947", "0.4512947", "0.45091766", "0.45084336", "0.45071784", "0.4500858", "0.4496763", "0.44956002", "0.44778216", "0.44566423" ]
0.6270599
0
Get token from the storage by token_name and username.
def get_token_by_username(self, token_name, username): args = (token_name, username) row = self.db_manager.execute_sql_and_fetchone( SQL_TOKEN_GET_BY_TOKEN_USERNAME, args ) if row: token_object = convert_db_row_to_dict(row, TOKEN_MODEL_FIELDS) else: token_object = {} return token_object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, get_missing=False).token", "def token_by_owner(username):\n return Token.query.filter_by(owner=username).first()", "def _get_token(self):\n return user.get_token()", "def get_username_from_token(self, token):\n dataBase = self.read_database()\n if token in dataBase['sessions']:\n userName = dataBase['sessions'][token]['userName']\n return userName\n else:\n raise InvalidTokenException(\"Token not valid.\")", "def get_token(self, name):\n if self.kv.get(name):\n return self.kv.get(name)\n token = self.random_string(24)\n self.kv.set(name, token)\n return token", "def get_token(username, password):\n\t\ttoken = cf.get_token(username, password)\n\t\treturn token", "def get_master_token(user, repo, name, config):\n url = \"{}/repos/{}/{}/master_tokens\".format(config['url_base'], user, repo)\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n tokens = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n for token in tokens:\n if token['name'] == name:\n return token\n\n return None", "def get_user(self, token: str) -> Optional[User]:", "def get_user(self, token: str) -> Optional[User]:", "async def get_user_name(request, token):\n try: \n async with request.app['db'].acquire() as conn:\n query = db.users.select().where(db.users.c.token == token)\n result = await conn.fetch(query)\n return result[0]['login']\n except Exception as e:\n print('Was exception: ', e)", "def _get_token_info(self, username, token_id):\n user = self.db.get_user(username)\n if not user:\n raise YKAuthError(\"No such user: %s\" % username)\n logger.debug('Found user: %s', user)\n token = self.db.get_token(user['users_id'], token_id)\n if not token:\n logger.error('Token %s is not associated with %s', token_id, username)\n raise YKAuthError(\"Token %s is not associated with %s\" % (token_id, username))\n logger.debug('Found token: %s', token)\n if not token.get('yubikeys_enabled'):\n logger.error('Token %s is disabled for %s', token_id, username)\n raise YKAuthError(\"Token is disabled for %s\" % username)\n return user", "def getUser(self, authenticationToken):\r\n pass", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "async def get_user(token: str = Depends(get_user_token_strict)) -> schemas.UserToken:\n token_info = await security.decode_jwt(token)\n return schemas.UserToken.from_token(token_info)", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "async def get_user_token(\n token: Optional[str] = None,\n x_token: Optional[str] = Header(None),\n authorization: Optional[str] = Header(None),\n sessiontoken: Optional[str] = Cookie(None),\n) -> Optional[str]:\n if token:\n return token\n if x_token:\n return x_token\n if authorization and authorization.startswith(\"Bearer \"):\n return authorization[7:]\n if sessiontoken:\n return sessiontoken\n return None", "def load_token(token):\n \n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. 
\n \n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n max_age = REMEMBER_COOKIE_DURATION.total_seconds()\n \n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n \n #Find the User\n user = load_user(data[0])\n \n #Check Password and return user or None\n if user and data[1] == user.password:\n return user\n return None", "def getTokens(username):\n tokens = users.find({\"Username\": username})[0][\"Tokens\"]\n return tokens", "async def token(request: Request):\n return get_token()", "def get_token():\n if os.path.exists(AUTH_TOKEN_PATH):\n with open(str(AUTH_TOKEN_PATH), 'r') as TokenObj:\n try:\n data = TokenObj.read()\n except (OSError, IOError) as e:\n echo(e)\n data = json.loads(data)\n token = data[\"token\"]\n return token\n else:\n echo(\"\\nYour token file doesn't exists.\")\n echo(\"\\nIt should be present at ~/.evalai/token.json\\n\")\n return None", "def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_primitive()\n\n if token:\n return token.to_primitive()\n else:\n raise NotFound(\"Token Not Found\")", "def UserToken(self) -> object:", "def get_user_from_jwt(request):\n jwt_token = request.META.get('HTTP_AUTHORIZATION').split(' ')[1] \n jwt_token = jwt.decode(jwt_token, settings.SECRET_KEY, algorithms=[\"HS256\"])\n username = jwt_token['username']\n user = User.objects.get(username=username)\n return username, user", "def get_token(cls, user, full_result=False):\n if user is None:\n return EMPTY_KNOX_TOKEN\n result = AuthToken.objects.create(user=user)\n return result if full_result else result[1]", "def get_username_from_jwt(token: str):\n return decode_token(token).get(\"username\")", "async def get_user(\n token: str = Depends(get_token), users: UserRepository = Depends()\n) -> User:\n user = await users.get(token=token)\n if user:\n return user\n raise HTTPException(status_code=403, detail=\"Invalid token\")", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def get_user_by_token(token):\n collection = get_collection(\"user\")\n user_info = collection.find_one({\"token\": token})\n return user_info", "def get_token(self, token_file: str = 'token') -> str:\n token = ''\n with open(self.path(token_file), 'r') as file:\n tokens = file.read().split('\\n')\n # Loop over all tokens in the file\n for t in tokens:\n # Check if name of token matches name of bot\n if self.name in t:\n token = t.split(' ')[1]\n return token" ]
[ "0.75296414", "0.7063065", "0.6634913", "0.6587599", "0.65856594", "0.65776986", "0.64540553", "0.64417773", "0.64417773", "0.63905823", "0.6359355", "0.63377416", "0.6332766", "0.6272585", "0.626637", "0.6266115", "0.6264914", "0.6258217", "0.6233344", "0.62162817", "0.6211519", "0.62035954", "0.6179707", "0.6160842", "0.6155967", "0.61400133", "0.612062", "0.611872", "0.6108683", "0.6108112" ]
0.7691706
0
parse and run the turtle program, producing an image. return the image (as a filename?)
def run_turtle_program(source): ast = parser.parse(source) t = turtle.Turtle() for stmt in ast.statement: do_statement(stmt, t) canvas = turtle.Screen().getcanvas() canvas.postscript(file='image.eps') img = Image.open('image.eps') img.save('image.png', 'png') turtle.Screen().bye() return 'image.png'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveImage(turtle, filename):\n ts = turtle.getscreen()\n tc = ts.getcanvas()\n tc.postscript(file=filename)", "def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)", "def main():\n parser = argparse.ArgumentParser(\n description=\"A program to generate geometric wallpaper.\")\n\n parser.add_argument(\"-s\", \"--shape\",\n help=\"the name of shapes to generate\",\n type=str,\n choices=(\"hexagon\",\n \"square\",\n \"diamond\",\n \"half-diamond\"),\n default=\"hexagon\")\n\n parser.add_argument(\"-n\", \"--num-across\",\n help=\"the number of shapes across the canvas to create\",\n type=int,\n default=10)\n\n parser.add_argument(\"--size\",\n help=\"the size of the created image\",\n type=int,\n nargs=2,\n default=instagram_size)\n\n parser.add_argument(\"-o\", \"--outfile\",\n help=\"name of the created file\",\n type=str)\n\n args = parser.parse_args()\n\n # Create the image.\n im = Image.new('RGB', args.size)\n draw = ImageDraw.Draw(im)\n\n for shape in gw.shapes.cover_in_shapes(args.shape, im.size,\n args.num_across):\n draw.polygon(shape, fill=gw.colors.palette_chooser())\n\n # Save the image.\n if args.outfile:\n im.save(args.outfile)\n else:\n im.save(\"{}_{}_{}x{}.png\".format(args.shape, args.num_across,\n args.size[0], args.size[1]),\n \"PNG\")", "def main():\n usage = \"usage: %prog [options] input: BioC File (args[0]); Output Directory for the (picture) .svg file.\"\n parser = OptionParser(version='%prog 0.99', usage=usage)\n\n parser.add_option('-l', '--logfile', dest='logfilename',\n help='write log to FILE', metavar='FILE')\n parser.add_option('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_option('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n\n\n\n (options, args) = parser.parse_args()\n\n if options.debug: print >> sys.stderr, '# Starting processing'\n\n process(options=options,args=args)\n\n\n\n\n sys.exit(0) # Everything went ok!", "def main():\n filename = \"data/2009/4/20090423.gif\"\n words = words_from_image(filename)\n print(words)", "def make_image():\n click.echo(\"make_image\")", "def main():\n args = _argument_parsing()\n _prepare_turtle()\n _if_else_statement(args)\n turtle.mainloop()", "def main():\n # parse command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--no-export\", action=\"store_true\",\n help=\"Don't export an .eps file of the drawing\")\n parser.add_argument(\"--fast\", action=\"store_true\",\n help=\"Add triangles directly to the Tkinter canvas for speed\")\n parser.add_argument(\"--birds-eye\", action=\"store_true\",\n help=\"Show a bird's eye view of the entire terrain\")\n parser.add_argument(\"--random-terrain\", action=\"store_true\",\n help=\"Use a random seed for the terrain heightmap\")\n parser.add_argument(\"--random-color-offset\", action=\"store_true\",\n help=\"Use a random seed for the color offset heightmap\")\n args = parser.parse_args()\n \n # set up turtle parameters\n print(\"Setting up...\")\n turtle.setup(9999, 9999)\n win_scale = min(turtle.window_width()//22, turtle.window_height()//17)\n turtle.setup(win_scale*22, win_scale*17) # the largest 11x8.5 window possible\n turtle.title(\"Submission by Quinn Tucker\")\n turtle.tracer(0, 0)\n turtle.setundobuffer(None)\n turtle.hideturtle()\n turtle.penup()\n \n # fill the background with the sky 
gradient\n print(\"Filling the sky...\")\n fill_sky_gradient(256, 0.58)\n \n # set up the lights and camera\n lights = [\n #DirectionalLight(SUNLIGHT_DIRECTION, SUNLIGHT_COLOR, dot_clip=0.0),\n DirectionalLight(AMBIENT_LIGHT_DIRECTION, AMBIENT_LIGHT_COLOR, dot_clip=-0.0),\n ]\n if args.birds_eye:\n camera = Camera((0, 6.0, -2.4), math.pi*0.34, 0, 0, zoom=3.4, fog_factor=0, lights=lights, fast_draw=args.fast)\n else:\n camera = Camera((0, 0.07, -0.001), 0, 0, 0, zoom=1.2, fog_factor=FOG_FACTOR, lights=lights, fast_draw=args.fast)\n \n # generate and draw the terrain\n print(\"Generating terrain...\")\n if args.random_color_offset:\n color_offset_seed = random.getrandbits(32)\n print(f\" Color offset seed = {color_offset_seed}\")\n else:\n color_offset_seed = 3038607546\n random.seed(color_offset_seed)\n color_offset = Terrain(recursion_depth=9, noise_depth=4, scale=0.35)\n \n if args.random_terrain:\n terrain_seed = random.getrandbits(32)\n print(f\" Terrain seed = {terrain_seed}\")\n else:\n terrain_seed = 129477298\n random.seed(terrain_seed)\n terrain = Terrain(recursion_depth=9, noise_depth=7, scale=0.10,\n snow_height=0.025, tree_height=-0.015, color_offset_heightmap=color_offset)\n \n terrain.draw(camera)\n print(\"Updating the screen...\")\n turtle.update()\n \n # export the drawing to a file\n if not args.no_export:\n OUTPUT_FILE = \"output.eps\"\n print(f\"Exporting {OUTPUT_FILE}...\")\n turtle.getcanvas().postscript(file=OUTPUT_FILE, colormode=\"color\", pagewidth=\"11i\")\n \n # wait for the user to close the window\n print(\"Done!\")\n turtle.mainloop()", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def execute():\n args = parse()\n \n image = args.image\n \n # Switch on the options\n if args.test:\n unittest()\n elif args.grade:\n grade(image)\n elif args.encode:\n encode(image)\n else:\n launchgui(image)", "def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)", "def main():\n try:\n pixid = sys.argv[1]\n except IndexError:\n print('Usage: python pixget.py [pixid] (save_path)')\n exit(1)\n\n # get the path\n if len(sys.argv) > 2:\n path = sys.argv[2]\n else:\n path = '.'\n\n imgInfoPool = []\n if get_image_url(pixid, imgInfoPool):\n exit(1)\n download_image(path, imgInfoPool)", "def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def main() -> None:\n try:\n input_image_path = argv[1]\n output_image_path = argv[2]\n\n color_spray( input_image_path, output_image_path )\n except IndexError:\n RuntimeError('Usage: INPUT_GRAY_IMAGE_PATH OUTPUT_RGB_IMAGE_PATH')\n return None", "def main(filename=\"./img/roulette.gif\", emblem=\"./img/cat.png\", fontname=\"disco\"):\n # Load resources\n logo = Image.open(emblem).resize((160, 160))\n font = ImageFont.truetype(f\"./img/font/{fontname}.ttf\", 48)\n\n # Run the animation\n colors = [COLOR_GREEN] + ([COLOR_RED, COLOR_BLACK] * 18)\n display_numbers = [str(x) for x in NUMBERS]\n frames, durations, ang = spinner.generate_animation(\n 90,\n 37,\n display_numbers,\n colors,\n logo,\n font,\n 416,\n 
448,\n 4,\n emblem_func=render_emblem,\n )\n\n # Save the GIF\n frames[0].save(\n filename,\n format=\"GIF\",\n append_images=frames[1:],\n save_all=True,\n duration=durations,\n loop=0,\n )\n\n # Figure out the winning prize\n width = 360 / 37\n offset = 180 + (width / 2)\n print(NUMBERS[math.floor(((ang + offset) % 360) / width)])", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def main():\n\n location = input(\"Enter the pathway to the directory containing the files\"\n \"to be converted:\\n\")\n os.chdir(location)\n gtiff(location)\n tiff(location)", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def main(argv=None):\n parser = argparse.ArgumentParser(\"Test TFL attention mechanism\")\n parser.add_argument('-i', '--image', type=str, help='Path to an image')\n parser.add_argument(\"-j\", \"--json\", type=str, help=\"Path to json GT for comparison\")\n parser.add_argument('-d', '--dir', type=str, help='Directory to scan images in')\n args = parser.parse_args(argv)\n default_base = '../data'\n if args.dir is None:\n args.dir = default_base\n flist = glob.glob(os.path.join(args.dir, '*_leftImg8bit.png'))\n for image in flist:\n json_fn = image.replace('_leftImg8bit.png', '_gtFine_polygons.json')\n if not os.path.exists(json_fn):\n json_fn = None\n test_find_tfl_lights(image, json_fn)\n if len(flist):\n print(\"You should now see some images, with the ground truth marked on them. Close all to quit.\")\n else:\n print(\"Bad configuration?? 
Didn't find any picture to show\")\n plt.show(block=True)", "def main(argv=None):\n parser = argparse.ArgumentParser(\"Test TFL attention mechanism\")\n parser.add_argument('-i', '--image', type=str, help='Path to an image')\n parser.add_argument(\"-j\", \"--json\", type=str, help=\"Path to json GT for comparison\")\n parser.add_argument('-d', '--dir', type=str, help='Directory to scan images in')\n args = parser.parse_args(argv)\n default_base = '../data'\n if args.dir is None:\n args.dir = default_base\n flist = glob.glob(os.path.join(args.dir, '*_leftImg8bit.png'))\n for image in flist:\n json_fn = image.replace('_leftImg8bit.png', '_gtFine_polygons.json')\n if not os.path.exists(json_fn):\n json_fn = None\n test_find_tfl_lights(image, json_fn)\n if len(flist):\n print(\"You should now see some images, with the ground truth marked on them. Close all to quit.\")\n else:\n print(\"Bad configuration?? Didn't find any picture to show\")\n plt.show(block=True)", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def pnghack(filepath, width=2000, height=2000):\t#cmd.png() doesnt work with api\n cmd.set('ray_trace_frames', 1) # Frames are raytraced before saving an image.\n cmd.viewport(width, height) # Set resolution\n cmd.mpng(filepath, 1, 1) # Use batch png mode with 1 frame only\n cmd.mplay() # cmd.mpng needs the animation to 'run'", "def process(image):\n pass", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def main():\n name = raw_input(\"Please enter the name: \")\n dob = raw_input(\"Please enter the DOB: \")\n font = ImageFont.truetype(\"arial.ttf\", 48)\n Tk().withdraw()\n image = askopenfilename()\n img = Image.open(image)\n img = img.rotate(180)\n draw = ImageDraw.Draw(img)\n draw.text((0, 10), name, (0, 0, 0), font=font)\n draw.text((0, 60), dob, (0, 0, 0), font=font)\n img.save('c:/~output/' + name + '.jpg')\n subprocess.Popen('explorer \"c:\\~output\"')", "def main(url):\n print(f\"Running main with URL = {url}...\")\n imagehits(downloaddata(url))", "def generate():\n content = request.json\n assert 'svg' in content\n\n bw = BaseWorkflow(content['svg'])\n alt_text = bw.execute()\n print(alt_text)\n\n return jsonify({'alt_text': alt_text})", "def sex(image):\n \n # run sextractor with different default parameters\n print('running SExtractor to {}...'.format(image))\n P = Popen('sextractor -c goto.sex '+image+' -CATALOG_NAME '+image[:-5]+'.cat', shell=True)\n P.wait()" ]
[ "0.66624415", "0.6060211", "0.60250473", "0.5991611", "0.5983499", "0.59735477", "0.5825115", "0.5824906", "0.5777183", "0.5756378", "0.57338184", "0.56626606", "0.566012", "0.5658301", "0.5580608", "0.5556367", "0.55391324", "0.5536966", "0.55120844", "0.54929626", "0.5479386", "0.5479386", "0.5474022", "0.54718834", "0.54623795", "0.5443061", "0.5422108", "0.5348128", "0.53476924", "0.5329085" ]
0.7995979
0
Tests the maximax fn
def test_maximax(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.6]]) R = common_metrics.maximax(f, maximise=True) expected = np.asarray( [1.0, 0.69]) assert np.allclose(R, expected) R = common_metrics.maximax(f, maximise=False) expected = np.asarray( [-0.5, -0.6]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maximizer(evaluate):\n def strategy(player, board):\n def score_move(move):\n return evaluate(player, Othello.make_move(move, player, list(board)))\n return max(Othello.legal_moves(player, board), key=score_move)\n return strategy", "def test_find_largest(self):\n largestValue = max(self.values)\n valueFound = self.tree.findLargest(self.tree.root)\n self.assertEqual(largestValue, valueFound)", "def max_value(board): # the X player wants to maximize the score\n if terminal(board):\n return utility(board), None\n else:\n v = -math.inf\n move = None\n for action in actions(board):\n val, _ = min_value(result(board, action))\n # Check if returned Value is less than v if not return v and current action\n if val > v:\n # Assign v the maximum value for future evaluation\n v = max(v,val)\n # Keep track of action\n move = action\n # If best move then return it\n if v == 1:\n return v, move\n return v, move", "def test_minimax_interface(self):\n h, w = 7, 7 # board size\n test_depth = 1\n starting_location = (5, 3)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n search_method = \"minimax\"\n heuristic = lambda g, p: 0. # return 0 everywhere\n\n # create a player agent & a game board\n agentUT = game_agent.CustomPlayer(\n test_depth, heuristic, iterative_search, search_method)\n agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search\n board = isolation.Board(agentUT, 'null_agent', w, h)\n\n # place two \"players\" on the board at arbitrary (but fixed) locations\n board.apply_move(starting_location)\n board.apply_move(adversary_location)\n\n for move in board.get_legal_moves():\n next_state = board.forecast_move(move)\n v, _ = agentUT.minimax(next_state, test_depth)\n\n self.assertTrue(type(v) == float,\n (\"Minimax function should return a floating \" +\n \"point value approximating the score for the \" +\n \"branch being searched.\"))", "def maxit(board):\n maxval = -2\n\n row_index = None\n col_index = None\n # if terminal board, terminate the function.\n if terminal(board) == True:\n result = utility(board)\n return (result, 0, 0) \n # for each possible move, calculate its utility, saving the maximum.\n for i in range(0, 3):\n for j in range(0, 3):\n if board[i][j] == EMPTY:\n board[i][j] = X\n (m, mini, minj) = minit(board)\n if m > maxval:\n maxval=m\n row_index=i\n col_index=j\n board[i][j] = EMPTY\n return (maxval, row_index, col_index)", "def max_value(gameState):\n if terminal_test(gameState): return -1", "def mini_max(board: Board, alpha: float, beta: float, depth: int, max_player: Player, cache: MiniMaxCache2) \\\n -> MiniMaxResult:\n\n # first, let's see if we already cached this value\n cached_value = cache.get(board, max_player)\n if cached_value is not None:\n return cached_value\n\n # If we hit max depth or the board is in a final state, calculate and return the heuristic.\n # We cannot decide about the best move here, so leave it empty.\n if depth == 0 or board.get_state() != GameState.ONGOING:\n return MiniMaxResult(None, calculate_heuristic(board, max_player).value)\n\n # collect a list of available moves from the board and order them according to moveOrder\n # which orders moves based on their distance to the middle column\n available_moves = board.actions()\n available_moves = [move for move in moveOrder if move in available_moves]\n\n # check if we are maximizing right now\n is_maximizing = (board.player == max_player)\n\n # store min and max value, initialize with boundaries\n max_value, min_value = alpha, beta\n # initialize 
the best move randomly\n best_move: Column = random.choice(available_moves)\n\n # enumerate all available moves. Recurse for each while respecting is_maximizing and adjusting\n # min_value and max_value accordingly.\n for move in available_moves:\n # create a board with the new state\n child_board = board.drop_piece_copy(move)\n\n if is_maximizing:\n min_result = mini_max(child_board, max_value, beta, depth - 1, max_player, cache)\n if min_result == np.inf:\n # we cannot perform better than +inf.\n return MiniMaxResult(move, np.inf)\n elif min_result.value > max_value:\n max_value = min_result.value\n best_move = move\n if max_value >= beta:\n # beta pruning\n break\n else:\n max_result = mini_max(child_board, alpha, min_value, depth - 1, max_player, cache)\n if max_result.value == -np.inf:\n # we cannot perform better than -inf.\n return MiniMaxResult(move, -np.inf)\n elif max_result.value < min_value:\n min_value = max_result.value\n best_move = move\n if min_value <= alpha:\n # alpha pruning\n break\n\n # return the best move and according heuristic\n return MiniMaxResult(best_move, max_value if is_maximizing else min_value)", "def play_minimax(self, board, depth, max_player, game):\n if depth == 0 or (game.check_if_over() and game.winner != 0):\n return board.evaluate(), board\n\n elif max_player:\n max_eval = float('-inf')\n best_action = None\n all_valid_actions = self.simulate_all_valid_actions(board, game.board.top_opponent_color)\n random.shuffle(all_valid_actions) # Remove the shuffling if the minimax deapth increases\n for action in all_valid_actions:\n evaluation = self.play_minimax(action, depth-1, False, game)[0]\n max_eval = max(max_eval, evaluation)\n if max_eval == evaluation:\n best_action = action\n return max_eval, best_action\n\n else:\n min_eval = float('inf')\n best_action = None\n all_valid_actions = self.simulate_all_valid_actions(board, game.board.bottom_player_color)\n random.shuffle(all_valid_actions) # Remove the shuffling if the minimax deapth increases\n for action in all_valid_actions:\n evaluation = self.play_minimax(action, depth-1, True, game)[0]\n min_eval = min(min_eval, evaluation)\n if min_eval == evaluation:\n best_action = action\n return min_eval, best_action", "def max(x):\n pass", "def max_val(board):\n v = -math.inf\n if terminal(board):\n return utility(board)\n for action in actions(board):\n v = max(v,min_val(result(board,action)))\n return v", "def __mini_max_ab(self,\n board: Board,\n depth: int,\n is_max: bool,\n alpha: int,\n beta: int,\n states: List[Board]\n ) -> Tuple[int, Board]:\n self.nodes_count += 1\n if depth == 0:\n return self.__moves_available(board), states[0]\n if self.get_num_of_moves(board, self.opponent_color) == 0:\n return 9999, states[0]\n all_moves = self.get_all_moves(board, self.player_color)\n\n if is_max:\n best = (-9999, board)\n for move in all_moves:\n next_state = self.__mini_max_ab(move, depth - 1, False, alpha, beta, states + [move])\n best = max(best, next_state, key=lambda x: x[0])\n alpha = max(alpha, best[0])\n if beta <= alpha:\n self.pruning_count += 1\n break\n return best\n else:\n best = (9999, board)\n for move in all_moves:\n next_state = self.__mini_max_ab(move, depth - 1, True, alpha, beta, states + [move])\n best = min(best, next_state, key=lambda x: x[0])\n beta = min(beta, best[0])\n if beta <= alpha:\n self.pruning_count += 1\n break\n return best", "def test_maxIndex(self):\t\t\n self.assertEqual(attempt.maxIndexZ, 113)\n self.assertEqual(attempt.maxIndexW, 134)", "def test_minimax():\n 
board = Board(*TEST_AGRU1)\n comp = Computer(board, COMP_DISK, HUMAN_DISK)\n comp.b.board = [\n [COMP_DISK, COMP_DISK, 0, 0, 0],\n [HUMAN_DISK, HUMAN_DISK, HUMAN_DISK, 0, 0],\n ]\n comp.b.columns_list = [\n [COMP_DISK, HUMAN_DISK],\n [COMP_DISK, HUMAN_DISK],\n [HUMAN_DISK],\n [],\n [],\n ]\n comp.b.add_to_board(0, INDEX_TWO, COMP_DISK)\n assert comp.minimax(comp.b, 1, MAX_START_SCORE, MINI_START_SCORE, False) \\\n == -1\n\n board = Board(*TEST_AGRU1)\n comp = Computer(board, COMP_DISK, HUMAN_DISK)\n comp.b.board = [\n [HUMAN_DISK, 0, HUMAN_DISK, HUMAN_DISK, 0],\n [HUMAN_DISK, 0, COMP_DISK, COMP_DISK, COMP_DISK],\n ]\n comp.b.columns_list = [\n [HUMAN_DISK, HUMAN_DISK],\n [],\n [HUMAN_DISK, COMP_DISK],\n [HUMAN_DISK, COMP_DISK],\n [],\n ]\n comp.b.add_to_board(1, 1, COMP_DISK)\n assert comp.minimax(comp.b, 1, MAX_START_SCORE, MINI_START_SCORE, False) \\\n == SCORE", "def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore", "def maxValue(board, cur_optimal_val, player):\n # base case (leave recursion)\n if isTerminal(board):\n return getUtility(board)\n\n val = -math.inf\n for action in getActions(board):\n val = max(val, minValue(getResult(board, action), cur_optimal_val, player))\n if (\n (player == O and (val >= cur_optimal_val or val == 1)) or\n (player == X and val == 1)\n ):\n break\n return val", "def expectimax(game, depth, value_function):\n\n # base case: reached depth\n if depth == 0:\n return value_function(game), 'null'\n\n possible = possible_moves(game)\n\n # base case: no more moves\n if not possible:\n return value_function(game), 'null'\n\n # get the possible outcomes of each possible move\n expanded = [(move, move_made_all(game, move)) for move in possible]\n\n move_value = []\n # calculate expected value of each possible move\n for move, games in expanded:\n value = sum(p * expectimax(g, depth - 1, value_function)[0] for p, g in games)\n move_value.append((value, move))\n\n return max(move_value)", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def test_eval_one_max(self):\n f0 = np.ones((10, 5))\n self.assertTrue(np.isinf(eval_one_max(f0)[0]))", "def test_max_begin(self):\n self.assertEqual(max_integer([5, 3, 4, 1]), 5)", "def test_get_maximum():\n assert get_maximum({\"numbers\": [4, 3, 2, 1]}) == {\"maximum\": 4}", "def __mini_max(self, board: Board, depth: int, is_max: bool, states: List[Board]) -> Tuple[int, Board]:\n self.nodes_count += 1\n if depth == 0:\n return self.__moves_available(board), states[0]\n all_moves = self.get_all_moves(board, self.player_color)\n\n if self.get_num_of_moves(board, self.opponent_color) == 0:\n return 9999, states[0]\n\n func = max if is_max else min\n return func([self.__mini_max(m, depth - 1, not is_max, states + [m]) for m in all_moves], key=lambda x: x[0])", "def test_result(self):\n self.assertIsNone(max_integer([]))\n self.assertEqual(max_integer([1, 2, 3, 4]), 4)\n self.assertEqual(max_integer([4, 
1, 2, 3]), 4)\n self.assertEqual(max_integer([1, 4, 3, 2]), 4)\n self.assertEqual(max_integer([-34, -2, -3, -37]), -2)\n self.assertEqual(max_integer([-231, 2, -33, -24]), 2)\n self.assertEqual(max_integer([23.4, 34.6, 56.5, 60.2]), 60.2)\n self.assertEqual(max_integer([1]), 1)\n self.assertEqual(max_integer([56.3]), 56.3)\n self.assertEqual(max_integer([-34]), -34)\n self.assertEqual(max_integer([\"holberton\", \"school\",\"student\"]), \"student\")", "def expectimax_move(game, method='score'):\n\n if method == 'score':\n def val(g):\n return g[1]\n elif method == 'empty':\n val = empty_squares\n elif method == 'gradient':\n val = gradient_value\n else:\n print('Invalid method given to expectimax function')\n exit(1)\n\n _, move = expectimax(game, 2, val)\n return move", "def max_value(board, max_util, min_util, depth):\r\n \r\n global nodes_generated \r\n global min_prune\r\n global max_prune\r\n global max_depth\r\n \r\n nodes_generated += 1\r\n max_depth = max(max_depth,depth)\r\n \r\n if cutoff_search(board, depth):\r\n return evaluation(board)\r\n v = -1000\r\n moves = legal_moves(board,1)\r\n for move in moves:\r\n temp_board = camelot_board.Camelot(list(board.white),list(board.black))\r\n state = action(temp_board, move, 1)\r\n v = max(v, min_value(state, max_util, min_util, depth + 1))\r\n if v >= min_util:\r\n max_prune += 1\r\n return v\r\n max_util = max(max_util, v)\r\n return v", "def test_minimax(self):\n \n # empty board\n board = \"EEEEEEEEE\"\n res = MinimaxApiView.get_next_move(board, 'O')\n self.assertEqual(res, 0)\n board = \"OEEEEEEEE\"\n res = MinimaxApiView.get_next_move(board, 'X')\n self.assertEqual(res, 4)", "def max_target(board, depth, alpha, beta):\n if terminal(board) or depth == DEPTH:\n return utility(board)\n\n best_val = -math.inf\n for action in actions(board):\n val = min_target(result(board, action), depth+1, alpha, beta)\n best_val = max(best_val, val)\n alpha = max(alpha, best_val)\n if beta <= alpha:\n break\n\n return best_val", "def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def _max_player(self, board: Board, alpha: int, beta: int, depth: int) -> (int, int):\n base_alpha = alpha\n value = -math.inf\n best_move = 0\n possible_moves = board.get_valid_moves()\n for move in possible_moves:\n board.make_move(move)\n\n # Checks to see if this board is in the transposition table, if it is,\n # We can save time by not computing it again\n if board.hash in self._transposition_table and self._transposition_table[board.hash][2] >= depth:\n entry = self._transposition_table[board.hash]\n if entry[1] == 'exact':\n score = entry[0]\n if score > value:\n value = score\n best_move = move\n elif entry[1] == 'low':\n alpha = max(alpha, entry[0])\n elif entry[1] == 'high':\n beta = min(beta, entry[0])\n else: # If it's not in the table, we need to calculate it\n score = self.minimax(board, alpha, beta, depth - 1, -1)[1]\n if score > value:\n value = score\n best_move = move\n alpha = max(value, alpha)\n if alpha >= beta:\n if value <= base_alpha:\n entry = (value, 'high', depth)\n elif value >= beta:\n entry = (value, 'low', depth)\n else:\n entry = (value, 'exact', depth)\n\n # Saves this value into the table so it doesn't need to be calculated again\n self._transposition_table[board.hash] = entry\n board.un_move(move)\n return best_move, value\n else:\n board.un_move(move)\n\n # Mutates the board, and thus it's hash, to save and use to store in the table\n board.make_move(best_move)\n hash_value = board.hash\n 
board.un_move(best_move)\n\n if value <= base_alpha:\n entry = (value, 'high', depth)\n elif value >= beta:\n entry = (value, 'low', depth)\n else:\n entry = (value, 'exact', depth)\n\n # Saves this value into the table so it doesn't need to be calculated again\n self._transposition_table[hash_value] = entry\n return best_move, value", "def max_value(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n legal_moves = game.get_legal_moves() # obtain all legal moves for game, ACTIONs that can be taken\n best_score = -math.inf # abstraction assignment of neg. infinity(lowest possible value for MAX score)\n for m in legal_moves: # iterate through all available actions\n new_state = game.forecast_move(m) # for each available move, forecast the resulting state from that ACTION\n # RESULT of ACTION\n score = self.max_value(new_state, depth - 1) # recursively uses the new state\n best_score = max(best_score,score) # calculates the minimizing score between the states\n return best_score # propagates minimizing score for given state", "def test_max_end(self):\n self.assertEqual(max_integer([5, 3, 4, 8]), 8)" ]
[ "0.7044211", "0.6821906", "0.6805151", "0.6742624", "0.67249966", "0.6700263", "0.6686053", "0.661849", "0.66156197", "0.66018206", "0.659575", "0.6558958", "0.6555396", "0.655198", "0.6548324", "0.6520838", "0.64753556", "0.64723724", "0.6428882", "0.6411126", "0.6392643", "0.6386149", "0.63836783", "0.6369561", "0.6360621", "0.6345198", "0.63445896", "0.63374406", "0.6327948", "0.6326767" ]
0.79445547
0
Tests the Laplace fn
def test_laplace(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.6]]) R = common_metrics.laplace(f, maximise=True) expected = np.asarray( [0.83, 0.63]) assert np.allclose(R, expected) R = common_metrics.laplace(f, maximise=False) expected = np.asarray( [-0.83, -0.63]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def a_test_laplace():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('Laplace')\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def a_test2_laplace():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('Laplace')\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def finnLaplace(u):\n laplace=u[0:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, 0:-2] + u[1:-1, 2:] - 4 * u[1:-1, 1:-1]\n return laplace", "def solve_laplace_equation(\n grid: GridBase, bc: \"BoundariesData\", label: str = \"Solution to Laplace's equation\"\n) -> ScalarField:\n rhs = ScalarField(grid, data=0)\n return solve_poisson_equation(rhs, bc=bc, label=label)", "def ft_inv_laplace(a, fcc=False):\n k_sq = 1.0\n for i in range(a.shape[0]):\n for j in range(a.shape[1]):\n for l in range(a.shape[2]):\n if (i == 0) and (j == 0) and (l == 0):\n a[i,j,l] = 0\n continue\n if i > a.shape[0]/2:\n i = a.shape[0] + 1 - i\n if j > a.shape[0]/2:\n j = a.shape[0] + 1 - j\n if l > a.shape[0]/2:\n l = a.shape[0] + 1 - l\n if fcc:\n k_sq = 1.5*(i**2 + j**2 + l**2) - i*j - j*l - i*l\n else:\n k_sq = i**2 + j**2 + l**2\n a[i,j,l] = a[i,j,l]/(k_sq)", "def vector_laplace(arr, out=None):\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out", "def test_lorentz_broadening():\n v0 = 100\n I = 10\n w = 1.0\n\n assert lorentz(v0, v0, I, w) == approx(5)", "def make_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n dr_2, dz_2 = 1 / bcs.grid.discretization ** 2\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))\n def laplace(arr, out=None):\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n out[i, j] = (\n 2 * (arr[i + 1, j] - arr_c) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n if dim_r == 1:\n continue # deal with singular radial dimension\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], arr[i + 1, j]\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], value_outer(arr, (i, j))\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n return out\n\n return laplace # type: ignore", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % 
self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def test_displace_xyz(self):\n xyz = {'symbols': ('N', 'H', 'H', 'N', 'H', 'H', 'N', 'H'), 'isotopes': (14, 1, 1, 14, 1, 1, 14, 1),\n 'coords': ((-0.447353, 0.680147, -0.090895), (-0.451171, 1.142121, 0.814701),\n (0.674665, 0.380874, -0.232312), (-1.228559, -0.471121, -0.003769),\n (-1.815912, -0.503291, 0.815499), (-1.782278, -0.570663, -0.841674),\n (1.909708, -0.148579, -0.070033), (1.740013, -0.849123, 0.669738))}\n displacement = np.array([[-0.03, -0.02, -0.0], [0.04, -0.08, 0.05], [0.92, -0.34, 0.08], [0.0, 0.04, 0.0],\n [0.0, -0.05, 0.01], [0.09, -0.09, -0.04], [-0.05, 0.02, -0.01], [0.0, 0.03, -0.01]],\n np.float64)\n xyz_1, xyz_2 = converter.displace_xyz(xyz=xyz, displacement=displacement)\n expected_xyz_1 = {'symbols': ('N', 'H', 'H', 'N', 'H', 'H', 'N', 'H'), 'isotopes': (14, 1, 1, 14, 1, 1, 14, 1),\n 'coords': ((-0.4754185110901118, 0.6614366592732588, -0.090895),\n (-0.44113195107970876, 1.1220429021594176, 0.827249811150364),\n (0.9055631251666977, 0.29554208417752476, -0.2122339021594176),\n (-1.228559, -0.43370031854651764, -0.003769),\n (-1.815912, -0.515839811150364, 0.8180087622300728),\n (-1.7596901399293448, -0.5932508600706552, -0.8517130489202912),\n (1.862932148183147, -0.1298686592732588, -0.07938817036337059),\n (1.740013, -0.8415937133097816, 0.6672282377699272))}\n expected_xyz_2 = {'symbols': ('N', 'H', 'H', 'N', 'H', 'H', 'N', 'H'), 'isotopes': (14, 1, 1, 14, 1, 1, 14, 1),\n 'coords': ((-0.4192874889098882, 0.6988573407267411, -0.090895),\n (-0.4612100489202912, 1.1621990978405823, 0.802152188849636),\n (0.44376687483330224, 0.4662059158224752, -0.2523900978405824),\n (-1.228559, -0.5085416814534824, -0.003769),\n (-1.815912, -0.49074218884963605, 0.8129892377699272),\n (-1.8048658600706553, -0.5480751399293449, -0.8316349510797089),\n (1.956483851816853, -0.16728934072674118, -0.060677829636629405),\n (1.740013, -0.8566522866902183, 0.6722477622300729))}\n self.assertEqual(xyz_1, expected_xyz_1)\n self.assertEqual(xyz_2, expected_xyz_2)\n\n xyz_1, xyz_2 = converter.displace_xyz(xyz=xyz, displacement=displacement, amplitude=0.5)\n expected_xyz_1 = {'symbols': ('N', 'H', 'H', 'N', 'H', 'H', 'N', 'H'), 'isotopes': (14, 1, 1, 14, 1, 1, 14, 1),\n 'coords': ((-0.5034840221802236, 0.6427263185465175, -0.090895),\n (-0.4310929021594176, 1.101964804318835, 0.839798622300728),\n (1.1364612503333955, 0.2102101683550495, -0.19215580431883517),\n (-1.228559, -0.39627963709303526, -0.003769),\n (-1.815912, -0.528388622300728, 0.8205185244601456),\n (-1.7371022798586897, -0.6158387201413105, -0.8617520978405825),\n (1.816156296366294, -0.11115831854651761, -0.08874334072674119),\n (1.740013, -0.8340644266195631, 0.6647184755398544))}\n expected_xyz_2 = {'symbols': ('N', 'H', 'H', 'N', 'H', 'H', 'N', 'H'), 'isotopes': (14, 1, 1, 14, 1, 1, 14, 1),\n 'coords': ((-0.3912219778197764, 0.7175676814534824, -0.090895),\n (-0.4712490978405824, 1.1822771956811648, 0.789603377699272),\n (0.21286874966660452, 0.5515378316449505, -0.2724681956811648),\n (-1.228559, -0.5459623629069648, -0.003769),\n (-1.815912, -0.47819337769927206, 0.8104794755398543),\n (-1.8274537201413104, -0.5254872798586896, -0.8215959021594176),\n (2.003259703633706, -0.18599968145348236, -0.05132265927325881),\n (1.740013, -0.8641815733804368, 0.6747575244601457))}\n self.assertEqual(xyz_1, expected_xyz_1)\n self.assertEqual(xyz_2, expected_xyz_2)", "def laplace(f, g_inv, 
g_det, X):\n r = 0\n for i in range(len(X)):\n for j in range(len(X)):\n r += g_inv[i, j]*f.diff(X[i]).diff(X[j])\n for sigma in range(len(X)):\n for alpha in range(len(X)):\n r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \\\n f.diff(X[alpha]) / (2*g_det)\n return r", "def test_alpine1(self):\n fun = get_problem('alpine1', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def make_vector_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n laplace_r = make_laplace(bcs.extract_component(0))\n laplace_z = make_laplace(bcs.extract_component(1))\n laplace_phi = make_laplace(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def vector_laplace(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out\n\n return vector_laplace # type: ignore", "def laplacian(expr):\n\n delop = Del()\n if expr.is_Vector:\n return (gradient(divergence(expr)) - curl(curl(expr))).doit()\n return delop.dot(delop(expr)).doit()", "def test_grad_vector(func, motion, optimized, preserve_result, x):\n utils.test_reverse_array(func, motion, optimized, preserve_result, x)", "def calculate_laplace_coeff(alpha, j, s):\n return integrate.quad(lambda psi, alpha, j, s: np.cos(j*psi)/(1-2*alpha*np.cos(psi)+alpha**2)**s,\n 0, 2*np.pi, args=(alpha, j, s,))[0]/np.pi", "def laplace(tensor, padding='replicate', axes=None, use_fft_for_periodic=False):\n rank = spatial_rank(tensor)\n if padding is None or padding == 'valid':\n pass # do not pad tensor\n elif padding in ('circular', 'wrap') and use_fft_for_periodic:\n return fourier_laplace(tensor)\n else:\n tensor = math.pad(tensor, _get_pad_width_axes(rank, axes, val_true=[1, 1], val_false=[0, 0]), padding)\n # --- convolutional laplace ---\n if axes is not None:\n return _sliced_laplace_nd(tensor, axes)\n if rank == 2:\n return _conv_laplace_2d(tensor)\n elif rank == 3:\n return _conv_laplace_3d(tensor)\n else:\n return _sliced_laplace_nd(tensor)", "def laplace_filter(F, M=None):\n\n if not M:\n M = np.ones_like(F)\n\n return 0.5 * (laplace_X(laplace_Y(F, M), M) +\n laplace_Y(laplace_X(F, M), M))", "def test_extreme_values(self):\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0", "def test_lorentz_width():\n v0 = 100\n I = 1\n w = 0.5\n\n assert lorentz(v0 - w/2, v0, I, w) == approx(0.5)\n assert lorentz(v0 + w/2, v0, I, w) == approx(0.5)", "def Laplace_evidence(self):\r\n A = self.Laplace_covariance()\r\n try:\r\n hld = np.sum(np.log(np.diag(jitchol(A)[0])))\r\n except:\r\n return np.nan\r\n return 0.5 * self._get_params().size * np.log(2 * np.pi) + self.log_likelihood() - hld", "def test_reverse(self):\n t = Linearize()\n assert t.reverse(1) == numpy.e", "def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True, axes=None):\n\n try:import pods\n except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')\n data = pods.datasets.toy_linear_1d_classification(seed=seed)\n Y = data['Y'][:, 0:1]\n Y[Y.flatten() == -1] = 0\n\n likelihood = GPy.likelihoods.Bernoulli()\n laplace_inf = GPy.inference.latent_function_inference.Laplace()\n kernel = GPy.kern.RBF(1)\n\n # Model definition\n m = 
GPy.core.GP(data['X'], Y, kernel=kernel, likelihood=likelihood, inference_method=laplace_inf)\n\n # Optimize\n if optimize:\n try:\n print(\"Pre opt\")\n print(m)\n m.optimize('bfgs', messages=1)\n print(\"Laplace opt 1\")\n print(m)\n m.optimize('bfgs', messages=1)\n print(\"Laplace opt 2\")\n print(m)\n m.optimize('bfgs', messages=1)\n print(\"Laplace opt 3\")\n print(m)\n except Exception as e:\n return m\n\n # Plot\n if plot:\n from matplotlib import pyplot as plt\n if axes is None:\n fig, axes = plt.subplots(2, 1)\n m.plot_f(ax=axes[0])\n m.plot(ax=axes[1])\n\n print(m)\n return m", "def make_laplace_from_matrix(\n matrix, vector\n) -> Callable[[np.ndarray, Optional[np.ndarray]], np.ndarray]:\n mat = matrix.tocsc()\n vec = vector.toarray()[:, 0]\n\n def laplace(arr: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:\n \"\"\"apply the laplace operator to `arr`\"\"\"\n result = mat.dot(arr.flat) + vec\n if out is None:\n out = result.reshape(arr.shape)\n else:\n out[:] = result.reshape(arr.shape)\n return out\n\n return laplace", "def Laplace_evidence(self):\n A = self.Laplace_covariance()\n try:\n hld = np.sum(np.log(np.diag(jitchol(A)[0])))\n except:\n return np.nan\n return 0.5*self._get_params().size*np.log(2*np.pi) + self.log_likelihood() - hld", "def Laplace_covariance(self):\n #TODO add in the prior contributions for MAP estimation\n #TODO fix the hessian for tied, constrained and fixed components\n if hasattr(self, 'log_likelihood_hessian'):\n A = -self.log_likelihood_hessian()\n\n else:\n print \"numerically calculating hessian. please be patient!\"\n x = self._get_params()\n def f(x):\n self._set_params(x)\n return self.log_likelihood()\n h = ndt.Hessian(f)\n A = -h(x)\n self._set_params(x)\n # check for almost zero components on the diagonal which screw up the cholesky\n aa = np.nonzero((np.diag(A)<1e-6) & (np.diag(A)>0.))[0]\n A[aa,aa] = 0.\n return A", "def laplace(arr, out=None):\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n out[i, j] = (\n 2 * (arr[i + 1, j] - arr_c) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n if dim_r == 1:\n continue # deal with singular radial dimension\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], arr[i + 1, j]\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))\n arr_r_l, arr_r_h = arr[i - 1, j], value_outer(arr, (i, j))\n out[i, j] = (\n (arr_r_h - 2 * arr_c + arr_r_l) * dr_2\n + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2\n + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2\n )\n return out", "def Laplace_covariance(self):\r\n # TODO add in the prior contributions for MAP estimation\r\n # TODO fix the hessian for tied, constrained and fixed components\r\n if hasattr(self, 'log_likelihood_hessian'):\r\n A = -self.log_likelihood_hessian()\r\n\r\n else:\r\n print \"numerically calculating Hessian. 
please be patient!\"\r\n x = self._get_params()\r\n def f(x):\r\n self._set_params(x)\r\n return self.log_likelihood()\r\n h = ndt.Hessian(f) # @UndefinedVariable\r\n A = -h(x)\r\n self._set_params(x)\r\n # check for almost zero components on the diagonal which screw up the cholesky\r\n aa = np.nonzero((np.diag(A) < 1e-6) & (np.diag(A) > 0.))[0]\r\n A[aa, aa] = 0.\r\n return A", "def test_coord_preceding_fs(self):" ]
[ "0.72667736", "0.7121572", "0.6567624", "0.6396213", "0.6273471", "0.60401195", "0.58256054", "0.56461614", "0.5599439", "0.5565884", "0.55239224", "0.55215263", "0.5492814", "0.5412461", "0.5412252", "0.53845686", "0.53729516", "0.53642994", "0.53386897", "0.53046286", "0.52903473", "0.52568984", "0.5248772", "0.5212246", "0.52106816", "0.520264", "0.52014095", "0.5199678", "0.519624", "0.51936936" ]
0.7378338
0
Tests the minimax_regret fn
def test_minimax_regret(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.6]]) R = common_metrics.minimax_regret(f, maximise=True) expected = np.asarray( [-0.1, -0.4]) assert np.allclose(R, expected) R = common_metrics.minimax_regret(f, maximise=False) expected = np.asarray( [-0.4, -0.1]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_regress(self):\r\n x = [0, 12, 29.5, 43, 53, 62.5, 75.5, 85, 93]\r\n y = [8.98, 8.14, 6.67, 6.08, 5.90, 5.83, 4.68, 4.20, 3.72]\r\n self.assertFloatEqual(regress(x, y), (-0.05322, 8.7038), 0.001)\r\n # higher precision from OpenOffice\r\n self.assertFloatEqual(regress(x, y), (-0.05322215, 8.70402730))\r\n\r\n # add test to confirm no overflow error with large numbers\r\n x = [32119, 33831]\r\n y = [2.28, 2.43]\r\n exp = (8.761682243E-05, -5.341209112E-01)\r\n self.assertFloatEqual(regress(x, y), exp, 0.001)", "def test_percentile_regret():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.percentile_regret(f, maximise=True, percentile=0.5)\n expected = np.asarray(\n [0.0, -0.3])\n assert np.allclose(R, expected)\n R = common_metrics.percentile_regret(f, maximise=False, percentile=0.5)\n expected = np.asarray(\n [-0.3, 0.0])\n assert np.allclose(R, expected)", "def test_regress_R2(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_R2(x, y)\r\n self.assertFloatEqual(result, 0.99171419347896)", "def minim(self) -> int:\n\t\treturn 2", "def Example_1(nb_param):\r\n print \"\\n** Example_1: Finding the minimum of the Rosenbrock function with {0} variables **\".format(nb_param)\r\n\r\n Ex = optim_wrapper()\r\n X0 = np.zeros(nb_param)\r\n lim = [(-2.0,2.0)]*nb_param\r\n Ex.set_X0(X0)\r\n Ex.set_lim(lim)\r\n Ex.set_norm_count(nb_param**2*2)\r\n Ex.set_nb_best(nb_param**2)\r\n Ex.set_obj_func(obj)\r\n Ex.set_multi_proc(1)\r\n Ex.set_wrapper()\r\n Ex.test_test()\r\n print Ex\r\n\r\n X_solution = [1.0]*nb_param\r\n res_string = \"Results of the optimisation: {:03.4f}, expected results: {:03.4f}\".format(obj(Ex.get_res()),obj(X_solution))\r\n print res_string\r\n print \"*\"*len(res_string)", "def test_simple_reg_fn():\n reg_fn = lambda strengths: 2 * strengths\n reg_tracker = InterfaceRegTracker(1., reg_fn=reg_fn)\n strengths = Variable(torch.ones([10]))\n reg_tracker.regularize(strengths)\n result = sum(reg_tracker.loss.data)\n assert result == 2., \\\n \"{} != {}\".format(result, 2.)", "def test_regress_origin(self):\r\n x = array([1, 2, 3, 4])\r\n y = array([4, 2, 6, 8])\r\n self.assertFloatEqual(regress_origin(x, y), (1.9333333, 0))\r\n\r\n # add test to confirm no overflow error with large numbers\r\n x = [32119, 33831]\r\n y = [2.28, 2.43]\r\n exp = (7.1428649481939822e-05, 0)\r\n self.assertFloatEqual(regress_origin(x, y), exp, 0.001)", "def test_regress_residuals(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_residuals(x, y)\r\n self.assertFloatEqual(result, [-0.1, 0.08, -0.14, 0.44, -0.28])", "def test_generic(args):\n (tol,cons,sol,test_func,low,high,shape) = args\n #if shape == 0:\n #x0 = np.random.uniform(0, 2, (1000, 5))\n #print('here')\n x0 = init_feasible(cons, low=low, high=high, shape=shape)\n t0 = time.time()\n res = minimize_qpso(test_func, x0, tol=tol)\n t1= time.time()\n converged = res.success\n qpso_converged = 0\n qpso_nit = res.nit\n try:\n np.testing.assert_array_almost_equal(sol, res.x, 3)\n except:\n qpso_converged = 1\n # if high is None:\n #x0 = np.random.uniform(0, 2, (1000, 5))\n # else:\n x0 = init_feasible(cons, low=low, high=high, shape=shape)\n t2= time.time()\n res = minimize(test_func,x0, tol=tol)\n t3 = time.time()\n converged = res.success\n pso_converged = 0\n pso_nit = res.nit\n assert converged, res.message\n try:\n np.testing.assert_array_almost_equal(sol, res.x, 3)\n except:\n pso_converged = 1\n \n return 
qpso_converged, qpso_nit ,t1-t0, pso_converged , pso_nit , t3-t2", "def test_minimise_reentrant():\n # Set parameters for number of iterations and evaluation frequency\n n_iters_1 = 23\n eval_every_1 = 5\n n_iters_2 = 31\n eval_every_2 = 3\n # Create model and data\n np.random.seed(6307)\n model = get_random_network(input_dim=1, output_dim=1)\n sin_data = data.Sinusoidal(input_dim=1, output_dim=1, freq=1)\n # Open result file\n results_filename = \"Test AbstractOptimiser.optimise method re-entrant.txt\"\n results_path = os.path.join(output_dir, results_filename)\n with open(results_path, \"w\") as results_file:\n # Create Result object\n result = optimisers.Result(\n name=\"SGD without line search\", \n verbose=True,\n file=results_file\n )\n # Call gradient descent function twice\n result_ls = optimisers.gradient_descent(\n model,\n sin_data,\n terminator=optimisers.Terminator(i_lim=n_iters_1),\n evaluator=optimisers.Evaluator(i_interval=eval_every_1),\n result=result,\n display_summary=False\n )\n result_ls = optimisers.gradient_descent(\n model,\n sin_data,\n terminator=optimisers.Terminator(i_lim=n_iters_2),\n evaluator=optimisers.Evaluator(i_interval=eval_every_2),\n result=result\n )\n # Check values in time column are monotonically increasing\n time_values = result.get_values(optimisers.results.columns.Time)\n for i in range(1, len(time_values)):\n assert time_values[i] > time_values[i - 1]\n # Check values in iteration column are monotonically increasing\n iteration_values = result.get_values(optimisers.results.columns.Iteration)\n for i in range(1, len(iteration_values)):\n assert iteration_values[i] > iteration_values[i - 1]\n # Assert that the list of iteration values is exactly what we expect\n all_iter_vals = (\n list(range(0, n_iters_1, eval_every_1)) +\n list(range(n_iters_1, n_iters_1 + n_iters_2, eval_every_2)) +\n [n_iters_1 + n_iters_2]\n )\n assert all_iter_vals == iteration_values", "def evaluate_reg_param(inputs, targets, folds, centres, scale, reg_params=None):\n # create the feature mappoing and then the design matrix \n feature_mapping = construct_rbf_feature_mapping(centres,scale) \n designmtx = feature_mapping(inputs) \n # choose a range of regularisation parameters\n if reg_params is None:\n reg_params = np.logspace(-15,0)\n num_values = reg_params.size\n num_folds = len(folds)\n # create some arrays to store results\n train_mean_errors = np.zeros(num_values)\n test_mean_errors = np.zeros(num_values)\n train_stdev_errors = np.zeros(num_values)\n test_stdev_errors = np.zeros(num_values)\n # \n for r, reg_param in enumerate(reg_params):\n # r is the index of reg_param, reg_param is the regularisation parameter\n # cross validate with this regularisation parameter\n train_errors, test_errors = cv_evaluation_linear_model(\n designmtx, targets, folds, reg_param=reg_param)\n # we're interested in the average (mean) training and testing errors\n train_mean_error = np.mean(train_errors)\n test_mean_error = np.mean(test_errors)\n train_stdev_error = np.std(train_errors)\n test_stdev_error = np.std(test_errors)\n # store the results\n train_mean_errors[r] = train_mean_error\n test_mean_errors[r] = test_mean_error\n train_stdev_errors[r] = train_stdev_error\n test_stdev_errors[r] = test_stdev_error\n\n #Get test error without reg param\n blank ,test_errors_without_reg = cv_evaluation_linear_model(designmtx,targets,folds,reg_param=None)\n test_mean_error_without_reg_param = np.mean(test_errors_without_reg)\n\n # Now plot the results\n fig, ax = plot_train_test_errors(\n 
\"$\\lambda$\", reg_params, train_mean_errors, test_mean_errors)\n # Here we plot the error ranges too: mean plus/minus 1 standard error.\n # 1 standard error is the standard deviation divided by sqrt(n) where\n # n is the number of samples. \n # (There are other choices for error bars.)\n # train error bars\n lower = train_mean_errors - train_stdev_errors/np.sqrt(num_folds)\n upper = train_mean_errors + train_stdev_errors/np.sqrt(num_folds)\n ax.fill_between(reg_params, lower, upper, alpha=0.2, color='b')\n # test error bars\n lower = test_mean_errors - test_stdev_errors/np.sqrt(num_folds)\n upper = test_mean_errors + test_stdev_errors/np.sqrt(num_folds)\n ax.fill_between(reg_params, lower, upper, alpha=0.2, color='r')\n #plot green line to represent no reg params\n xlim = ax.get_xlim()\n ax.plot(xlim, test_mean_error_without_reg_param * np.ones(2), 'g:')\n ax.set_xscale('log')", "def test_griewank(self):\n fun = get_problem('griewank', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def do_we_need_to_reoptimize(MFE):\n # check that we found a solution and run optimizer again if not\n MFE.CalculateMeritFunction()\n Nop = MFE.NumberOfOperands\n REOPTIMIZE = False\n for j in range(6):\n op = MFE.GetOperandAt(Nop - j)\n contribution = op.Contribution\n print(\"Contribution %i: %1.2e\" % (j, contribution))\n REOPTIMIZE = REOPTIMIZE or (contribution > 1e-7)\n op_margin = MFE.GetOperandAt(Nop - 7)\n reached_target = np.isclose(op_margin.Value,\n op_margin.Target, atol=10)\n print(\"Margin: %1.2e\" % op_margin.Value)\n REOPTIMIZE = REOPTIMIZE or not reached_target\n\n op_equa = MFE.GetOperandAt(Nop - 8)\n reached_target = op_equa.Value < 10\n print(\"Avg Deviation from edge shape: %1.2f\" % op_equa.Value)\n REOPTIMIZE = REOPTIMIZE or not reached_target\n return REOPTIMIZE", "def testHeuristic(self):\n result = Sigmoid.heuristic(self, 12)\n self.assertEqual(0.28867513459481287, result)", "def test_binary_reg_fn():\n inputs = Variable(torch.Tensor([0, .5, 1]))\n outputs = binary_reg_fn(inputs).data\n expected = torch.Tensor([0.0029409, 1, 0.0029409])\n assert is_close(outputs, expected).all(), \\\n \"{} != {}\".format(outputs.tolist(), expected.tolist())", "def test_const_evaluate():\n pop = test_population\n pop = ops.const_evaluate(pop, value=123456789.0)\n \n for ind in pop:\n assert(pytest.approx(123456789.0) == ind.fitness)", "def linearReg(x,y):\n X=np.array(x).reshape(-1,1)\n Y=np.array(y).reshape(-1,1)\n x_shape = X.shape\n num_var = x_shape[1] \n yintercept = 0\n slope = 0\n progress = []\n #intialize the parameter\n weight_matrix = np.random.normal(-1,1,(num_var,1))\n yintercept = np.random.rand(1)\n #cost minmization\n for i in range(200):\n dcostdm = np.sum(np.multiply(((np.matmul(X,weight_matrix)+ yintercept)-Y),X))*2/x_shape[0] #w.r.t to the weight\n dcostdc = np.sum(((np.matmul(X,weight_matrix)+yintercept)-Y))*2/x_shape[0] #partial derivative of cost w.r.t the intercept\n weight_matrix -= 0.1*dcostdm \n #updating the weights with the calculated gradients\n yintercept -= 0.1*dcostdc #updating the weights with the calculated gradients\n progress.append(np.array((weight_matrix,yintercept)))\n slope = weight_matrix\n return (slope[-1],yintercept)", "def regret(machine, T, res_temps):\n opt = gain_opt(machine, T)\n return opt - res_temps", "def minimax(gamestate, depth, timeTotal, alpha, beta, maxEntity):\n\n bonus = 0\n isTerminalState = gamestate.board.checkTerminalState(gamestate.currentPlayer.noPlayer)\n # Basis Rekursif\n if ((depth == 0) or (time.time() > timeTotal) or 
(isTerminalState)):\n if (isTerminalState) and (gamestate.currentPlayer.noPlayer == maxEntity):\n bonus = 10\n elif (isTerminalState) and (gamestate.currentPlayer.noPlayer != maxEntity):\n bonus = -10\n return gamestate, U_Function(gamestate.currentPlayer, gamestate.oppositePlayer, gamestate.board.size, maxEntity) + bonus\n\n # Rekurens\n if (gamestate.currentPlayer.noPlayer == maxEntity):\n # Choose the maximum utility of the state\n # Iterate all pion and its possible moves\n maxGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n maxValue = -math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Iterate all possible moves of pion index\n for move in all_possible_moves:\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimax(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old max value\n if (utility > maxValue):\n maxValue = utility\n maxGameState = newGameState\n \n alpha = max(alpha, maxValue)\n if (beta <= alpha):\n return maxGameState, maxValue\n return maxGameState, maxValue\n\n else:\n # Choose the minimum utility of the state\n minGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n minValue = math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Iterate all possible moves of pion index\n for move in all_possible_moves:\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimax(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old min value\n if (utility < minValue):\n minValue = utility\n minGameState = newGameState\n \n beta = min(beta, minValue)\n if (beta <= alpha):\n return minGameState, minValue\n \n return minGameState, minValue", "def test_models_regression(model):\n atom = ATOMRegressor(X_reg, y_reg, test_size=0.24, random_state=1)\n atom.run(\n models=model,\n metric=\"neg_mean_absolute_error\",\n n_calls=2,\n n_initial_points=1,\n bo_params={\"base_estimator\": \"gbrt\", \"cv\": 1},\n )\n assert not atom.errors\n assert hasattr(atom, model)", "def minimization_fun(params, x, y, sizelist, x_crit_fun,\n rescale_fun, rescale_type, return_data=False):\n #x, y = _pad_vals(x, y)\n\n x_rescaled = rescale_xvals(x, sizelist, x_crit_fun, rescale_fun,\n rescale_type,\n *params)\n\n x_min = x_rescaled.flatten()\n y_min = y.flatten()\n\n sort_args = np.argsort(x_min)\n y_min = y_min[sort_args]\n\n # define the cost function\n cost_fun = np.nansum(np.abs(np.diff(y_min))) / \\\n (np.nanmax(y_min) - np.nanmin(y_min)) - 1\n\n if not return_data:\n\n return cost_fun\n else:\n return np.array(x_min)[sort_args], y_min, cost_fun", "def 
evaluate_regression(x_test,t_test,basis,bias,w,degree=1,mu=None,s=1):\n \n phi = design_matrix(x_test,basis,degree,bias,mu,s)\n pred_test=phi@w\n # Measure root mean squared error on testing data.\n t_est = pred_test\n #print(\"deleteeeeeeeeeee\",t_est)\n #print(np.shape(t_est))\n err = np.sqrt((np.square(pred_test-t_test)).mean())\n \n \n\n return (t_est, err)", "def test_MINX_pass(self):\n self.assertTrue(self.mod.minx.isset)", "def test_parallel_resistors(self):\r\n self.assertEqual(calc.evaluator({}, {}, '1||1'), 0.5)\r\n self.assertEqual(calc.evaluator({}, {}, '1||1||2'), 0.4)\r\n self.assertEqual(calc.evaluator({}, {}, \"j||1\"), 0.5 + 0.5j)", "def regmin(f, Bc=None, option=\"binary\"):\n\n if option != 'binary':\n raise ValueError, \"mmorph.regmin only implements option 'binary'\"\n if Bc is None: Bc = secross()\n fplus = addm(f,1)\n g = subm(suprec(fplus,f,Bc),f)\n y = union(threshad(g,1),threshad(f,0,0))\n return y", "def _gmres(self, super_operator, super_rhs, tol):\n return login_gmres(\n super_operator, super_rhs, tol,\n return_residuals=True,\n **SOLVER_OPTIONS\n )", "def test_score(self):\n reg = ElasticRegistration()\n reg.fit(self.unimodal_samples)\n score = reg.score(self.unimodal_samples)\n np.testing.assert_almost_equal(score, 0.9994225)", "def evaltest(x_solution,ntest,pred):\n \n large = 10.0**30\n e0 = 0.0\n y=0.0\n for i in range(ntest): # Computation of correct piece\n e0 += cfg.a_unscaled[cfg.ntrain+i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[cfg.ntrain+i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[cfg.ntrain+i][j1]\n y += pred[i]\n \n y = y/ntest \n e0 = e0/ntest\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(ntest):\n rmse += (pred[i]-cfg.a_unscaled[cfg.ntrain+i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[cfg.ntrain+i][-1]) \n e1 += (cfg.a_unscaled[cfg.ntrain+i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/ntest)\n mae = mae/ntest\n\n if ntest > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(ntest):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[cfg.ntrain+i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[cfg.ntrain+i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return pred,rmse,mae,ce,r", "def evaluate_errors_reg_param(inputs, targets, folds, centres, scale, reg_params=None):\n # create the feature mappoing and then the design matrix\n feature_mapping = construct_rbf_feature_mapping(centres,scale)\n designmtx = feature_mapping(inputs)\n # choose a range of regularisation parameters\n if reg_params is None:\n reg_params = np.logspace(-11,0)\n num_values = reg_params.size\n num_folds = len(folds)\n # create some arrays to store results\n test_mean_errors = np.zeros(num_values)\n #\n for r, reg_param in enumerate(reg_params):\n # r is the index of reg_param, reg_param is the regularisation parameter\n # cross validate with this regularisation parameter\n train_errors, test_errors = cv_evaluation_linear_model(\n designmtx, targets, folds, reg_param=reg_param)\n # we're interested in the average (mean) training and testing errors\n 
test_mean_error = np.mean(test_errors)\n # store the results\n test_mean_errors[r] = test_mean_error\n\n return test_mean_errors", "def _newtons_method_gmres_action(f, initial_guess, max_iter=50, tol=1e-12):\n\n output_dim = len(f(initial_guess))\n \n @np.vectorize\n def sum_values(dictionary):\n return sum(dictionary.values())\n \n def create_action(x0):\n \n def L_fun(x):\n \"\"\"\n Action\n Returns J_f(x0)*x by setting the values of 'x' as the initial derivatives for the variables in x0.\n \"\"\"\n \n f_x0 = f(ad.create_vector('x0', x0, seed_vector=x));\n f_x0 = np.array(f_x0) #ensure that f_x0 is np.array\n action = sum_values(ad.get_deriv(f_x0))\n return action\n \n L = LinearOperator(shape=(output_dim, len(x0)), matvec=L_fun)\n \n return L\n \n x0 = initial_guess\n for iter_num in range(max_iter):\n L = create_action(x0)\n b = -1 * np.array(f(x0))\n if len(x0) == 1:\n b = np.array([b])\n step, _ = gmres(L, b, tol = tol, atol = 'legacy')\n xnext = x0 + step \n if np.all(np.abs(xnext - x0) < tol):\n return (xnext, iter_num + 1);\n x0 = xnext\n \n raise RuntimeError(\"Failed to converge after {0} iterations, value is {1}\".format(max_iter, x0) );" ]
[ "0.6133892", "0.59743285", "0.584011", "0.55133736", "0.5493546", "0.54772586", "0.5439533", "0.5432923", "0.54194605", "0.5386318", "0.53721267", "0.5313838", "0.5308108", "0.52519023", "0.52404577", "0.5208454", "0.5201797", "0.51906943", "0.5181717", "0.517464", "0.516129", "0.5160212", "0.51470286", "0.51252", "0.5123426", "0.5108222", "0.51007617", "0.50929296", "0.50911796", "0.5088827" ]
0.770568
0
Tests the percentile_regret fn
def test_percentile_regret(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.6]]) R = common_metrics.percentile_regret(f, maximise=True, percentile=0.5) expected = np.asarray( [0.0, -0.3]) assert np.allclose(R, expected) R = common_metrics.percentile_regret(f, maximise=False, percentile=0.5) expected = np.asarray( [-0.3, 0.0]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def ninetieth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.90)]/60", "def test_percentile_kurtosis():\n f = np.asarray([\n [0.99, 1.0, 0.5, 0.52],\n [0.69, 0.6, 0.61, 1.0]])\n R = common_metrics.percentile_kurtosis(f, maximise=True)\n expected = np.asarray(\n [1.06382979, 5.0])\n assert np.allclose(R, expected)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_single_percentile(self):\n percentiles_cube = 
set_up_percentiles_cube()\n percentiles_cube = percentiles_cube[0]\n msg = \"Percentile coordinate has only one value. Interpolation\"\n with self.assertRaisesRegex(ValueError, msg):\n ProbabilitiesFromPercentiles2D(percentiles_cube, 'new_name')", "def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)", "def percentile(scores, student_score):\n scores = np.array(sorted(scores))\n num_scores = len(scores)\n return round(sum(scores <= student_score) / float(num_scores) * 100, 2)", "def test_check_data_over_specifying_percentiles(self):\n msg = \"Cannot specify both no_of_percentiles and percentiles\"\n with self.assertRaisesRegex(ValueError, msg):\n Plugin().process(self.cube, no_of_percentiles=3, percentiles=[25, 50, 75])", "def normalizeprctile(expdat,percent=80):\n\tparams=locals()\n\n\t# select the bacteria to use - don't want to include very low freq. bacteria\n\tnewexp=hs.filterminreads(expdat,1*len(expdat.samples))\n\n\tpercvals=np.percentile(newexp.data,percent,axis=0)\n#\tplt.figure()\n#\tplt.plot(percvals)\n\tpercvals=percvals/np.mean(percvals)\n\tnewexp=hs.copyexp(expdat)\n\tfor idx,samp in enumerate(expdat.samples):\n\t\tnewexp.data[:,idx]=newexp.data[:,idx]*percvals[idx]\n\tnewexp.filters.append(\"normalize percentile %f\" % percent)\n\ths.addcommand(newexp,\"normalizeprctile\",params=params,replaceparams={'expdat':expdat})\n\n\treturn newexp", "def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, 
percentile_train_features_selected, percentile_test_features_selected, percentile_mask", "def tenth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.10)]/60", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def _quantile_check(df: DataFrame) -> None:\n expected_percentiles = choose_set_of_percentiles(df[\"percentile\"].nunique())\n\n if not np.allclose(expected_percentiles, df[\"percentile\"].unique()):\n msg = (\n \"The forecast percentiles can not be considered as quantiles. \"\n f\"The forecast percentiles are {df['percentile'].unique()}.\"\n \"Based on the number of percentiles provided, the expected \"\n f\"percentiles would be {expected_percentiles}.\"\n )\n raise ValueError(msg)", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def fpr(positive, negative, tpr):\n threshold = np.percentile(np.asarray(positive), 100-tpr) # uses numpy percentile (ex 70th tpr is the 30th percentile)\n total_false_positives = sum(negative > threshold)\n\n return total_false_positives/len(negative)", "def wilks(percentiles, alpha=0.10):\n percentiles = np.asarray(percentiles)\n pvals = list(percentiles.flat) # want in decimal\n pvals = sorted(2 * min(pval, 1 - pval) for pval in pvals)\n ptest = [alpha * i / len(pvals) for i in range(len(pvals))]\n ppick = max(pv for pv, pt in zip(pvals, ptest) if pv <= pt) / 2\n mask = (percentiles <= ppick) | (percentiles >= (1 - ppick))\n return percentiles[mask]", "def test_run_simulations_and_get_percentile_allele_length_1():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00", "def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100", "def p(n):\n def p_(x):\n return np.percentile(x, n)\n\n p_.__name__ = \"p_%s\" % n\n return p_", "def tail_ratio(returns):\n\n return np.abs(np.percentile(returns, 95)) / \\\n np.abs(np.percentile(returns, 5))", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, 
used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def calcPercentile(percent, set): #TESTED\r\n\r\n # check for 100%\r\n if percent == Decimal('1.0'):\r\n return max(set)\r\n\r\n # convert percent to the appropriate index\r\n pValue = percent * len(set)\r\n\r\n set = sorted(set)\r\n\r\n # check for 0%\r\n if percent == Decimal('0'):\r\n return set[0]\r\n\r\n # check if percent is an integer\r\n if pValue % 1 == 0:\r\n\r\n # cast pValue as int so it can be used as an index\r\n pValue = int(pValue)\r\n\r\n # take average of values at indices percent and percent - 1\r\n return (set[pValue - 1] + set[pValue]) / Decimal('2')\r\n\r\n # if percentage needs to be rounded\r\n else:\r\n # round number up to nearest integer\r\n print pValue # DELETE\r\n pValue = pValue.to_integral_exact(rounding=ROUND_CEILING) # WHAT'S UP WITH THIS FUNCTION?\r\n print pValue # DELETE\r\n pValue = int(pValue)\r\n\r\n return set[pValue - 1]", "def test_minimax_regret():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.minimax_regret(f, maximise=True)\n expected = np.asarray(\n [-0.1, -0.4])\n assert np.allclose(R, expected)\n R = common_metrics.minimax_regret(f, maximise=False)\n expected = np.asarray(\n [-0.4, -0.1])\n assert np.allclose(R, expected)", "def lscoreatpercentile (inlist, percent):\r\n if percent > 1:\r\n print \"\\nDividing percent>1 by 100 in lscoreatpercentile().\\n\"\r\n percent = percent / 100.0\r\n targetcf = percent*len(inlist)\r\n h, lrl, binsize, extras = histogram(inlist)\r\n cumhist = cumsum(copy.deepcopy(h))\r\n for i in range(len(cumhist)):\r\n if cumhist[i] >= targetcf:\r\n break\r\n score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)\r\n return score", "def matlab_percentile(in_data, percentiles):\n data = np.sort(in_data)\n p_rank = 100.0 * (np.arange(data.size) + 0.5) / data.size\n perc = np.interp(percentiles, p_rank, data, left=data[0], 
right=data[-1])\n return perc", "def percentile(histogram, percentile=0.75):\n covered = 0\n normalization = sum(histogram.values())\n for key, frequency in sorted(histogram.items(), reverse=True):\n covered += frequency\n assert covered <= normalization\n if covered > ((1.0 - percentile) * normalization):\n return key\n raise RuntimeError('Percentile computation should have terminated '\n 'mid-loop.')" ]
[ "0.6634793", "0.6602073", "0.6511077", "0.6510798", "0.64590746", "0.6381094", "0.6362796", "0.633054", "0.6321114", "0.6284327", "0.626895", "0.62191373", "0.61997247", "0.61240596", "0.6115809", "0.61138546", "0.610881", "0.59470975", "0.5904325", "0.59035945", "0.58971494", "0.5893836", "0.58878684", "0.5875666", "0.5862959", "0.5861127", "0.58436483", "0.5834776", "0.58168334", "0.5796168" ]
0.8310882
0
Tests the mean_variance fn
def test_mean_variance(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.6]]) R = common_metrics.mean_variance(f, maximise=True) expected = np.asarray( [1.42320289996384, 1.54948632859709]) assert np.allclose(R, expected) R = common_metrics.mean_variance(f, maximise=False) expected = np.asarray( [0.132210105461122, 0.351723890540445]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))", "def test_predict_mean_variance(self):\n lik = self._standard_likelihood()\n input_mean = Variable(TensorType([0.0]))\n input_variance = Variable(TensorType([1.0]))\n expected_output_mean = input_mean\n expected_output_variance = input_variance + self._expected_likelihood_variance\n\n # API\n output_mean, output_variance = lik.predict_mean_variance(\n input_mean, input_variance\n )\n assert isinstance(output_mean, Variable)\n assert isinstance(output_variance, Variable)\n\n # Value\n assert output_mean.data.numpy() == expected_output_mean.data.numpy()\n assert output_variance.data.numpy() == pytest.approx(\n expected_output_variance.data.numpy()\n )", "def test_calculate_nhpp_mean_variance_inst_mean(self):\n\n _mean_var = calculate_nhpp_mean_variance(46, 3000.0, 0.332, 0.616, 2)\n self.assertAlmostEqual(_mean_var, 489.07164965)", "def test_calculate_nhpp_mean_variance_cum_mean(self):\n\n _mean_var = calculate_nhpp_mean_variance(46, 3000.0, 0.332, 0.616)\n self.assertAlmostEqual(_mean_var, 92.3421144)", "def _variance(mean_variance, samples):\n mean = mean_variance[0] / samples\n variance = mean_variance[1]\n variance /= samples\n variance -= mean * mean\n return variance", "def test_predict_mean_covariance(self):\n lik = self._standard_likelihood()\n input_mean = Variable(TensorType([0.0, 1.0, 2.1]))\n input_covariance = Variable(\n TensorType([[1.0, 0.5, 0.2], [0.5, 1.0, 0.5], [0.2, 0.5, 1.0]])\n )\n expected_output_mean = input_mean\n # Ugh, sorry about this. Will cleanup when we move PyTorch forward!\n expected_output_covariance = (\n input_covariance\n + Variable(TensorType([self._expected_likelihood_variance]))\n .expand_as(input_covariance)\n .diag()\n .diag()\n )\n\n # API\n output_mean, output_covariance = lik.predict_mean_covariance(\n input_mean, input_covariance\n )\n assert isinstance(output_mean, Variable)\n assert isinstance(output_covariance, Variable)\n\n # Value\n assert all(output_mean.data.numpy() == expected_output_mean.data.numpy())\n assert output_covariance.data.numpy() == pytest.approx(\n expected_output_covariance.data.numpy()\n )", "def test_profiled_mean_and_variance(self):\n\n def mean(df):\n total = 0\n for item in df:\n total += item\n return total / len(df)\n\n def var(df):\n var = 0\n mean_df = mean(df)\n for item in df:\n var += (item - mean_df) ** 2\n return var / (len(df) - 1)\n\n def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):\n delta = mean_b - mean_a\n m_a = var_a * (count_a - 1)\n m_b = var_b * (count_b - 1)\n M2 = m_a + m_b + delta ** 2 * count_a * count_b / (\n count_a + count_b)\n return M2 / (count_a + count_b - 1)\n\n data = np.linspace(-5, 5, 11).tolist()\n df1 = pd.Series(data)\n\n data = np.linspace(-3, 2, 11).tolist()\n df2 = pd.Series(data)\n\n data = np.full((10,), 1)\n df3 = pd.Series(data)\n\n num_profiler = FloatColumn(df1.name)\n num_profiler.update(df1.apply(str))\n\n self.assertEqual(mean(df1), num_profiler.mean)\n self.assertEqual(var(df1), num_profiler.variance)\n self.assertEqual(np.sqrt(var(df1)), num_profiler.stddev)\n\n variance = batch_variance(\n mean_a=num_profiler.mean, var_a=num_profiler.variance,\n count_a=num_profiler.match_count,\n mean_b=mean(df2), var_b=var(df2), count_b=df2.count()\n )\n num_profiler.update(df2.apply(str))\n df = pd.concat([df1, df2])\n self.assertEqual(mean(df), num_profiler.mean)\n self.assertEqual(variance, 
num_profiler.variance)\n self.assertEqual(np.sqrt(variance), num_profiler.stddev)\n\n variance = batch_variance(\n mean_a=num_profiler.mean, var_a=num_profiler.variance,\n count_a=num_profiler.match_count,\n mean_b=mean(df3), var_b=var(df3), count_b=df3.count()\n )\n num_profiler.update(df3.apply(str))\n\n df = pd.concat([df1, df2, df3])\n self.assertEqual(mean(df), num_profiler.mean)\n self.assertEqual(variance, num_profiler.variance)\n self.assertEqual(np.sqrt(variance), num_profiler.stddev)", "def test_mean(self):\n pass", "def test_mean(self):\n pass", "def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))", "def variance(self, mean=None):\n raise NotImplementedError", "def test_calculate_variance_covariance(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.6142)\n self.assertAlmostEqual(_var_covar[0][0], 0.1351777)\n self.assertAlmostEqual(_var_covar[0][1], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][0], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][1], 0.01710296)\n self.assertEqual(_var_covar[0][1], _var_covar[1][0])", "def test_coeffvar(self):\n self.assertEqual(coeffvar(list1, sample=False), np.std(list1) /\n np.mean(list1))\n self.assertEqual(coeffvar(list1), np.std(list1, ddof=1) /\n np.mean(list1))", "def test_mean():\n data = io.create_sample_Dataset(10)\n print(data.piv.average.u.median())\n assert np.allclose(data.piv.average.u.median(), 6.0)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\r\n n = len(x)\r\n deviations = dev_mean(x)\r\n return sum_of_squares(deviations) / (n-1)", "def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val", "def test_5_scalar_variance_1step(self):\n print(\"test 5 comparing variances\")\n\n means, vars, cl_probs = EM_step(\n self.X_h, self.means_h, self.dispersions_h, self.cluster_probabilities_h\n )\n\n self.assertEqual(means.shape[0], 2)\n\n print(vars[0], vars[1])", "def variance(numbers, mean):\n variance = 0 # We will add to this value in a loop\n N = len(numbers)\n \n for i in numbers:\n\n # Operations follow typical BEDMAS\n variance += ((i - mean) * (i - mean))/N\n \n return variance", "def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v", "def explained_variance_score(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...", "def variance(xs: List[float]) -> float:\n assert len(xs) >= 2, \"variance requires at least two elements\"\n\n n = len(xs)\n deviations = de_mean(xs)\n return sum_of_squares(deviations) / (n - 1)", "def variance(xs: List[float]) -> float:\n assert len(xs) >= 2, \"variance requires at least two elements\"\n\n n = len(xs)\n deviations = de_mean(xs)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n \"\"\" note - why n-1?: since we are likely looking at a sample, x_bar is only an\n estimate of the actual mean, which means that on average (x_i - x_bar) ** 2\n is an underestimate of x_i's squared deviation from the mean, which is why\n we divide by n-1 instead of n (see 
bit.ly/lL2EapI)\"\"\"\n n = len(x)\n deviations = deviations_from_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem in observations_raw]\n observations_squared = sum([num**2 for num in observations])\n aggregate_squared = sum(observations)**2\n n = len(observations)\n mean = sum(observations)/n\n variance = (observations_squared - (aggregate_squared/n))/(n-1)\n print(f\"Variance is: {variance}\")\n return variance, mean", "def ustat_h1_mean_variance(fea_tensor, return_variance=True, use_unbiased=True):\n Xi = fea_tensor\n n, d, J = Xi.shape\n\n assert n > 1, \"Need n > 1 to compute the mean of the statistic.\"\n # n x d*J\n Tau = np.reshape(Xi, [n, d * J])\n if use_unbiased:\n t1 = np.sum(np.mean(Tau, 0) ** 2) * (old_div(n, float(n - 1)))\n t2 = old_div(np.sum(np.mean(Tau**2, 0)), float(n - 1))\n # stat is the mean\n stat = t1 - t2\n else:\n stat = np.sum(np.mean(Tau, 0) ** 2)\n\n if not return_variance:\n return stat\n\n # compute the variance\n # mu: d*J vector\n mu = np.mean(Tau, 0)\n variance = 4 * np.mean(np.dot(Tau, mu) ** 2) - 4 * np.sum(mu**2) ** 2\n return stat, variance", "def sample_variance(self, x_dict={}):\n raise NotImplementedError()", "def _variance(self,gp):\r\n return self.variance" ]
[ "0.80704737", "0.777593", "0.7494635", "0.7262066", "0.7157619", "0.6950645", "0.6944921", "0.6888636", "0.6888636", "0.6876363", "0.6846463", "0.68175846", "0.66643274", "0.6646647", "0.6567007", "0.6567007", "0.6567007", "0.6552458", "0.65489817", "0.6546654", "0.6544595", "0.6526431", "0.64823586", "0.6474266", "0.6474266", "0.6456093", "0.6455595", "0.64431787", "0.6405411", "0.6376021" ]
0.7823844
1
Tests the undesirable_deviations fn
def test_undesirable_deviations(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.6]]) R = common_metrics.undesirable_deviations(f, maximise=True) expected = np.asarray( [-0.245, 0.0]) assert np.allclose(R, expected) R = common_metrics.undesirable_deviations(f, maximise=False) expected = np.asarray( [-0.005, -0.045]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_stddev(self):\n self.assertEqual(stddev(list1, sample=False), np.std(list1))\n self.assertEqual(stddev(list1), np.std(list1, ddof=1))", "def allowedDeviation(self, lower, upper=None, msg=None):\n return allowed_deviation(lower, upper, msg)", "def test_mixed_incomplete_data_2():\n profiles = [[2, 0]]\n payoffs = [[1.0, 0.0]]\n game = paygame.game(2, 2, profiles, payoffs)\n devgains = regret.mixture_deviation_gains(game, [1, 0])\n assert np.allclose(\n devgains, [0, np.nan], equal_nan=True\n ), \"nonzero regret or deviation without payoff didn't return nan\"", "def test_empty_pure_strategy_deviation_gains():\n game = rsgame.empty(2, [2, 2])\n gains = regret.pure_strategy_deviation_gains(game, [2, 0, 2, 0])\n expected = [np.nan, np.nan, 0, 0, np.nan, np.nan, 0, 0]\n assert np.allclose(gains, expected, equal_nan=True)", "def test_demand_variability(self):\n demand_variability = self._uncertain_demand.demand_variability\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_variability = lambda x, y: x / y\n test_variability = cal_variability(stdev, avg_order)\n self.assertEqual(demand_variability, test_variability)", "def test_pure_strategy_deviation_gains():\n profiles = [\n [2, 0, 2, 0],\n [2, 0, 1, 1],\n [2, 0, 0, 2],\n [1, 1, 2, 0],\n [1, 1, 1, 1],\n [1, 1, 0, 2],\n [0, 2, 2, 0],\n [0, 2, 1, 1],\n [0, 2, 0, 2],\n ]\n payoffs = [\n [1, 0, 2, 0],\n [3, 0, 4, 5],\n [6, 0, 0, 7],\n [8, 9, 10, 0],\n [11, 12, 13, 14],\n [15, 16, 0, 17],\n [0, 18, 19, 0],\n [0, 20, 21, 22],\n [0, 23, 0, 24],\n ]\n game = paygame.game(2, [2, 2], profiles, payoffs)\n\n gains = regret.pure_strategy_deviation_gains(game, [2, 0, 2, 0])\n assert np.allclose(gains, [0, 8, 0, 0, 0, 3, 0, 0])\n gains = regret.pure_strategy_deviation_gains(game, [1, 1, 1, 1])\n assert np.allclose(gains, [0, 9, -9, 0, 0, 4, -4, 0])", "def test_offensive_degenerate_case(self):\n from parlai.scripts.detect_offensive_language import DetectOffensive\n\n report = DetectOffensive.main(\n task='integration_tests:overfit', safety='all', mutators='degenerate'\n )\n assert report['classifier_offenses%'] == 0\n assert report['exs'] == 4", "def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon", "def test_pooled_sd(self):\r\n exp = pooled_standard_deviation(self.pooled_sd_input_1)\r\n self.assertEqual(self.pooled_sd_result, exp)", "def test_devide(self):\n self.assertEqual(work_file.devide(10, 5), 2)\n self.assertEqual(work_file.devide(-1, 1), -1)\n self.assertEqual(work_file.devide(-1, -1), 1)\n self.assertEqual(work_file.devide(5, 2), 2.5)\n\n self.assertRaises(ValueError, work_file.devide, 10, 0)", "def test_exact_nonsupercontrolled_decompose(self):\n with self.assertWarns(UserWarning, msg=\"Supposed to warn when basis non-supercontrolled\"):\n TwoQubitBasisDecomposer(UnitaryGate(Ud(np.pi / 4, 0.2, 0.1)))", "def test_check_distribution1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_distribution(distribution_fail_1)\n assert str(err_info.value) == 'distribution type input not within range of index'", "def test_positive_definite2(dist, alpha, divergence):\n assert divergence(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(0)\n assert hellinger_sum(dist, dist, alpha, rvs=[0, 1]) == pytest.approx(1)", "def test_effective_metrics_not_in_PM():\n assert 
\"effective_sample_size\" not in PM_METRICS\n assert \"pearson_r_eff_p_value\" not in PM_METRICS\n assert \"spearman_r_eff_p_value\" not in PM_METRICS", "def test_mixed_incomplete_data():\n profiles = [[2, 0], [1, 1]]\n payoffs = [[4.3, 0], [6.2, 6.7]]\n game = paygame.game(2, 2, profiles, payoffs)\n dev_gain = regret.mixture_deviation_gains(game, [1, 0])\n expected_gains = [0.0, 2.4]\n assert np.allclose(\n dev_gain, expected_gains\n ), \"mixture gains wrong {} instead of {}\".format(dev_gain, expected_gains)\n dev_gain = regret.mixture_deviation_gains(game, game.uniform_mixture())\n assert np.isnan(dev_gain).all(), \"had data for mixture without data\"", "def test_thermallyExpands(self):\n self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)", "def _check_density(density, n_features):\n if density == \"auto\":\n density = 1 / np.sqrt(n_features)\n\n elif density <= 0 or density > 1:\n raise ValueError(\"Expected density in range ]0, 1], got: %r\" % density)\n return density", "def test_no_values(self):\r\n values = []\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual('', result['mean_result'])\r\n self.assertEqual('', result['sd_result'])", "def standard_deviation_error(y_true, y_pred):\n ...", "def sd(vals):", "def deviation(values, val):\n\tm = mean(values)\n\tdev = abs(val-m)\n\tsd = standard_deviation(values)\n\treturn float(dev)/sd if sd!=0 else 0.0", "def test_plot_kde_hdi_probs_bad(continuous_model, kwargs):\n with pytest.raises(ValueError):\n plot_kde(continuous_model[\"x\"], continuous_model[\"y\"], **kwargs)", "def deviation(delta, mean_reliability, experiment, disks, chunk_count, spread_factor, threshold_recovery):\n \n # Approximated file loss probability\n reliability_mean_value = probability_at_least_d_fail_equal_reliability(threshold_recovery, chunk_count, mean_reliability)\n \n\n # Exact probability\n reliabilities = [element for element in \n file_loss_delta_matrix(delta, mean_reliability, experiment, disks, chunk_count, spread_factor, \n threshold_recovery)]\n \n return abs(mean(reliabilities) - reliability_mean_value)/abs(mean(reliabilities))", "def get_declination(st,vals):\n bel = sorted(filter(lambda x: x <= 0.5, vals))\n abo = sorted(filter(lambda x: x > 0.5, vals))\n if len(bel) < 1 or len(abo) < 1:\n return -2.0\n\n theta = np.arctan((1-2*np.mean(bel))*len(vals)/len(bel))\n gamma = np.arctan((2*np.mean(abo)-1)*len(vals)/len(abo))\n\n return 2.0*(gamma-theta)/3.1415926535 # Enough precision for you?", "def test_positive_definite1(dist, alpha, divergence):\n assert divergence(dist, dist, alpha) == pytest.approx(0)\n assert hellinger_sum(dist, dist, alpha) == pytest.approx(1)", "async def test_random_complete_dev(players, strats, _):\n game = gamegen.samplegame(players, strats)\n sched = gamesched.samplegamesched(game)\n sgame = schedgame.schedgame(sched)\n mix = sgame.random_sparse_mixture()\n supp = mix > 0\n dev_game = await sgame.get_deviation_game(supp)\n devs, jac = dev_game.deviation_payoffs(mix, jacobian=True)\n assert not np.isnan(devs).any()\n assert not np.isnan(jac[supp]).any()\n assert np.isnan(jac[~supp]).all()\n for role in range(sgame.num_roles):\n mask = role == sgame.role_indices\n dev_game = await sgame.get_deviation_game(supp, role_index=role)\n rdevs = dev_game.deviation_payoffs(mix)\n assert np.allclose(rdevs[supp], devs[supp])\n assert np.allclose(rdevs[mask], devs[mask])\n assert supp[~mask].all() or np.isnan(rdevs[~mask]).any()", "def test_positivity(alpha, dists, divergence):\n for dist1, dist2 in combinations(dists, 2):\n 
assert divergence(dist1, dist2, alpha) > 0", "def standard_deviation_under(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n under_predicted_inds = get_under_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n test_subset = test_data[under_predicted_inds]\r\n truth_subset = truth_data[under_predicted_inds]\r\n return overall_standard_deviation(individual, test_subset, truth_subset)", "def test_accept_missing_sources_as_tech_debt(self):\n metric = Metric(self.DATA_MODEL, {\"addition\": \"sum\", \"type\": \"tests\", \"accept_debt\": True}, METRIC_ID)\n measurement = self.measurement(metric)\n self.assertEqual(\"debt_target_met\", measurement.status())", "def normal_deviation(a, b):\r\n deviation = math.sqrt((b-a)**2 / 12)\r\n print(\"The deviation of this normal distribution is : \", deviation)\r\n return deviation" ]
[ "0.6607664", "0.65282196", "0.6420958", "0.6356108", "0.6301525", "0.61906475", "0.61337507", "0.6122744", "0.6097631", "0.5991103", "0.58946663", "0.5888784", "0.5869099", "0.58275855", "0.5796584", "0.5736681", "0.57045794", "0.5692008", "0.5681096", "0.56480706", "0.56301445", "0.5624416", "0.5616718", "0.56089014", "0.55892", "0.5566921", "0.5564218", "0.5540108", "0.55323416", "0.5531345" ]
0.81727505
0
Tests the percentile skew fn
def test_percentile_skew(): f = np.asarray([ [0.99, 1.0, 0.5], [0.69, 0.6, 0.61]]) R = common_metrics.percentile_skew(f, maximise=True) expected = np.asarray( [0.96, -0.777777777777779]) assert np.allclose(R, expected) R = common_metrics.percentile_skew(f, maximise=False) expected = np.asarray( [-0.96, 0.777777777777779]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_percentile_kurtosis():\n f = np.asarray([\n [0.99, 1.0, 0.5, 0.52],\n [0.69, 0.6, 0.61, 1.0]])\n R = common_metrics.percentile_kurtosis(f, maximise=True)\n expected = np.asarray(\n [1.06382979, 5.0])\n assert np.allclose(R, expected)", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def percentile(scores, student_score):\n scores = np.array(sorted(scores))\n num_scores = len(scores)\n return round(sum(scores <= student_score) / float(num_scores) * 100, 2)", "def wilks(percentiles, alpha=0.10):\n percentiles = np.asarray(percentiles)\n pvals = list(percentiles.flat) # want in decimal\n pvals = sorted(2 * min(pval, 1 - pval) for pval in pvals)\n ptest = [alpha * i / len(pvals) for i in range(len(pvals))]\n ppick = max(pv for pv, pt in zip(pvals, ptest) if pv <= pt) / 2\n mask = (percentiles <= ppick) | (percentiles >= (1 - ppick))\n return percentiles[mask]", "def calculate_cornish_fisher_percentile(alpha, mu, sigma, skew, kurt):\n\n z = stats.norm.ppf(alpha)\n he2 = np.polynomial.hermite_e.hermeval(z, [0.0, 0.0, 1.0])\n he3 = np.polynomial.hermite_e.hermeval(z, [0.0, 0.0, 0.0, 1.0])\n he13 = np.polynomial.hermite_e.hermeval(z, [0.0, -1.0, 0.0, -2.0])\n\n w = (z +\n he2 * skew / 6 +\n he3 * kurt / 24 +\n he13 * (skew ** 2) / 36)\n\n return mu + sigma * w", "def test_median_type():\n\tmedian(.2)", "def test__repeated_median(repeated_median):\n x, y, *_ = repeated_median\n assert repeated_median_slope(x, y) == 5.0", "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def rate(skew):\r\n prob = random.random()\r\n if prob >= skew:\r\n return True\r\n if prob < skew:\r\n return False", "def ninetieth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.90)]/60", "def robust_median_from_percentiles(array,percentiles=()):\n\n if percentiles:\n percentiles = np.array(percentiles)\n SigmaVector = scipy.stats.norm.ppf(percentiles/100.)\n else:\n percentiles = np.array([10.,20.,30.,40.,45.])\n SigmaVector = np.array([-1.28155157, -0.84162123, -0.52440051, -0.2533471 , -0.12566135])\n\n PercentileValues = np.percentile(array,percentiles)\n \n sig, med = 
fit_slope_1d(SigmaVector,PercentileValues)\n\n return med", "def get_percentile(obs, bootstrap):\n if np.isnan(obs):\n return np.nan\n else:\n return np.searchsorted(np.sort(bootstrap), obs) / len(bootstrap)", "def p(n):\n def p_(x):\n return np.percentile(x, n)\n\n p_.__name__ = \"p_%s\" % n\n return p_", "def test_sort_distributions_median(self):\r\n exp = [([0, 0, 0, 1], [2, 1, 1], [1], [1, 2, 3]),\r\n ('bar', 'baz', 'zab', 'foo'), ('b', 'r', 'b', 'w')]\r\n obs = _sort_distributions(\r\n [[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],\r\n ['foo', 'baz', 'bar', 'zab'], ['w', 'r', 'b', 'b'], 'median')\r\n self.assertEqual(obs, exp)", "def kind_utility_func(x):\n mean = [0.7, 0.3]\n s_1 = 0.3\n s_2 = 0.2\n r_12 = 0.0\n cov = [[s_1**2, r_12*s_1*s_2], \n [r_12*s_1*s_2, s_2**2]]\n rv = multivariate_normal(mean, cov)\n A = 1/rv.pdf(mean)\n return A*rv.pdf(x)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def tenth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.10)]/60", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def tail_ratio(returns):\n\n return np.abs(np.percentile(returns, 95)) / \\\n np.abs(np.percentile(returns, 5))", "def percentile(histogram, percentile=0.75):\n covered = 0\n normalization = sum(histogram.values())\n for key, frequency in sorted(histogram.items(), reverse=True):\n covered += frequency\n assert covered <= normalization\n if covered > ((1.0 - percentile) * normalization):\n return key\n raise RuntimeError('Percentile computation should have terminated '\n 'mid-loop.')", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def lambda_test(p_values, df=1):\n from scipy.stats import chi2\n assert np.max(p_values) <= 1 and np.min(p_values) >= 0, 'These do not appear to be p-values'\n\n chi_sq_scores = chi2.ppf(1 - p_values, df)\n return np.median(chi_sq_scores) / chi2.ppf(0.5, df)", "def keep_top_percentile(image, percentile, *args, **kwargs):\n # TODO: Implement the method\n\n imageSort = num.sort(image, axis=None)\n elem = int(len(imageSort) * (percentile/100))\n big = num.zeros(elem)\n\n k = len(imageSort)-1\n for i in range(0, elem):\n if imageSort[k] in big:\n k -= 1\n else:\n big[i] = imageSort[k]\n k -= 1\n\n thresholded = num.zeros((640, 480))\n for i in range (0, 640):\n for j in range (0, 480):\n l = image[i][j]\n if l in big:\n thresholded[i][j] = image[i][j]\n else:\n thresholded[i][j] = 0\n\n save_image_to_h5(thresholded, \"thresholded.h5\")\n \n\n return thresholded", "def test__repeated_median_catch_division_by_zero(repeated_median):\n *_, divzero_x, divzero_y = repeated_median\n assert repeated_median_slope(divzero_x, divzero_y) == 1.0", "def calc_skewness(sig):\n return skew(sig)", "def fpr(positive, negative, tpr):\n threshold = np.percentile(np.asarray(positive), 100-tpr) # uses numpy percentile (ex 70th tpr is the 30th percentile)\n total_false_positives = 
sum(negative > threshold)\n\n return total_false_positives/len(negative)", "def test_single_percentile(self):\n percentiles_cube = set_up_percentiles_cube()\n percentiles_cube = percentiles_cube[0]\n msg = \"Percentile coordinate has only one value. Interpolation\"\n with self.assertRaisesRegex(ValueError, msg):\n ProbabilitiesFromPercentiles2D(percentiles_cube, 'new_name')", "def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)" ]
[ "0.64033365", "0.63303274", "0.6269127", "0.62636626", "0.62064844", "0.60701036", "0.6063214", "0.6048836", "0.6026952", "0.59726375", "0.5954569", "0.5940382", "0.5877444", "0.5845502", "0.578347", "0.5763043", "0.5729505", "0.57232666", "0.5709021", "0.5695031", "0.5659054", "0.5658106", "0.56132936", "0.5591473", "0.5588802", "0.5548942", "0.5521403", "0.54988366", "0.549333", "0.5484905" ]
0.80108
0
Tests the percentile kurtosis fn
def test_percentile_kurtosis(): f = np.asarray([ [0.99, 1.0, 0.5, 0.52], [0.69, 0.6, 0.61, 1.0]]) R = common_metrics.percentile_kurtosis(f, maximise=True) expected = np.asarray( [1.06382979, 5.0]) assert np.allclose(R, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_calculation_of_quantile(alpha: Any) -> None:\n n = 30\n check_alpha_and_n_samples(alpha, n)", "def calc_kurtosis(sig):\n return kurtosis(sig)", "def kurtosis(self) -> float:\n return float(ss.kurtosis(self.tsdf.pct_change(), fisher=True, bias=True, nan_policy='omit'))", "def kurtosis_func(self, months_from_last: int = None, from_date: dt.date = None, to_date: dt.date = None) -> float:\n earlier, later = self.calc_range(months_from_last, from_date, to_date)\n return float(ss.kurtosis(self.tsdf.loc[earlier:later].pct_change(), fisher=True, bias=True, nan_policy='omit'))", "def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)", "def kurtosis(r):\n demeaned_r = r - r.mean()\n # use the population standard deviation, so set dof=0\n sigma_r = r.std(ddof=0)\n exp = (demeaned_r**4).mean()\n return exp/sigma_r**4", "def kurtosis(self,return_series:pd.Series):\n demeaned_returns = return_series - return_series.mean()\n\n # Use the population standard deviation, so set dof=0\n sigma_r = return_series.std(ddof=0)\n exp = (demeaned_returns ** 4).mean()\n return (exp/sigma_r ** 4) - 3", "def test_median_type():\n\tmedian(.2)", "def tenth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.10)]/60", "def kurtosis(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n apb = a + b\n return (6*((a - b)**2*(apb + 1) - a*b*(apb + 2)) /\n (a*b*(apb + 2)*(apb + 3)))", "def kempton_taylor_q(counts, lower_quantile=.25, upper_quantile=.75):\n n = len(counts)\n lower = int(ceil(n*lower_quantile))\n upper = int(n*upper_quantile)\n sorted = counts.copy()\n sorted.sort()\n return (upper-lower)/log(sorted[upper]/sorted[lower])", "def smile_func(self, K):\n pass", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def ninetieth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.90)]/60", "def percentile(t: torch.tensor, q: float):\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! 
Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result", "def test_katsuura(self):\n fun = get_problem('katsuura', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 3837.4739882594373, delta=4000)", "def test_percentile_skew():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.61]])\n R = common_metrics.percentile_skew(f, maximise=True)\n expected = np.asarray(\n [0.96, -0.777777777777779])\n assert np.allclose(R, expected)\n R = common_metrics.percentile_skew(f, maximise=False)\n expected = np.asarray(\n [-0.96, 0.777777777777779])\n assert np.allclose(R, expected)", "def kurtosis(self):\n self._finalize()\n return self.vkurtosis", "def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])", "def _quantile_check(df: DataFrame) -> None:\n expected_percentiles = choose_set_of_percentiles(df[\"percentile\"].nunique())\n\n if not np.allclose(expected_percentiles, df[\"percentile\"].unique()):\n msg = (\n \"The forecast percentiles can not be considered as quantiles. \"\n f\"The forecast percentiles are {df['percentile'].unique()}.\"\n \"Based on the number of percentiles provided, the expected \"\n f\"percentiles would be {expected_percentiles}.\"\n )\n raise ValueError(msg)", "def compute_quantile(risk, T_max: int, scenario_numbers, quantile):\r\n\r\n print(\"\\tComputing Quantile...\")\r\n # Init quantile\r\n q = np.zeros(T_max)\r\n for t in range(T_max):\r\n risk[t].sort()\r\n q[t] = risk[t][int(np.ceil(scenario_numbers[t] * quantile)) - 1]\r\n print(\"\\tDone\")\r\n\r\n return q", "def testKtoF(self):\r\n for integer, numeral in self.ktofvalues:\r\n result = conversions_refactored.convert('Kelvin', 'Fahrenheit', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')", "def akurtosistest(a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n n = float(a.shape[dimension])\r\n if n<20:\r\n print \"akurtosistest only valid for n>=20 ... 
continuing anyway, n=\",n\r\n b2 = akurtosis(a,dimension)\r\n E = 3.0*(n-1) /(n+1)\r\n varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))\r\n x = (b2-E)/N.sqrt(varb2)\r\n sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/\r\n (n*(n-2)*(n-3)))\r\n A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))\r\n term1 = 1 -2/(9.0*A)\r\n denom = 1 +x*N.sqrt(2/(A-4.0))\r\n denom = N.where(N.less(denom,0), 99, denom)\r\n term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))\r\n Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))\r\n Z = N.where(N.equal(denom,99), 0, Z)\r\n return Z, (1.0-zprob(Z))*2", "def quartiles(x, percentile):\n length = len(x)\n\n if percentile == 25:\n center = length // 4\n elif percentile == 75:\n center = length // 2 + length // 4\n\n x.sort()\n\n if length % 2 == 0:\n return (x[center - 1] + x[center]) / 2\n else:\n return x[center]", "def calculate_cornish_fisher_percentile(alpha, mu, sigma, skew, kurt):\n\n z = stats.norm.ppf(alpha)\n he2 = np.polynomial.hermite_e.hermeval(z, [0.0, 0.0, 1.0])\n he3 = np.polynomial.hermite_e.hermeval(z, [0.0, 0.0, 0.0, 1.0])\n he13 = np.polynomial.hermite_e.hermeval(z, [0.0, -1.0, 0.0, -2.0])\n\n w = (z +\n he2 * skew / 6 +\n he3 * kurt / 24 +\n he13 * (skew ** 2) / 36)\n\n return mu + sigma * w", "def testFtoK(self):\r\n for integer, numeral in self.ftokvalues:\r\n result = conversions_refactored.convert('Fahrenheit', 'Kelvin', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')", "def excesskurtosis(self):\n self._finalize()\n return self.vkurtosis-3", "def percentile(scores, student_score):\n scores = np.array(sorted(scores))\n num_scores = len(scores)\n return round(sum(scores <= student_score) / float(num_scores) * 100, 2)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_kilometers_to_miles(self) -> None:\n kilometers = 5\n metric = \"kilometers\"\n\n self.assertEqual(\"3.11\", f\"{to_miles(metric, kilometers):.2f}\")\n\n kilometers = 0\n metric = \"kilometers\"\n\n self.assertEqual(0, to_miles(metric, kilometers))" ]
[ "0.659924", "0.6551488", "0.64938307", "0.6469376", "0.6296186", "0.60896367", "0.6081468", "0.6069125", "0.602806", "0.60106987", "0.6009993", "0.59498876", "0.59466994", "0.5946249", "0.5943504", "0.5876415", "0.583249", "0.5811206", "0.5785323", "0.5775423", "0.5739732", "0.57355213", "0.5692874", "0.5690896", "0.56786484", "0.56751144", "0.56596667", "0.5638634", "0.56210566", "0.56147844" ]
0.7832344
0
Function used to read the corpus for training (old version).
def deprecated_read_corpus(filename): train_list = read_xml(filename, 1) corpus = [] for pair in train_list: corpus += [strip_multiple_whitespaces(pair.t)] corpus += [strip_multiple_whitespaces(pair.h)] return corpus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_corpus():\n # Define directory structure\n parent_path = os.getcwd() + '/'\n corpus_path = parent_path + 'corpus_data/'\n corpus_name = corpus_path + 'train_corpus_vocab.pickle'\n # Load corpus vocabulary\n with open(corpus_name, 'rb') as handle:\n train_vocab = pickle.load(handle)\n return(corpus_path, train_vocab)", "def read_corpus_from_file(input_file): \n \n print ('reading corpus')\n file = open(input_file, 'r')\n corpus = file.read()\n return corpus", "def load_corpus(fn):\n return corpora.svmlightcorpus.SvmLightCorpus(fn)", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)", "def load_corpus(self, fn):\n corpus = load_corpus(fn)\n self.corpus = corpus\n self.has_corpus = True", "def read_corpus(category=\"crude\"):\n files = reuters.fileids(category)\n return [[START_TOKEN] + [w.lower() for w in list(reuters.words(f))] + [END_TOKEN] for f in files]", "def read_corpus(dir):\n corpus = {}\n file_names = glob.glob(f\"{dir}/*\")\n for file_name in file_names:\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = \" \".join(open(file_name, \"rt\").readlines())\n text = text.replace(\"\\n \\n\", \" \")\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\" \", \" \")\n corpus[os.path.splitext(name)[0]] = text\n return corpus", "def read_corpus(filename, word_index, max_l, pad=2, clean_string=False,\n textField=3):\n corpus = []\n with open(filename) as f:\n for line in f:\n fields = line.strip().split(\"\\t\")\n text = fields[textField]\n if clean_string:\n text_clean = clean_str(text)\n else:\n text_clean = text.lower()\n sent = get_idx_from_sent(text_clean, word_index, max_l, pad)\n corpus.append(sent)\n return np.array(corpus, dtype=\"int32\")", "def readcorpus(index):\n for docid in range(0, 1001):\n file = open(PATH + \"Document-\" + str(docid) + \".txt\", 'r', encoding=\"utf-8\")\n allcontent = file.readlines()\n stoplist = getstopwords()\n corpus = []\n stemmer = PorterStemmer()\n getatitle(allcontent, corpus)\n getmetakeywords(allcontent, corpus)\n getcontent(allcontent, corpus)\n flagfordate = 0\n for i in range(0, len(corpus)):\n if flagfordate == 1:\n flagfordate = 0\n continue\n word = corpus[i]\n if word in MONTH:\n if expressionfordateb(corpus, i) is True or expressionfordatef(corpus, i) is True:\n word = constructdate_expression(corpus, i, index)\n increasecount(index[0], word, docid)\n if word in stoplist:\n continue\n increasecount(index[1], word, docid)\n increasecount(index[2], processword(stemmer.stem(word)), docid)", "def get_corpus():\n corpus_raw = []\n files = os.listdir()\n\n for name in files:\n if \".txt\" in name:\n try:\n file = open(name, \"rt\", encoding='utf8')\n data_org = file.read()\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a 
.txt file. Please ensure that the text is UTF-8 encoded.\")\n elif \".docx\" in name:\n try:\n data_org = docx2txt.process(name)\n corpus_raw.append(data_org)\n except:\n print(\"ERROR: Couldn't open a .docx file. Please ensure that the text is UTF-8 encoded.\")\n else:\n print(\"ERROR: Cannot print non .txt or .docx files. Please verify the input folder's contents.\")\n\n return corpus_raw", "def extract_corpus(corpus_dir = \"articles\"):\n corpus = {}\n num_documents = 0\n for filename in os.listdir(corpus_dir):\n with open(os.path.join(corpus_dir, filename)) as f:\n corpus[filename] = re.sub(\"[^\\w]\", \" \", f.read()).split()\n return corpus", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def load_data(data_path):\n print(\"RNN Language MODEL: Loading gigaword corpus\")\n return data.CorpusGigaword(data_path)", "def __init__(self, corpus):\n self.train(corpus)", "def read_news(old=True, recent=True):\n corpus = \"\"\n doc = Doc()\n if old:\n # Read news csv files\n print(\"Preparing old news\")\n path = '/data/WorkData/media_and_judging/data/collected/nytimes/'\n newsfiles = glob.glob(path + '*.csv')\n print(newsfiles)\n tf = len(newsfiles)\n print(\"Total old news files: {}\".format(tf))\n\n # concate all cleaned news text\n for news in newsfiles:\n df = pd.read_csv(news)\n count = 0\n for index, row in df.iterrows():\n count += 1\n # append text\n try:\n cleaned_text = normalizeString(row[5].replace(\"LEAD: \", \"\"))\n corpus += cleaned_text\n\n # add to lang row[4]: title, row[5]: content\n date = formatDate(row[0], '%Y/%m/%d')\n doc.add_news(row[4], cleaned_text, date)\n except:\n pass\n if count % 500 == 0:\n print(\"Progress Report: Old news {}, total {}\".\n format(count, len(news)))\n tf -= 1\n print(\"Files Remaining: \", tf)\n\n if recent:\n # Read json\n # {title: {content:, ptime:, utime:, }..}\n print(\"Preparing recent news\")\n path = '/data/WorkData/media_and_judging/data/collected/nytimes/'\n newsfiles = glob.glob(path + '*.json')\n tf = len(newsfiles)\n print(\"Length of recent news: {}\".format(tf))\n\n # concate all cleaned news text\n for news in newsfiles:\n count = 0\n n = json_load(news)\n for title in n:\n count += 1\n # append text\n cleaned_text = normalizeString(clean_text(n[title]['content']\n .replace(\"LEAD: \", \"\")))\n\n corpus += cleaned_text\n # add to lang\n try:\n if 'ptime' in n[title]:\n date = formatDate(n[title]['ptime'], '%Y%m%d%H%M%S%f')\n else:\n date = formatDate(n[title]['DATE'], '%m. 
%d, %Y')\n except:\n date = 'UNKNOWN'\n doc.add_news(title, cleaned_text, date)\n if count % 500 == 0:\n print(\"Progress Report: Recent news, {}, total {}\".\n format(count, len(news)))\n tf -= 1\n print(\"Files Remaining: \", tf)\n\n return corpus, doc", "def loadData(self, dataType): \n if dataType == \"train\":\n f = self.urls[0]\n elif dataType == \"valid\":\n f = self.urls[1]\n elif dataType == \"test\":\n f = self.urls[2] \n\n \"\"\" Load text file \"\"\"\n corpus = list()\n with io.open(f, encoding='UTF-8') as f:\n for line in f: \n if len(line) > self.minSeq and len(line) < self.maxLen:\n corpus.append(line.lstrip().rstrip().split(' '))\n return corpus", "def train(self, corpus): \n # TODO your code here\n # Tip: To get words from the corpus, try\n # for sentence in corpus.corpus:\n # for datum in sentence.data: \n # word = datum.word\n for sentence in corpus:\n prevWord = \"\"\n prevPrevWord = \"\"\n for word in sentence:\n word = word.strip(STRIP_CHARS)\n word = word.lower()\n currentWord = word\n self.unigramCounts[currentWord] += 1\n self.total += 1\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n if trigram not in self.trigramCounts:\n self.continuationCounts[currentWord] += 1\n self.followingCounts[(prevPrevWord, prevWord)] += 1\n self.trigramCounts[trigram] += 1\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n else:\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n self.total += len(self.unigramCounts)", "def read_wiki_corpus(corpus_dir: str, corpus_split: str, max_seq_len: Optional[int] = 50, vocab: Optional[dict] = None,\n stop_after: Optional[int] = None) -> Corpus:\n def _read_vocabulary(vocab_path: str) -> W2I:\n with open(vocab_path, \"r\") as vocab_file:\n idx, words = zip(*enumerate(line.strip() for line in vocab_file.readlines()))\n w2i = dict(zip(words, idx))\n w2i[\"<pad>\"] = len(w2i)\n w2i = W2I(w2i) # Return <unk> index if word is not in vocab\n\n return w2i\n\n assert corpus_split in (\"train\", \"valid\", \"test\"), \"Invalid split selected!\"\n\n if vocab is None:\n print(f\"Reading vocabulary under {corpus_dir}/vocab.txt...\")\n if os.path.exists(f\"{corpus_dir}/vocab.txt\"):\n vocab = _read_vocabulary(f\"{corpus_dir}/vocab.txt\")\n else:\n print(\"No vocabulary file found, building vocabulary from scratch...\")\n vocab = defaultdict(lambda: len(vocab))\n\n # Read in corpus\n print(f\"Reading corpus under {corpus_dir}/{corpus_split}.txt...\")\n indexed_sentences = []\n\n with open(f\"{corpus_dir}/{corpus_split}.txt\", \"r\") as corpus_file:\n for i, line in enumerate(corpus_file.readlines()):\n line = line.strip()\n\n # Skip empty lines\n if line in (\"\", \"<eos>\"):\n continue\n\n tokens = line.split()\n\n if tokens[-1] != \"<eos>\":\n tokens.append(\"<eos>\")\n\n indexed_sentence = torch.LongTensor(list(map(vocab.__getitem__, tokens))) # Index lookup\n indexed_sentences.append(indexed_sentence)\n\n if stop_after is not None:\n if i > stop_after:\n break\n\n # If vocab was build from scratch, convert\n if not isinstance(vocab, W2I):\n vocab = W2I(vocab)\n\n corpus = Corpus(indexed_sentences, vocab, max_seq_len)\n\n return corpus", "def train_with_corpus(corpus):\n\n chatbot.set_trainer(\"chatterbot.trainers.ChatterBotCorpusTrainer\")\n chatbot.train(corpus)", "def _initialize_corpus(self):\n vocab = self.vocab # vocab is the word vector\n theta = 
self.theta # theta is the model parameter\n corpus = self.corpus\n\n for line in corpus:\n for word in line:\n if word not in vocab:\n vocab[word] = init_vector(self.n)\n theta[word] = init_vector(self.n)\n\n if self.verbose:\n print(f\"{len(vocab)} words have been loaded\")", "def get_corpus_text(nr_files=199):\n fileids = nltk.corpus.treebank_raw.fileids()[:nr_files]\n corpus_text = nltk.corpus.treebank_raw.raw(fileids)\n corpus_text = corpus_text.replace(\".START\", \"\")\n return corpus_text", "def preprocess_corpus(train_sents):\n #lexicon_dict['stop_words'] = set(open('stop_words').read().split())\n lexicon_dict['people_name']=set(open('data\\\\lexicon\\\\firstname.5k').read().title().split())\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\lastname.5000').read().title().split()))\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\people.family_name').read().title().split()))\n lexicon_dict['people_person']=set(open('data\\\\lexicon\\\\people.person').read().title().split())\n lexicon_dict['people_name'].update(set(open('data\\\\lexicon\\\\people.person.lastnames').read().title().split()))\n \n lexicon_dict['product']=set(open('data\\\\lexicon\\\\product').read().title().split())\n lexicon_dict['business_products']=set(open('data\\\\lexicon\\\\business.consumer_product').read().title().split())\n\n lexicon_dict['sports_team']=set(open('data\\\\lexicon\\\\sports.sports_team').read().title().split())\n\n lexicon_dict['tvprog']=set(open('data\\\\lexicon\\\\tv.tv_program').read().title().split())\n \n lexicon_dict['museum'] = set(open('data\\\\lexicon\\\\architecture.museum').read().title().split())\n lexicon_dict['auto_make']=set(open('data\\\\lexicon\\\\automotive.make').read().title().split())\n lexicon_dict['auto_model']=set(open('data\\\\lexicon\\\\automotive.model').read().title().split())\n lexicon_dict['award']=set(open('data\\\\lexicon\\\\award.award').read().title().split())\n lexicon_dict['fest_ser']=set(open('data\\\\lexicon\\\\base.events.festival_series').read().title().split())\n lexicon_dict['reg_name']=set(open('data\\\\lexicon\\\\bigdict').read().title().split())\n lexicon_dict['newspaper']=set(open('data\\\\lexicon\\\\book.newspaper').read().title().split())\n lexicon_dict['tv_channels']=set(open('data\\\\lexicon\\\\broadcast.tv_channel').read().title().split())\n lexicon_dict['business_brand']=set(open('data\\\\lexicon\\\\business.brand').read().title().split())\n lexicon_dict['business_company']=set(open('data\\\\lexicon\\\\business.brand').read().title().split())\n lexicon_dict['business_brand']=set(open('data\\\\lexicon\\\\business.consumer_company').read().title().split())\n\n lexicon_dict['business_sponsor']=set(open('data\\\\lexicon\\\\business.sponsor').read().title().split())\n lexicon_dict['top10']=set(open('data\\\\lexicon\\\\cap.10').read().title().split())\n lexicon_dict['top100']=set(open('data\\\\lexicon\\\\cap.100').read().title().split())\n lexicon_dict['cap500']=set(open('data\\\\lexicon\\\\cap.500').read().title().split())\n lexicon_dict['cap1000']=set(open('data\\\\lexicon\\\\cap.1000').read().title().split())\n lexicon_dict['video_game']=set(open('data\\\\lexicon\\\\cvg.computer_videogame').read().title().split())\n lexicon_dict['cvg_developer']=set(open('data\\\\lexicon\\\\cvg.cvg_developer').read().title().split())\n lexicon_dict['cvg_platform']=set(open('data\\\\lexicon\\\\cvg.cvg_platform').read().title().split())\n #leaving out 
dictionaries.conf,english.stop,lower.100,lower.500,lower.1000,lower.5000,lower.10000\n lexicon_dict['dictionaries_conf']=set(open('data\\\\lexicon\\\\dictionaries.conf').read().title().split())\n lexicon_dict['english_stop']=set(open('data\\\\lexicon\\\\english.stop').read().title().split())\n lexicon_dict['lower_10000']=set(open('data\\\\lexicon\\\\lower.10000').read().title().split())\n #lexicon_dict['cvg_platform']=set(open('data\\\\lexicon\\\\cvg.cvg_platform').read().title().split())\n \n lexicon_dict['university']=set(open('data\\\\lexicon\\\\education.university').read().title().split())\n lexicon_dict['gov_agency']=set(open('data\\\\lexicon\\\\government.government_agency').read().title().split())\n\n\n lexicon_dict['location']=set(open('data\\\\lexicon\\\\location').read().title().split())\n lexicon_dict['location'].update(set(open('data\\\\lexicon\\\\location.country').read().title().split()))\n lexicon_dict['sports_league']=set(open('data\\\\lexicon\\\\sports.sports_league').read().title().split())\n\n\n lexicon_dict['time_holiday']=set(open('data\\\\lexicon\\\\time.holiday').read().title().split())\n lexicon_dict['time_rec_event']=set(open('data\\\\lexicon\\\\time.recurring_event').read().title().split())\n lexicon_dict['roads']=set(open('data\\\\lexicon\\\\transportation.road').read().title().split())\n lexicon_dict['tvnet']=set(open('data\\\\lexicon\\\\tv.tv_network').read().title().split())\n\n lexicon_dict['ven_company']=set(open('data\\\\lexicon\\\\venture_capital.venture_funded_company').read().title().split())\n lexicon_dict['venues']=set(open('data\\\\lexicon\\\\venues').read().title().split())", "def load_data(corpus_dir: str, max_seq_len: int) -> Tuple[Corpus, Corpus]:\n start = time.time()\n train_set = read_wiki_corpus(corpus_dir, \"train\", max_seq_len=max_seq_len)\n valid_set = read_wiki_corpus(corpus_dir, \"valid\", max_seq_len=max_seq_len, vocab=train_set.vocab)\n end = time.time()\n duration = end - start\n minutes, seconds = divmod(duration, 60)\n\n print(f\"Data loading took {int(minutes)} minute(s), {seconds:.2f} second(s).\")\n\n return train_set, valid_set", "def read_data(self, dirname):\n # NOTE: We cache stemmed documents for speed\n # (i.e. 
write to files in new 'stemmed/' dir).\n\n print(\"Reading in documents...\")\n # dict mapping file names to list of \"words\" (tokens)\n filenames = os.listdir(dirname)\n subdirs = os.listdir(dirname)\n if 'stemmed' in subdirs:\n titles, docs = self.__read_stemmed_data(dirname)\n else:\n titles, docs = self.__read_raw_data(dirname)\n\n # Sort document alphabetically by title to ensure we have the proper\n # document indices when referring to them.\n ordering = [idx for idx, title in sorted(enumerate(titles),\n key = lambda xx : xx[1])]\n\n self.titles = []\n self.docs = []\n numdocs = len(docs)\n for d in range(numdocs):\n self.titles.append(titles[ordering[d]])\n self.docs.append(docs[ordering[d]])\n\n # Get the vocabulary.\n self.vocab = [xx for xx in self.get_uniq_words()]", "def read_corpus(corpus_path):\n idx = 0\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, tag_, pos_, ner_ = [], [], [], []\n\n for line in lines:\n idx += 1\n if line.find(\"DOC-ID\") < 0 and line != '\\n':\n try:\n [char, label, pos_tag, ner_tag] = line.strip().split()\n sent_.append(char)\n tag_.append(label)\n pos_.append(pos_tag)\n ner_.append(ner_tag)\n except:\n print(line)\n else:\n # print(line)\n if idx > 1:\n data.append((sent_, tag_, pos_, ner_))\n sent_, tag_, pos_, ner_ = [], [], [], []\n\n return data", "def demo(corpus_root=None):\n if not corpus_root:\n from nltk.data import find\n\n corpus_root = find(\"corpora/childes/data-xml/Eng-USA/\")\n\n try:\n childes = CHILDESCorpusReader(corpus_root, \".*.xml\")\n # describe all corpus\n for file in childes.fileids()[:5]:\n corpus = \"\"\n corpus_id = \"\"\n for (key, value) in childes.corpus(file)[0].items():\n if key == \"Corpus\":\n corpus = value\n if key == \"Id\":\n corpus_id = value\n print(\"Reading\", corpus, corpus_id, \" .....\")\n print(\"words:\", childes.words(file)[:7], \"...\")\n print(\n \"words with replaced words:\",\n childes.words(file, replace=True)[:7],\n \" ...\",\n )\n print(\"words with pos tags:\", childes.tagged_words(file)[:7], \" ...\")\n print(\"words (only MOT):\", childes.words(file, speaker=\"MOT\")[:7], \"...\")\n print(\"words (only CHI):\", childes.words(file, speaker=\"CHI\")[:7], \"...\")\n print(\"stemmed words:\", childes.words(file, stem=True)[:7], \" ...\")\n print(\n \"words with relations and pos-tag:\",\n childes.words(file, relation=True)[:5],\n \" ...\",\n )\n print(\"sentence:\", childes.sents(file)[:2], \" ...\")\n for (participant, values) in childes.participants(file)[0].items():\n for (key, value) in values.items():\n print(\"\\tparticipant\", participant, key, \":\", value)\n print(\"num of sent:\", len(childes.sents(file)))\n print(\"num of morphemes:\", len(childes.words(file, stem=True)))\n print(\"age:\", childes.age(file))\n print(\"age in month:\", childes.age(file, month=True))\n print(\"MLU:\", childes.MLU(file))\n print()\n\n except LookupError as e:\n print(\n \"\"\"The CHILDES corpus, or the parts you need, should be manually\n downloaded from https://childes.talkbank.org/data-xml/ and saved at\n [NLTK_Data_Dir]/corpora/childes/\n Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.:\n demo('/path/to/childes/data-xml/Eng-USA/\")\n \"\"\"\n )\n # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip')\n # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read()))\n ##this fails\n # childes = 
CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist())", "def load_corpus_gensim():\r\n\tglobal gensim_corpus\r\n\tif os.path.exists(paths.path_data_mmcorpus):\r\n\t\tprint('\\nloading gensim corpus')\r\n\t\tgensim_corpus = gensim.corpora.MmCorpus(paths.path_data_mmcorpus)\r\n\t\tprint(gensim_corpus)\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def readCorpus(file_path):\r\n if '.json' in file_path:\r\n return pd.read_json(file_path, lines=True)\r\n else:\r\n return pd.read_csv(file_path)", "def get_raw_corpus():\n with open(RAW_CORPUS_PATH, 'r') as f:\n return f.read().splitlines()" ]
[ "0.73244303", "0.7174164", "0.7146987", "0.6780772", "0.67489094", "0.6740784", "0.6627824", "0.65085244", "0.6502357", "0.6485583", "0.6474232", "0.6469331", "0.6435133", "0.6425884", "0.6361243", "0.6269834", "0.62694544", "0.61683667", "0.6163791", "0.6159709", "0.60779965", "0.6067023", "0.6056091", "0.6043212", "0.6041387", "0.6036512", "0.60209304", "0.601221", "0.60009646", "0.59972185" ]
0.73263
0
Increase or decrease the resolution of a distribution. Scales the resolution of the distribution by first interpolating the data values and then resampling it at the positions specified in the reference distribution. If the `scale` parameter is specified, the number of points in each dimension will be increased `scale` times. Currently, only regularly spaced gridded data is supported, due to the use of RectBivariateSpline for interpolation. See the scipy.interpolate.RectBivariateSpline documentation for more information.
def scale_grid(self, reference_distribution=None, new_resolution=None): if reference_distribution is None: reference_distribution = self if new_resolution is None: new_resolution = reference_distribution.resolution scale = 1 else: scale = new_resolution / reference_distribution.resolution # Generate new positions by scaling the positions given in # reference_distribution x = reference_distribution.position[0, 0, :] y = reference_distribution.position[1, :, 0] xscale = scale * (len(x) - 1) + 1 yscale = scale * (len(y) - 1) + 1 x_new = np.linspace(x[0], x[-1], xscale) y_new = np.linspace(y[0], y[-1], yscale) # Set up linear interpolation function for original grid interp_spline = interpolate.RectBivariateSpline( self.position[0, 0, :], self.position[1, :, 0], self.data.T, kx=1, ky=1 ) # Find data points at new positions new_data = interp_spline(x_new, y_new) xx, yy = np.meshgrid(x_new, y_new) new_distribution = Distribution( new_data.T, resolution=new_resolution, position=np.stack([xx, yy]) ) return new_distribution
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def set_scale(self, scale):\n scale = float(scale)\n if scale <= 1:\n raise ValueError('The scale parameter must exceed 1.')\n self._a = scale", "def change_scaling(self, scales=None, offsets=None) -> None:\n self.points.change_scaling(scales, offsets)\n\n self.header.scales = scales\n self.header.offsets = offsets", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def _setscales(self, ndata, largestscale, notes, scaling):\n if scaling==\"log\":\n if notes<=0: notes=1\n # adjust nscale so smallest scale is 1\n noctave=self._log2(2.*ndata/largestscale)\n self.nscale=notes*noctave\n self.scales=numpy.zeros(self.nscale, float)\n for j in range(self.nscale):\n self.scales[j]=2.0**(float(j)/notes)\n elif scaling==\"linear\":\n nmax=ndata/largestscale/2\n self.scales=numpy.arange(float(2), float(nmax))\n self.nscale=len(self.scales)\n else: raise ValueError, \"scaling must be linear or log\"\n return", "def setscaling(self, scaling):\n\n self.__scaling = scaling", "def normalize_1d(x, scale=(0, 1, 1024)):\n new_min = scale[0]\n new_max = scale[1]\n new_len = scale[2]\n (min_x, max_x, old_size) = scale_1d(x)\n x_norm = (x - min_x) / (max_x - min_x)\n old_baseline = np.linspace(0, 1, old_size)\n new_baseline = np.linspace(0, 1, new_len)\n if len(old_baseline) <= 1:\n old_baseline = np.array([0, 1])\n x_norm = np.array([1, 0])\n x_interp = interp.interp1d(old_baseline, x_norm)\n x_resized = (x_interp(new_baseline) * (new_max - new_min)) + new_min\n return x_resized", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def scaleProcess(process,scale):\n #print '>>> scaleProcess(\"%s\",%.3f):'%(process.process(),scale)\n #print \">>> rate before = %s\"%(process.rate())\n process.set_rate(process.rate()*scale)\n #print \">>> rate after = %s\"%(process.rate())", "def update(self, new_scale=None):\n if not self._enabled:\n return\n\n _scale, _growth_tracker = self._check_scale_growth_tracker(\"update\")\n\n if new_scale is not None:\n # Accept a new user-defined scale.\n if isinstance(new_scale, float):\n self._scale.fill_(new_scale) # type: ignore[union-attr]\n else:\n reason = \"new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False.\"\n assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]\n assert new_scale.numel() == 1, reason\n assert new_scale.requires_grad is False, reason\n self._scale.copy_(new_scale) # type: ignore[union-attr]\n else:\n # Consume shared inf/nan data collected from optimizers to update the scale.\n # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.\n found_infs = [\n found_inf.to(device=_scale.device, non_blocking=True)\n for state in self._per_optimizer_states.values()\n for found_inf in state[\"found_inf_per_device\"].values()\n ]\n\n assert len(found_infs) > 0, \"No inf checks were recorded prior to update.\"\n\n found_inf_combined = found_infs[0]\n\n # Update across all model parallel instances.\n 
torch.distributed.all_reduce(\n found_inf_combined, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()\n )\n\n if len(found_infs) > 1:\n for i in range(1, len(found_infs)):\n found_inf = found_infs[i]\n # Update across all model parallel instances.\n torch.distributed.all_reduce(\n found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()\n )\n found_inf_combined += found_inf\n\n if found_inf_combined > 0:\n self._hysteresis_tracker -= 1\n if self._hysteresis_tracker <= 0:\n # When hysteresis becomes zero, follow the native grad scale update rule.\n # Increase scale and reset growth tracker\n torch._amp_update_scale_(\n _scale,\n _growth_tracker,\n found_inf_combined,\n self._growth_factor,\n self._backoff_factor,\n self._growth_interval,\n )\n else:\n # Only reset the growth tracker when hysteresis is larger than zero\n _growth_tracker.fill_(0.0)\n else:\n # When no inf found, follow the native grad scale update rule.\n # Increment growth_tracker, update scale when growth tracker reaches the interval, and\n # reset the hysteresis tracker.\n torch._amp_update_scale_(\n _scale,\n _growth_tracker,\n found_inf_combined,\n self._growth_factor,\n self._backoff_factor,\n self._growth_interval,\n )\n self._hysteresis_tracker = self.hysteresis\n\n # To prepare for next iteration, clear the data collected from optimizers this iteration.\n self._per_optimizer_states = defaultdict(torch.cuda.amp.grad_scaler._refresh_per_optimizer_state)", "def scale(self):", "def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...", "def scale(img, scale):\n return resize(img, x_scale=scale, y_scale=scale)", "def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)", "def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)", "def scale(self, sx, sy):\n self._impl.scale(sx, sy)", "def export_to_scale(\n paper_size: tuple[float, float] = (8.5, 11),\n origin: tuple[float, float] = (0, 0),\n scale: float = 1,\n dpi: int = 300,\n):\n doc = make_doc(offset=(1, 2), size=(6.5, 8))\n msp = doc.modelspace()\n msp.add_mtext(\n f\"scale = 1:{scale}\\n\"\n f\"paper size = {paper_size[0]:.1f} inch x {paper_size[1]:.1f} inch \",\n dxfattribs={\"style\": \"OpenSans\", \"char_height\": 0.25},\n ).set_location(\n (0.2, 0.2), attachment_point=MTextEntityAlignment.BOTTOM_LEFT\n )\n\n ctx = RenderContext(doc)\n fig: plt.Figure = plt.figure(dpi=dpi)\n ax: plt.Axes = fig.add_axes([0, 0, 1, 1])\n\n # disable all margins\n ax.margins(0)\n\n # get the final render limits in drawing units:\n min_x, min_y, max_x, max_y = render_limits(\n origin, paper_size, scale\n )\n\n ax.set_xlim(min_x, max_x)\n ax.set_ylim(min_y, max_y)\n\n out = MatplotlibBackend(ax)\n # finalizing invokes auto-scaling by default!\n Frontend(ctx, out).draw_layout(msp, finalize=False)\n\n # set output size in inches:\n fig.set_size_inches(paper_size[0], paper_size[1], forward=True)\n\n fig.savefig(CWD / f\"image_scale_1_{scale}.pdf\", dpi=dpi)\n plt.close(fig)", "def setScaleX(self,startx,endx):\r\n if startx == endx:\r\n endx += 1\r\n self.scaleLock.acquire()\r\n self.scalex = [startx,endx]\r\n self.scaleLock.release()", "def update_scaling_parameters(DomainName=None, ScalingParameters=None):\n pass", "def rescale(range1, range2):\n min1, max1, min2, max2 = min(range1), max(range1), min(range2), max(range2)\n def resize(value):\n return (((value - min1) * (max2 - min2)) 
/ (max1 - min1)) + min2\n return resize", "def random_scale(img_scales, mode='range'):\n num_scales = len(img_scales)\n if num_scales == 1: # fixed scale is specified\n img_scale = img_scales[0]\n elif num_scales == 2: # randomly sample a scale\n if mode == 'range':\n ratio=max(img_scales[0])/min(img_scales[0])\n img_scale_long = [max(s) for s in img_scales]\n img_scale_short = [min(s) for s in img_scales]\n long_edge = np.random.randint(\n min(img_scale_long),\n max(img_scale_long) + 1)\n \"\"\"\n short_edge = np.random.randint(\n min(img_scale_short),\n max(img_scale_short) + 1)\n \"\"\"\n short_edge = int(long_edge/ratio) \n img_scale = (long_edge, short_edge)\n elif mode == 'value':\n img_scale = img_scales[np.random.randint(num_scales)]\n else:\n if mode != 'value':\n raise ValueError('Only \"value\" mode supports more than 2 image scales')\n img_scale = img_scales[np.random.randint(num_scales)]\n return img_scale", "def scale(a, tmin=0.0, tmax=1.0):\n return np.interp(a, (a.min(), a.max()), (tmin, tmax))", "def scale(self, scale_x: float, scale_y: float) -> None:\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y", "def scale(self, fname, **kw):\n return self.scales.scale(fname, **kw)" ]
[ "0.65477973", "0.65477973", "0.62743556", "0.62523687", "0.61580896", "0.61553645", "0.61139524", "0.60290205", "0.5963627", "0.59028816", "0.58840287", "0.58265793", "0.57973707", "0.5793271", "0.578469", "0.577135", "0.57707953", "0.5713229", "0.57025945", "0.5695182", "0.568974", "0.5688705", "0.56598186", "0.56437075", "0.56110567", "0.5600534", "0.55975693", "0.55857265", "0.55801094", "0.55729693" ]
0.67478675
0
Determine which site to render given the hostname. Currently we have 2 renderings of sites, "pending" and "ncats". They differ in aesthetics, yet share the same backend. Hostnames "biothings.ncats.io" and "biothings[|.ci|.test].transltr.io" use the "ncats" rendering, while "pending.biothings.io" uses "pending".
def hostname_to_site(hostname: str) -> str: if hostname == "biothings.ncats.io" or hostname.endswith("transltr.io"): return "ncats" return "pending"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def host_to_site(host):\n\n if host:\n # www.facebook.com m.facebook.com l.facebook.com lm.facebook.com\n if host.endswith('facebook.com'):\n return 'Facebook'\n # youtu.be www.youtube.com youtube.com m.youtube.com\n elif host.endswith('youtube.com') or host == 'youtu.be':\n return 'YouTube'\n # old.reddit.com www.reddit.com\n elif host.endswith('reddit.com'):\n return 'Reddit'\n # t.co twitter.com\n elif host.endswith('twitter.com') or host == 't.co':\n return 'Twitter'\n elif host.endswith('tiktok.com'):\n return 'TikTok'\n return None", "def get_site(name):\n return sites[name]", "def choose_server(self, domain):\n try:\n domain = domain.encode('idna').decode('utf-8')\n except TypeError:\n domain = domain.decode('utf-8').encode('idna').decode('utf-8')\n except AttributeError:\n domain = domain.decode('utf-8').encode('idna').decode('utf-8')\n if domain.endswith(\"-NORID\"):\n return NICClient.NORIDHOST\n if domain.endswith(\"id\"):\n return NICClient.PANDIHOST\n if domain.endswith(\"hr\"):\n return NICClient.HR_HOST\n\n domain = domain.split('.')\n if len(domain) < 2:\n return None\n tld = domain[-1]\n if tld[0].isdigit():\n return NICClient.ANICHOST\n elif tld == 'ai':\n return NICClient.AI_HOST\n elif tld == 'app':\n return NICClient.APP_HOST\n elif tld == 'dev':\n return NICClient.DEV_HOST\n elif tld == 'games':\n return NICClient.GAMES_HOST\n elif tld == 'page':\n return NICClient.PAGE_HOST\n elif tld == 'money':\n return NICClient.MONEY_HOST\n elif tld == 'online':\n return NICClient.ONLINE_HOST\n elif tld == 'cl':\n return NICClient.CL_HOST\n elif tld == 'ar':\n return NICClient.AR_HOST\n elif tld == 'by':\n return NICClient.BY_HOST\n elif tld == 'cr':\n return NICClient.CR_HOST\n elif tld == 'ca':\n return NICClient.CA_HOST\n elif tld == 'do':\n return NICClient.DO_HOST\n elif tld == 'de':\n return NICClient.DE_HOST\n elif tld == 'hk':\n return NICClient.HK_HOST\n elif tld == 'hn':\n return NICClient.HN_HOST\n elif tld == 'jobs':\n return NICClient.JOBS_HOST\n elif tld == 'lat':\n return NICClient.LAT_HOST\n elif tld == 'li':\n return NICClient.LI_HOST\n elif tld == 'mx':\n return NICClient.MX_HOST\n elif tld == 'pe':\n return NICClient.PE_HOST\n elif tld == 'ist':\n return NICClient.IST_HOST\n elif tld == 'kz':\n return NICClient.KZ_HOST\n elif tld == 'abogado':\n return NICClient.ABOGADO_HOST\n elif tld == 'accountant':\n return NICClient.ACCOUNTANT_HOST\n elif tld == 'aero':\n return NICClient.AERO_HOST\n elif tld == 'ag':\n return NICClient.AG_HOST\n elif tld == 'ai':\n return NICClient.AI_HOST\n elif tld == 'allfinanz':\n return NICClient.ALLFINANZ_HOST\n elif tld == 'alsace':\n return NICClient.ALSACE_HOST\n elif tld == 'am':\n return NICClient.AM_HOST\n elif tld == 'amsterdam':\n return NICClient.AMSTERDAM_HOST\n elif tld == 'aquarelle':\n return NICClient.AQUARELLE_HOST\n elif tld == 'as':\n return NICClient.AS_HOST\n elif tld == 'asia':\n return NICClient.ASIA_HOST\n elif tld == 'au':\n return NICClient.AU_HOST\n elif tld == 'aw':\n return NICClient.AW_HOST\n elif tld == 'ax':\n return NICClient.AX_HOST\n elif tld == 'bank':\n return NICClient.BANK_HOST\n elif tld == 'bar':\n return NICClient.BAR_HOST\n elif tld == 'barclaycard':\n return NICClient.BARCLAYCARD_HOST\n elif tld == 'barclays':\n return NICClient.BARCLAYS_HOST\n elif tld == 'bayern':\n return NICClient.BAYERN_HOST\n elif tld == 'beer':\n return NICClient.BEER_HOST\n elif tld == 'berlin':\n return NICClient.BERLIN_HOST\n elif tld == 'bi':\n return NICClient.BI_HOST\n elif tld == 'bid':\n return 
NICClient.BID_HOST\n elif tld == 'bio':\n return NICClient.BIO_HOST\n elif tld == 'bmw':\n return NICClient.BMW_HOST\n elif tld == 'biz':\n return NICClient.BIZ_HOST\n elif tld == 'bj':\n return NICClient.BJ_HOST\n elif tld == 'blog':\n return NICClient.BLOG_HOST\n elif tld == 'brussels':\n return NICClient.BRUSSELS_HOST\n elif tld == 'budapest':\n return NICClient.BUDAPEST_HOST\n elif tld == 'build':\n return NICClient.BUILD_HOST\n elif tld == 'buzz':\n return NICClient.BUZZ_HOST\n elif tld == 'bw':\n return NICClient.BW_HOST\n elif tld == 'by':\n return NICClient.BY_HOST\n elif tld == 'bzh':\n return NICClient.BZH_HOST\n elif tld == 'ca':\n return NICClient.CA_HOST\n elif tld == 'cam':\n return NICClient.CAM_HOST\n elif tld == 'cancerresearch':\n return NICClient.CANCERRESEARCH_HOST\n elif tld == 'capetown':\n return NICClient.CAPETOWN_HOST\n elif tld == 'career':\n return NICClient.CAREER_HOST\n elif tld == 'casa':\n return NICClient.CASA_HOST\n elif tld == 'cat':\n return NICClient.CAT_HOST\n elif tld == 'cc':\n return NICClient.CC_HOST\n elif tld == 'ch':\n return NICClient.CH_HOST\n elif tld == 'ci':\n return NICClient.CI_HOST\n elif tld == 'cl':\n return NICClient.CL_HOST\n elif tld == 'cloud':\n return NICClient.CLOUD_HOST\n elif tld == 'club':\n return NICClient.CLUB_HOST\n elif tld == 'cm':\n return NICClient.CM_HOST\n elif tld == 'cologne':\n return NICClient.COLOGNE_HOST\n elif tld == 'cooking':\n return NICClient.COOKING_HOST\n elif tld == 'coop':\n return NICClient.COOP_HOST\n elif tld == 'cricket':\n return NICClient.CRICKET_HOST\n elif tld == 'cuisinella':\n return NICClient.CUISINELLA_HOST\n elif tld == 'cx':\n return NICClient.CX_HOST\n elif tld == 'cymru':\n return NICClient.CYMRU_HOST\n elif tld == 'cz':\n return NICClient.CZ_HOST\n elif tld == 'date':\n return NICClient.DATE_HOST\n elif tld == 'de':\n return NICClient.DE_HOST\n elif tld == 'desi':\n return NICClient.DESI_HOST\n elif tld == 'dk':\n return NICClient.DK_HOST\n elif tld == 'dm':\n return NICClient.DM_HOST\n elif tld == 'do':\n return NICClient.DO_HOST\n elif tld == 'download':\n return NICClient.DOWNLOAD_HOST\n elif tld == 'durban':\n return NICClient.DURBAN_HOST\n elif tld == 'dvag':\n return NICClient.DVAG_HOST\n elif tld == 'ee':\n return NICClient.EE_HOST\n elif tld == 'eu':\n return NICClient.EU_HOST\n elif tld == 'eurovision':\n return NICClient.EUROVISION_HOST\n elif tld == 'eus':\n return NICClient.EUS_HOST\n elif tld == 'faith':\n return NICClient.FAITH_HOST\n elif tld == 'fashion':\n return NICClient.FASHION_HOST\n elif tld == 'fi':\n return NICClient.FI_HOST\n elif tld == 'film':\n return NICClient.FILM_HOST\n elif tld == 'firmdale':\n return NICClient.FIRMDALE_HOST\n elif tld == 'fishing':\n return NICClient.FISHING_HOST\n elif tld == 'fit':\n return NICClient.FIT_HOST\n elif tld == 'flsmidth':\n return NICClient.FLSMIDTH_HOST\n elif tld == 'frl':\n return NICClient.FRL_HOST\n elif tld == 'frogans':\n return NICClient.FROGANS_HOST\n elif tld == 'ga':\n return NICClient.GA_HOST\n elif tld == 'gal':\n return NICClient.GAL_HOST\n elif tld == 'games':\n return NICClient.GAMES_HOST\n elif tld == 'garden':\n return NICClient.GARDEN_HOST\n elif tld == 'gd':\n return NICClient.GD_HOST\n elif tld == 'gdn':\n return NICClient.GDN_HOST\n elif tld == 'gent':\n return NICClient.GENT_HOST\n elif tld == 'gg':\n return NICClient.GG_HOST\n elif tld == 'gl':\n return NICClient.GL_HOST\n elif tld == 'global':\n return NICClient.GLOBAL_HOST\n elif tld == 'gmx':\n return NICClient.GMX_HOST\n elif tld == 'gold':\n 
return NICClient.GOLD_HOST\n elif tld == 'gop':\n return NICClient.GOP_HOST\n elif tld == 'gov':\n return NICClient.GOV_HOST\n elif tld == 'gq':\n return NICClient.GQ_HOST\n elif tld == 'gy':\n return NICClient.GY_HOST\n elif tld == 'hamburg':\n return NICClient.HAMBURG_HOST\n elif tld == 'hn':\n return NICClient.HN_HOST\n elif tld == 'horse':\n return NICClient.HORSE_HOST\n elif tld == 'hr':\n return NICClient.HR_HOST\n elif tld == 'ht':\n return NICClient.HT_HOST\n elif tld == 'hu':\n return NICClient.HU_HOST\n elif tld == 'ibm':\n return NICClient.IBM_HOST\n elif tld == 'ie':\n return NICClient.IE_HOST\n elif tld == 'ifm':\n return NICClient.IFM_HOST\n elif tld == 'im':\n return NICClient.IM_HOST\n elif tld == 'int':\n return NICClient.INT_HOST\n elif tld == 'io':\n return NICClient.IO_HOST\n elif tld == 'is':\n return NICClient.IS_HOST\n elif tld == 'it':\n return NICClient.IT_HOST\n elif tld == 'java':\n return NICClient.JAVA_HOST\n elif tld == 'je':\n return NICClient.JE_HOST\n elif tld == 'jetzt':\n return NICClient.JETZT_HOST\n elif tld == 'jobs':\n return NICClient.JOBS_HOST\n elif tld == 'joburg':\n return NICClient.JOBURG_HOST\n elif tld == 'ki':\n return NICClient.KI_HOST\n elif tld == 'kiwi':\n return NICClient.KIWI_HOST\n elif tld == 'koeln':\n return NICClient.KOELN_HOST\n elif tld == 'ky':\n return NICClient.KY_HOST\n elif tld == 'la':\n return NICClient.LA_HOST\n elif tld == 'lacaixa':\n return NICClient.LACAIXA_HOST\n elif tld == 'lat':\n return NICClient.LAT_HOST\n elif tld == 'latrobe':\n return NICClient.LATROBE_HOST\n elif tld == 'leclerc':\n return NICClient.LECLERC_HOST\n elif tld == 'li':\n return NICClient.LI_HOST\n elif tld == 'live':\n return NICClient.LIVE_HOST\n elif tld == 'loan':\n return NICClient.LOAN_HOST\n elif tld == 'london':\n return NICClient.LONDON_HOST\n elif tld == 'lt':\n return NICClient.LT_HOST\n elif tld == 'lu':\n return NICClient.LU_HOST\n elif tld == 'luxe':\n return NICClient.LUXE_HOST\n elif tld == 'luxury':\n return NICClient.LUXURY_HOST\n elif tld == 'ma':\n return NICClient.MA_HOST\n elif tld == 'madrid':\n return NICClient.MADRID_HOST\n elif tld == 'mango':\n return NICClient.MANGO_HOST\n elif tld == 'md':\n return NICClient.MD_HOST\n elif tld == 'me':\n return NICClient.ME_HOST\n elif tld == 'men':\n return NICClient.MEN_HOST\n elif tld == 'menu':\n return NICClient.MENU_HOST\n elif tld == 'mg':\n return NICClient.MG_HOST\n elif tld == 'miami':\n return NICClient.MIAMI_HOST\n elif tld == 'mini':\n return NICClient.MINI_HOST\n elif tld == 'ml':\n return NICClient.ML_HOST\n elif tld == 'mo':\n return NICClient.MO_HOST\n elif tld == 'moe':\n return NICClient.MOE_HOST\n elif tld == 'monash':\n return NICClient.MONASH_HOST\n elif tld == 'moscow':\n return NICClient.MOSCOW_HOST\n elif tld == 'ms':\n return NICClient.MS_HOST\n elif tld == 'mu':\n return NICClient.MU_HOST\n elif tld == 'museum':\n return NICClient.MUSEUM_HOST\n elif tld == 'na':\n return NICClient.NA_HOST\n elif tld == 'name':\n return NICClient.NAME_HOST\n elif tld == 'nc':\n return NICClient.NC_HOST\n elif tld == 'news':\n return NICClient.NEWS_HOST\n elif tld == 'nf':\n return NICClient.NF_HOST\n elif tld == 'nl':\n return NICClient.NL_HOST\n elif tld == 'no':\n return NICClient.NO_HOST\n elif tld == 'nrw':\n return NICClient.NRW_HOST\n elif tld == 'nu':\n return NICClient.NU_HOST\n elif tld == 'nyc':\n return NICClient.NYC_HOST\n elif tld == 'one':\n return NICClient.ONE_HOST\n elif tld == 'online':\n return NICClient.ONLINE_HOST\n elif tld == 'ooo':\n return 
NICClient.OOO_HOST\n elif tld == 'ovh':\n return NICClient.OVH_HOST\n elif tld == 'paris':\n return NICClient.PARIS_HOST\n elif tld == 'party':\n return NICClient.PARTY_HOST\n elif tld == 'pf':\n return NICClient.PF_HOST\n elif tld == 'physio':\n return NICClient.PHYSIO_HOST\n elif tld == 'plus':\n return NICClient.PLUS_HOST\n elif tld == 'pm':\n return NICClient.PM_HOST\n elif tld == 'pohl':\n return NICClient.POHL_HOST\n elif tld == 'post':\n return NICClient.POST_HOST\n elif tld == 'qpon':\n return NICClient.QPON_HOST\n elif tld == 'quebec':\n return NICClient.QUEBEC_HOST\n elif tld == 'racing':\n return NICClient.RACING_HOST\n elif tld == 're':\n return NICClient.RE_HOST\n elif tld == 'reise':\n return NICClient.REISE_HOST\n elif tld == 'review':\n return NICClient.REVIEW_HOST\n elif tld == 'rodeo':\n return NICClient.RODEO_HOST\n elif tld == 'ruhr':\n return NICClient.RUHR_HOST\n elif tld == 'samsung':\n return NICClient.SAMSUNG_HOST\n elif tld == 'saarland':\n return NICClient.SAARLAND_HOST\n elif tld == 'sb':\n return NICClient.SB_HOST\n elif tld == 'sca':\n return NICClient.SCA_HOST\n elif tld == 'scb':\n return NICClient.SCB_HOST\n elif tld == 'schmidt':\n return NICClient.SCHMIDT_HOST\n elif tld == 'science':\n return NICClient.SCIENCE_HOST\n elif tld == 'scot':\n return NICClient.SCOT_HOST\n elif tld == 'se':\n return NICClient.SE_HOST\n elif tld == 'sh':\n return NICClient.SH_HOST\n elif tld == 'si':\n return NICClient.SI_HOST\n elif tld == 'sk':\n return NICClient.SK_HOST\n elif tld == 'sky':\n return NICClient.SKY_HOST\n elif tld == 'sm':\n return NICClient.SM_HOST\n elif tld == 'sn':\n return NICClient.SN_HOST\n elif tld == 'so':\n return NICClient.SO_HOST\n elif tld == 'spiegel':\n return NICClient.SPIEGEL_HOST\n elif tld == 'st':\n return NICClient.ST_HOST\n elif tld == 'stream':\n return NICClient.STREAM_HOST\n elif tld == 'study':\n return NICClient.STUDY_HOST\n elif tld == 'sucks':\n return NICClient.SUCKS_HOST\n elif tld == 'surf':\n return NICClient.SURF_HOST\n elif tld == 'sx':\n return NICClient.SX_HOST\n elif tld == 'sydney':\n return NICClient.SYDNEY_HOST\n elif tld == 'taipei':\n return NICClient.TAIPEI_HOST\n elif tld == 'tatar':\n return NICClient.TATAR_HOST\n elif tld == 'tc':\n return NICClient.TC_HOST\n elif tld == 'tel':\n return NICClient.TEL_HOST\n elif tld == 'tf':\n return NICClient.TF_HOST\n elif tld == 'tirol':\n return NICClient.TIROL_HOST\n elif tld == 'tk':\n return NICClient.TK_HOST\n elif tld == 'tl':\n return NICClient.TL_HOST\n elif tld == 'tm':\n return NICClient.TM_HOST\n elif tld == 'top':\n return NICClient.TOP_HOST\n elif tld == 'tr':\n return NICClient.TR_HOST\n elif tld == 'trade':\n return NICClient.TRADE_HOST\n elif tld == 'travel':\n return NICClient.TRAVEL_HOST\n elif tld == 'trust':\n return NICClient.TRUST_HOST\n elif tld == 'tui':\n return NICClient.TUI_HOST\n elif tld == 'tv':\n return NICClient.TV_HOST\n elif tld == 'co.ua':\n return NICClient.CO.UA_HOST\n elif tld == 'uno':\n return NICClient.UNO_HOST\n elif tld == 'us':\n return NICClient.US_HOST\n elif tld == 'uz':\n return NICClient.UZ_HOST\n elif tld == 'versicherung':\n return NICClient.VERSICHERUNG_HOST\n elif tld == 'vg':\n return NICClient.VG_HOST\n elif tld == 'vip':\n return NICClient.VIP_HOST\n elif tld == 'vlaanderen':\n return NICClient.VLAANDEREN_HOST\n elif tld == 'vodka':\n return NICClient.VODKA_HOST\n elif tld == 'voting':\n return NICClient.VOTING_HOST\n elif tld == 'wales':\n return NICClient.WALES_HOST\n elif tld == 'webcam':\n return 
NICClient.WEBCAM_HOST\n elif tld == 'wed':\n return NICClient.WED_HOST\n elif tld == 'wedding':\n return NICClient.WEDDING_HOST\n elif tld == 'wf':\n return NICClient.WF_HOST\n elif tld == 'whoswho':\n return NICClient.WHOSWHO_HOST\n elif tld == 'wien':\n return NICClient.WIEN_HOST\n elif tld == 'win':\n return NICClient.WIN_HOST\n elif tld == 'work':\n return NICClient.WORK_HOST\n elif tld == 'ws':\n return NICClient.WS_HOST\n elif tld == 'wtc':\n return NICClient.WTC_HOST\n elif tld == 'xxx':\n return NICClient.XXX_HOST\n elif tld == 'yoga':\n return NICClient.YOGA_HOST\n elif tld == 'yt':\n return NICClient.YT_HOST\n elif tld == 'zm':\n return NICClient.ZM_HOST\n else:\n return tld + NICClient.QNICHOST_TAIL", "def Site(self) -> str:", "def guess_company_name(self, response):\n # TODO here guess the name of the company\n # if og:title or title or smth else\n # if domain in the title then its the name\n # if not\n # take domain\n\n parts = urllib.parse.urlparse(response.url)\n name_parts = parts.netloc.split(\".\")\n if len(name_parts) > 2:\n name = name_parts[1]\n else:\n name = name_parts[0]\n\n site_name = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if site_name:\n return site_name\n else:\n return name.title()", "def server_domain(self):\n url = self.api.address\n domain_start = url.find('://') + 3 if url.find('://') >= 0 else 0\n domain_end = url.find(':', domain_start) if url.find(':', domain_start) >= 0 else \\\n url.find('/', domain_start) if url.find('/', domain_start) >= 0 else \\\n url.find('?', domain_start) if url.find('?', domain_start) >= 0 else \\\n len(url)\n regex = re.compile('[^a-zA-Z0-9\\.]') # being cautious as changing this later will invalidate everyone's cache\n return regex.sub('_', url[domain_start:domain_end]).lower()", "def get_host(request):\n return request.META[\"HTTP_HOST\"].split(\":\")[0]", "def url(self):\n return self.hs.hostname if self.active else None", "def cloudfront_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('cloudfront')\n response = client.list_distributions(\n MaxItems='100'\n )\n items = response[\"DistributionList\"][\"Items\"]\n for item in items:\n cloud_front_domain_name = item[\"DomainName\"]\n if item[\"Aliases\"][\"Quantity\"] > 0:\n if hostname in item[\"Aliases\"][\"Items\"]:\n return cloud_front_domain_name\n return None", "def getSite(self, url):\n hostname = urlparse(urlparser).hostname\n site = sites.getSite(hostname)\n return site", "def get_base_domain(self) -> str:\n if self == self.off:\n return \"openfoodfacts\"\n elif self == self.obf:\n return \"openbeautyfacts\"\n elif self == self.opff:\n return \"openpetfoodfacts\"\n elif self == self.opf:\n return \"openproductfacts\"\n else:\n # Open Food Facts Pro\n return \"pro.openfoodfacts\"", "def safe_get_host(request):\r\n if isinstance(settings.ALLOWED_HOSTS, (list, tuple)) and '*' not in settings.ALLOWED_HOSTS:\r\n return request.get_host()\r\n else:\r\n return microsite.get_value('site_domain', settings.SITE_NAME)", "def fallback_host(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fallback_host\")", "def currentSiteURL(request):\n from django.contrib.sites.shortcuts import get_current_site\n\n current_site = get_current_site(request)\n protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'https')\n port = getattr(settings, 'MY_SITE_PORT', '')\n url = '%s://%s' % (protocol, current_site.domain)\n if port:\n url += ':%s' % port\n return url", "def site_name(self, obj):\n site = 
obj.site\n return (\"%s\" % (site.name))", "def host(self):\n if self.url.startswith(\"dns:\"):\n return self.url[4:]\n else:\n return urlparse(self.url).hostname", "def getHost():", "def getHost():", "def gethostbycondorname(name):\n\n m = htcondor_ip_name_re.match(name)\n if m is not None:\n return m.group(1).replace('-', '.')\n else:\n return socket.gethostbyname(name)", "def get_from_host(cls, host, silent=False):\n if cls.search([], count=True) == 1:\n return cls.search([])[0]\n try:\n website, = cls.search([('name', '=', host)])\n except ValueError:\n if not silent:\n raise WebsiteNotFound()\n else:\n return website", "def get_current_domain(r):\n return '{scheme}://{host}'.format(\n scheme='https' if r.is_secure() else 'http',\n host=r.get_host(),\n )", "def request_host(request):\n host = urlsplit(request.url).hostname\n if host == \"\":\n host = request.get_header(\"Host\", \"\").partition(\":\")[0]\n\n # remove port, if present\n return host.lower()", "def sitename(self) :\n\t\ttry :\n\t\t\treturn self._sitename\n\t\texcept Exception as e:\n\t\t\traise e", "def site_name(request):\n return {'site_name':'CatFood'}", "def website_name(self) -> Optional[str]:\n return pulumi.get(self, \"website_name\")", "def site(obj):\n return \"%s\" % (obj.site.name)", "def get_host(req):\n return req.META[\"HTTP_HOST\"].split(\":\")[0]", "def hostname_get():\n try:\n return json_response.success({'hostname': hostname.determine()})\n except hostname.Error as e:\n return json_response.error(str(e)), 200", "def determine_computer_from_hostname():\n # FIXME: This needs to be a resource file at some point\n all_computers = yaml_file_to_dict(FUNCTION_PATH + \"/machines/all_machines.yaml\")\n for this_computer in all_computers:\n for computer_pattern in all_computers[this_computer].values():\n if isinstance(computer_pattern, str):\n if re.match(computer_pattern, socket.gethostname()):\n return FUNCTION_PATH + \"/machines/\" + this_computer + \".yaml\"\n elif isinstance(computer_pattern, (list, tuple)):\n # Pluralize to avoid confusion:\n computer_patterns = computer_pattern\n for pattern in computer_patterns:\n if re.match(pattern, socket.gethostname()):\n return FUNCTION_PATH + \"/machines/\" + this_computer + \".yaml\"\n logging.warning(\n \"The yaml file for this computer (%s) could not be determined!\"\n % socket.gethostname()\n )\n logging.warning(\"Continuing with generic settings...\")\n return FUNCTION_PATH + \"/machines/generic.yaml\"\n\n # raise FileNotFoundError(\n # \"The yaml file for this computer (%s) could not be determined!\"\n # % socket.gethostname()\n # )", "def site_url(request, uri):\n\tsite = get_current_site(request)\n\tif site is not None:\n\t\treturn '{0}://{1}{2}{3}'.format(\n\t\t\t'https' if settings.SSL_ENABLED else 'http',\n\t\t\tsite.name,\n\t\t\t'' if uri.startswith('/') else '/',\n\t\t\turi\n\t\t)\n\treturn site" ]
[ "0.67882705", "0.57860607", "0.5658386", "0.5654703", "0.56485164", "0.5624197", "0.56084806", "0.55898887", "0.5582621", "0.5553793", "0.5553094", "0.550446", "0.5491108", "0.5476203", "0.5476005", "0.54712963", "0.546804", "0.546804", "0.5465417", "0.54653466", "0.54581463", "0.5454479", "0.54363877", "0.5427238", "0.5423932", "0.53968275", "0.5385678", "0.5384037", "0.53824335", "0.53752834" ]
0.7363288
0
Get the cosine similarity matrix from the embedding as a Pandas DataFrame.
def get_cosine_similarity_df(word2vec: Word2Vec) -> pd.DataFrame: sim = get_cosine_similarity(word2vec) return pd.DataFrame(sim, index=word2vec.wv.index2word, columns=word2vec.wv.index2word)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def _calculate_similarities(self) -> pd.DataFrame:\n\n df_encoded_articles = self._db_connection.get_dataframe(\n table_name='tfidf_representation',\n schema='encoded_articles'\n ).set_index('id')\n\n # Pandas loads the array column 'encoded' as a string e.g. \"[0.0, 0.6, 0.8]\" which needs translating to an array\n encoded_representations = np.array(df_encoded_articles['encoded'].tolist())\n\n return pd.DataFrame(\n index=df_encoded_articles.index,\n columns=df_encoded_articles.index,\n data=pairwise.cosine_similarity(encoded_representations)\n )", "def cosineDistanceMatrix():\n\n\tmatrix = movieMatrix()\n\tsimilarity = np.dot(matrix, matrix.T)\n\tsquareMag = np.diag(similarity)\n\tinvSquareMag = 1/squareMag\n\tinvSquareMag[np.isinf(invSquareMag)]=0\n\tinvMag = np.sqrt(invSquareMag)\n\tcosine = similarity * invMag\n\tcosine = cosine.T * invMag\n\treturn cosine", "def cossim(corpus):\n files = os.listdir()\n vectorizer = TfidfVectorizer()\n trsfm = vectorizer.fit_transform(corpus)\n columns = vectorizer.get_feature_names()\n df_tfidf = pd.DataFrame(trsfm.toarray(), columns = columns, index = corpus)\n out = cosine_similarity(trsfm)\n df_result = pd.DataFrame(out, columns = files, index = files)\n return df_result", "def build_distance_matrix(path_to_embeddings):\n\n embed_df = pd.read_csv(path_to_embeddings)\n print (\"length is: \", len(embed_df))\n columns = list(embed_df)\n\n \n distances = euclidean_distances(embed_df.iloc[:, 1:], embed_df.iloc[:, 1:])\n embed_df = embed_df.set_index([columns[0]])\n # format distance matrix\n distances_df = pd.DataFrame(distances)\n distances_df.columns = list(embed_df.index)\n distances_df.index = list(embed_df.index)\n\n print (\"finished building the distance matrix ...\")\n\n print (\"///////////////////\")\n print (len(distances_df))\n\n return distances_df", "def cosine_similarity(X):\n matrix = X.dot(X.transpose()).todense()\n mat_len = len(matrix)\n norms = [0] * mat_len\n for i in range(0, mat_len):\n norms[i] = 1.0 / np.sqrt(matrix.item((i, i)))\n norm_mat = np.matrix(norms)\n return np.multiply(norm_mat.transpose().dot(norm_mat), matrix)", "def calculate_cosine_similarity(self):\n tfidf_matrix = self.calculate_tfidf()\n\n cosine_similarity = linear_kernel(tfidf_matrix, tfidf_matrix) # Cosine similarity matrix calculation\n\n return cosine_similarity", "def calc_dist_matrix(self):\n\n self.dist_matrix = spatial.distance.squareform(spatial.distance.pdist(self.data_vector,metric=\"hamming\"))\n\n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def get_cosine_similarity(doc1, doc2):\n count_vectorizer = CountVectorizer(stop_words='english')\n sparse_matrix = count_vectorizer.fit_transform(raw_documents=[doc1, doc2])\n dtm = sparse_matrix.todense()\n df_dtm = pd.DataFrame(data=dtm, \n columns=count_vectorizer.get_feature_names(), \n index=['doc1', 'doc2'])\n similarity_matrix = cosine_similarity(df_dtm, df_dtm)\n similarity_score = round(similarity_matrix[0][1], 6)\n return similarity_score", "def get_similarity(self, ):\r\n customer_cos_similarity = cosine_similarity(self.rating_matrix, self.rating_matrix)\r\n customer_cos_similarity = pd.DataFrame(customer_cos_similarity,\r\n index=self.customer_vendor_matrix.index,\r\n columns=self.customer_vendor_matrix.index)\r\n # 
customer_pearson_similarity = np.corrcoef(self.rating_matrix,\r\n # self.rating_matrix,)\r\n # customer_pearson_similarity = pd.DataFrame(customer_pearson_similarity,\r\n # index=self.customer_vendor_matrix.index,\r\n # columns=self.customer_vendor_matrix.index)\r\n return customer_cos_similarity,\r\n # return customer_pearson_similarity run too slowly\r", "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def get_cosine_similarity(word2vec: Word2Vec) -> np.ndarray:\n return cosine_similarity(word2vec.wv.vectors)", "def as_df(self):\r\n return pd.DataFrame(self.vectors).set_index(self.words)", "def similarity_matrix(feat_mat):\n sim_mat = cosine_similarity(feat_mat)\n np.fill_diagonal(sim_mat, 0)\n return sim_mat", "def model_book_similarities(data):\n\n data = data.T\n U2 = distance.squareform(distance.pdist(data, metric='cosine'))\n sim_matrix = pd.DataFrame(U2)\n\n return sim_matrix", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat", "def get_cosin_sim(question, contexts):\r\n cos_sim_for_question = []\r\n for context in contexts :\r\n cv = CountVectorizer(stop_words=MY_STOPWORDS, lowercase=False)\r\n matrix = cv.fit_transform(pd.DataFrame([question, context])[0]).toarray()\r\n cos_sim = dot(matrix[0], matrix[1])/(norm(matrix[0])*norm(matrix[1]))\r\n cos_sim_for_question.append(cos_sim)\r\n return pd.Series(cos_sim_for_question)", "def cosine_sim(matrix):\n if type(matrix) is not csr_matrix:\n matrix = csr_matrix(matrix)\n\n return cosine_similarity(matrix, dense_output=False)", "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "def cosine_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = cosine_similarity(references[i, :], queries[j, :])\n return scores", "def getGloveoCosineSimilarity(question1, question2):\n questions = [question1, question2]\n\n ## for the sentences we need to get the count vectors\n vec = CountVectorizer(max_features=5000, stop_words=None,binary=True)\n count_vectors = vec.fit_transform(questions)\n\n ## get the vocabulary of words from the questions\n vocab_index = vec.vocabulary_\n\n ## get the index of the words and embeddings\n index_word = {v:k for k, v in vocab_index.items()}\n\n ## get the question vectors\n question_vectors = np.zeros((count_vectors.shape[0], 300))\n\n ## iterate through count vectors for each word get the embeddings\n ## for each embedding, we will then average by the number of words\n ## this will be then used for cosine similarity\n for i in range(count_vectors.shape[0]):\n row = count_vectors[i, :].toarray()\n word_ids = np.where(row > 0)[1]\n word_counts = row[:, word_ids][0]\n numWords = np.sum(word_counts)\n\n ## if there are no words, continue\n if numWords == 0:\n continue\n\n ## initialize the word embeddings to 0\n 
word_embeddings = np.zeros((word_ids.shape[0], 300))\n\n ## update the word embeddings\n for j in range(word_ids.shape[0]):\n word_id = word_ids[j]\n word_embeddings[j, :] = word_counts[j] * gloveDict[index_word[word_id]]\n question_vectors[i, :] = np.sum(word_embeddings, axis=0) / numWords\n\n return(cosine_similarity(question_vectors[0], question_vectors[1])[0][0])", "def get_matches_df(sparse_matrix, name_vector):\n\n name_vector_list = pd.Series(list(map(str, name_vector)))\n\n non_zeros = sparse_matrix.nonzero()\n\n sparserows = non_zeros[0]\n sparsecols = non_zeros[1]\n\n nr_matches = sparsecols.size\n\n left_side = np.empty([nr_matches], dtype=object)\n right_side = np.empty([nr_matches], dtype=object)\n similarity = np.zeros(nr_matches)\n pos_left = np.zeros(nr_matches, dtype=np.int)\n pos_right = np.zeros(nr_matches, dtype=np.int)\n\n for index in range(0, nr_matches):\n left_side[index] = name_vector_list[sparserows[index]]\n right_side[index] = name_vector_list[sparsecols[index]]\n similarity[index] = sparse_matrix.data[index]\n pos_left[index] = sparserows[index]\n pos_right[index] = sparsecols[index]\n\n return pd.DataFrame({'left_side': left_side,\n 'right_side': right_side,\n 'similarity': similarity,\n 'pos_left': pos_left,\n 'pos_right': pos_right})", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def _cosine_matrix(self, x1, x2):\n # expand h1 shape to (batch_size, x1_timesteps, 1, embedding_size)\n x1 = K.expand_dims(x1, axis=2)\n # expand x2 shape to (batch_size, 1, x2_timesteps, embedding_size)\n x2 = K.expand_dims(x2, axis=1)\n # cosine matrix (batch_size, h1_timesteps, h2_timesteps)\n cos_matrix = self._cosine_similarity(x1, x2)\n return cos_matrix", "def create_course_content_similarity_df(self, min_similarity: float = 0.5, sample_len: int = None) -> pd.DataFrame:\n self.retrieve_courses()\n sim_list = []\n sim_matrix = self.create_course_content_similarity_matrix(sample_len=sample_len)\n\n for idx, similarities in enumerate(sim_matrix):\n a_course_id = self.courses_df.iloc[idx]['id']\n\n for idx_sims, similarity in enumerate(similarities):\n if idx == idx_sims:\n continue\n\n if similarity < min_similarity:\n continue\n\n another_course_id = self.courses_df.iloc[idx_sims]['id']\n\n sim_list.append({'a_course_id': a_course_id,\n 'another_course_id': another_course_id,\n 'similarity': similarity})\n\n self.courses_content_sims_df = pd.DataFrame(sim_list)\n\n return self.courses_content_sims_df", "def sere_matrix(df, TH=1):\n number_samples = df.shape[1]\n columns = df.columns\n\n # Distance matrix\n distance = np.full((num_samples, num_samples), np.nan)\n for i in range(num_samples):\n for j in range(i, num_samples):\n distance.iloc[i, j] = sere_score(df.loc[:, [i, j]], TH)\n distance.iloc[j, i] = distance.iloc[i, j]\n\n return pd.DataFrame(distance, index=columns, columns=columns)", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def compute_cosine_similarity(self):\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])", "def 
get_euclidean_matrix(df):\n df.reset_index(drop=True, inplace=True)\n\n # foods = df['food_names']\n # food_examples = []\n # indices = list(range(0, len(foods)))\n # for i in indices:\n # food_examples.append(str(foods[i]) + str(i))\n # food_examples = pd.Series(food_examples)\n food_examples = df['food_names']\n\n df = df.drop(['food_names', 'height', 'weight', 'above_range', 'BMI', 'age', 'gender',\n 'glucose_tolerance_category','90-percentile_of_2h-iAUC', 'average_carbs_ratio',\n 'average_daily_carbs','average_meals_per_day', 'average_sleep_hours',\n 'average_glucose', 'baseline', 'coefficient_of_variation', 'max_2-hours_iAUC',\n 'median_fasting_glucose_level','median_of_2h-iAUC', 'night_baseline'], axis='columns')\n\n df = df.replace([-np.inf], 0).dropna(axis=1)\n\n num_examples = df.shape[0]\n\n distances = pdist(df.values, metric='euclidean')\n print(distance)\n dis_array = squareform(distances)\n print(dis_array)\n dis_df = pd.DataFrame(data = dis_array, index=food_examples, columns=food_examples)\n print(dis_df)\n writer = pd.ExcelWriter('Euclidean_distance_icarbonx.xlsx', engine='xlsxwriter')\n dis_df.to_excel(writer, sheet_name='Sheet1')\n writer.save()", "def get_alphabet_similarity_matrix(self):\n distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))\n numpy.fill_diagonal(distance_matrix, 0)\n for index_one, descriptor_one in enumerate(self.descriptors):\n for index_two, descriptor_two in enumerate(self.descriptors):\n distance = descriptor_one - descriptor_two\n squared_distance = numpy.dot(distance, distance)\n distance_matrix[index_one, index_two] = squared_distance\n distance_matrix /= 2. * (self.sigma_amino_acid ** 2)\n return numpy.exp(-distance_matrix)" ]
[ "0.7234511", "0.6991583", "0.6946937", "0.68768257", "0.65643364", "0.6388998", "0.6264799", "0.61277986", "0.612138", "0.6115123", "0.6112033", "0.60728824", "0.60516924", "0.5977526", "0.5932496", "0.59059745", "0.5899762", "0.582879", "0.58085597", "0.5782091", "0.56256443", "0.5623799", "0.5583576", "0.55641776", "0.5556099", "0.5555112", "0.5543699", "0.5540323", "0.553921", "0.5498953" ]
0.76576155
0
Create a workflow queue
def add(ctx, wf_name, wf_version, wf_owner): jess_url = ctx.obj.get('JT_CONFIG').get('jess_server') if wf_owner is None: wf_owner = ctx.obj.get('JT_CONFIG').get('jt_account') url = "%s/queues/owner/%s/workflow/%s/ver/%s" % (jess_url, wf_owner, wf_name, wf_version) r = requests.post(url) if r.status_code != 200: click.echo('Queue creation for: %s failed: %s' % (wf_owner, r.text)) else: click.echo("Queue registration succeeded, details as below") click.echo(r.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def _create_queue(self):\n # Instantiate\n queue = pbs.queue(verbose=not self.quiet)\n\n if self.q == 'ember':\n # Submitting to Utah ember cluster\n ppn = 12\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n walltime = self.walltime if int(self.walltime.split(':')[0]) < 72 else '72:00:00'\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=walltime, ppn=ppn, cpus=cpus, partition='ember', alloc='sdss')\n elif self.q is not None:\n # All other self.q values expected for Portsmouth cluster,\n # sciama. In this case, the number of nodes is queue\n # dependent, and qos is not set\n if self.q == 'sciama1.q':\n ppn = 12\n elif self.q == 'sciama3.q':\n ppn = 20\n else:\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, umask=self.umask,\n walltime=self.walltime, queue=self.q, ppn=ppn, cpus=cpus)\n else:\n # self.q can be None when submitting to both the Portsmouth\n # and Utah clusters. In this case, the default queue\n # destination and ppn is correct. qos is also set, but this\n # should only be used when submitting to Utah.\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=self.walltime, ppn=ppn, cpus=cpus)\n\n return queue", "def new_queue() -> Queue:\n return multiprocessing.Queue()", "def create_queue(q_settings):\r\n db = get_db()\r\n cursor = db.cursor()\r\n cursor.execute(INSERT_QUEUE)\r\n q_settings['qid'] = cursor.lastrowid\r\n cursor.execute(INSERT_QUEUE_SETTINGS, qsettings_dict_to_db_tuple(q_settings))\r\n cursor.close()\r\n db.commit()\r\n permissions.add_permission_list(get_uids(q_settings['admins']), q_settings['qid'], permissions.ADMIN)\r\n if q_settings.has_key('managers'):\r\n permissions.add_permission_list(get_uids(q_settings['managers']), q_settings['qid'], permissions.MANAGER)\r\n if q_settings.has_key('blocked_users'):\r\n permissions.add_permission_list(get_uids(q_settings['blocked_users']), q_settings['qid'], permissions.BLOCKED_USER)\r\n return q_settings['qid']", "def create_queue(self):\n queue_name = self.generate_name()\n try:\n queue = self.sqs.create_queue(QueueName=queue_name)\n except Exception as e:\n raise RuntimeError('SQS could create queue: %s' % e)\n self.queue_name, self.queue = queue_name, queue", "async def create_work_queue(\n self,\n name: str,\n tags: Optional[List[str]] = None,\n description: Optional[str] = None,\n is_paused: Optional[bool] = None,\n concurrency_limit: Optional[int] = None,\n priority: Optional[int] = None,\n work_pool_name: Optional[str] = None,\n ) -> WorkQueue:\n if tags:\n warnings.warn(\n (\n \"The use of tags for creating work queue filters is deprecated.\"\n \" This option will be removed on 2023-02-23.\"\n ),\n DeprecationWarning,\n )\n filter = QueueFilter(tags=tags)\n else:\n filter = None\n create_model = WorkQueueCreate(name=name, filter=filter)\n if description is not None:\n create_model.description = description\n if is_paused is not None:\n create_model.is_paused = is_paused\n if concurrency_limit is not None:\n create_model.concurrency_limit = concurrency_limit\n if priority is not None:\n create_model.priority = priority\n\n data = create_model.dict(json_compatible=True)\n try:\n if work_pool_name is not None:\n response = await self._client.post(\n 
f\"/work_pools/{work_pool_name}/queues\", json=data\n )\n else:\n response = await self._client.post(\"/work_queues/\", json=data)\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_409_CONFLICT:\n raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e\n elif e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise\n return WorkQueue.parse_obj(response.json())", "def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next", "def test_create_qos_queue(self):\r\n resource = 'qos_queue'\r\n cmd = qos.CreateQoSQueue(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n name = 'my_queue'\r\n default = False\r\n args = ['--default', default, name]\r\n position_names = ['name', 'default']\r\n position_values = [name, default]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def create_queue(self, queue_name, visibility_timeout=None):\r\n params = {'QueueName': queue_name}\r\n if visibility_timeout:\r\n params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,)\r\n return self.get_object('CreateQueue', params, Queue)", "def create_task():", "def queue(self):\n if not self.parent_node.is_job:\n return\n\n self.winstance.send_event('Queuing job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.queue',\n kwargs={\"name\": self.name})\n result.task.wait_for_terminated()\n if result.task.get_state() == tasks.TASK_FAILED:\n init_state = 'FAILED'\n else:\n self.winstance.send_event('.. 
job queued')\n init_state = 'PENDING'\n self.set_status(init_state)\n return result.task", "def queue(self, *args, **kwargs):\n queue_args = self._pop_tq_add_args(kwargs)\n app = queue_args.pop('app', None) or flask.current_app\n\n with app.test_request_context():\n # flask.url_for uses the request context if it is present\n # as we're most likely in a request context, use a\n # test_request_context() instead.\n url = self.url()\n\n payload = pickle.dumps((args, kwargs))\n\n taskqueue.add(\n url=url,\n queue_name=self.queue_name,\n payload=payload,\n **queue_args\n )", "def create_task(self, task_body, req_context):\n design_ref = task_body.get('design_ref', None)\n node_filter = task_body.get('node_filter', None)\n action = task_body.get('action', None)\n\n if design_ref is None or action is None:\n raise errors.InvalidFormat(\n 'Task creation requires fields design_ref, action')\n\n task = self.orchestrator.create_task(design_ref=design_ref,\n action=action,\n node_filter=node_filter,\n context=req_context)\n\n task.set_status(hd_fields.TaskStatus.Queued)\n task.save()\n return task", "def create_queue(queue_name: str,\n durable: bool = True,\n auto_delete: bool = False,\n priorities: int = 0,\n extra_properties: Optional[dict] = None,\n server_url: Optional[str] = None):\n method_arguments: dict = {\n 'type': 'queue',\n 'name': queue_name,\n 'properties': {\n 'durable': durable,\n 'auto-delete': auto_delete,\n 'qpid.priorities': priorities\n }\n }\n\n if extra_properties:\n method_arguments['properties'].update(extra_properties)\n\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n create_queue_message = create_QMF2_method_invoke(\n get_broker_id(server_url),\n 'create', method_arguments)\n rpc.call(create_queue_message, timedelta(seconds=5))", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def shopify_create_product_queue(self, instance, created_by='import'):\n #Added created_by field which is used to identify the queue is created from which process import or webhook : Dipak Gogiya\n product_queue_vals = {\n 'shopify_instance_id':instance and instance.id or False,\n 'state':'draft',\n 'created_by': created_by\n }\n product_queue_data_id = self.create(product_queue_vals)\n\n return product_queue_data_id", "def test_queue_enqueue_command(runner, tmpworkdir, queue, target_factory): # pylint: disable=unused-argument\n\n atarget = target_factory.build(queue=queue)\n apath = Path('ips.txt')\n apath.write_text(f'{atarget.target}\\n \\n ', encoding='utf-8')\n\n result = runner.invoke(command, ['queue-enqueue', 'notexist', atarget.target])\n assert result.exit_code == 1\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, atarget.target])\n assert result.exit_code == 0\n assert Queue.query.get(queue.id).targets[0].target == atarget.target\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, '--file', apath])\n assert result.exit_code == 0\n assert len(Queue.query.get(queue.id).targets) == 2", "def queue(self, sid):\r\n return queues.Queue(self, sid)", "def queue_cloud_task(request):\n project = os.environ.get(\"PROJECT_ID\")\n queue = os.environ.get(\"QUEUE_NAME\")\n location = os.environ.get(\"QUEUE_REGION_LOCATION\")\n service_account_email = os.environ.get(\"SERVICE_ACCOUNT_EMAIL\")\n\n request_json = request.get_json()\n\n # the http endpoint the task will send to\n url = request_json.get('url')\n # 
the post data that should be forwarded to the http endpoint\n payload = request_json.get('payload')\n # the time in seconds to delay task execution\n in_seconds = request_json.get('in_seconds')\n # the unique name of the task we are queueing\n task_name = request_json.get('task_name')\n\n try:\n # Create a client.\n client = tasks_v2.CloudTasksClient()\n # Construct the fully qualified queue name.\n parent = client.queue_path(project, location, queue)\n except Exception as e:\n print(e)\n return f\"{e}\", 500\n\n # Construct the request body.\n task = {\n \"http_request\": { # Specify the type of request.\n \"http_method\": tasks_v2.HttpMethod.POST,\n \"url\": url,\n \"oidc_token\": {\"service_account_email\": service_account_email},\n }\n }\n if payload is not None:\n if isinstance(payload, dict):\n # Convert dict to JSON string\n payload = json.dumps(payload)\n # specify http content-type to application/json\n task[\"http_request\"][\"headers\"] = {\"Content-type\": \"application/json\"}\n\n # The API expects a payload of type bytes.\n converted_payload = payload.encode()\n\n # Add the payload to the request.\n task[\"http_request\"][\"body\"] = converted_payload\n\n if in_seconds is not None:\n # Convert \"seconds from now\" into an rfc3339 datetime string.\n d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)\n\n # Create Timestamp protobuf.\n timestamp = timestamp_pb2.Timestamp()\n timestamp.FromDatetime(d)\n\n # Add the timestamp to the tasks.\n task[\"schedule_time\"] = timestamp\n\n if task_name is not None:\n # Add the name to tasks.\n name = f\"projects/{project}/locations/{location}/queues/{queue}/tasks{task_name}\"\n task[\"name\"] = name\n\n try:\n # Use the client to build and send the task.\n response = client.create_task(request={\"parent\": parent, \"task\": task})\n return f\"Created task {response.name}\", 200\n except Exception as e:\n print(e)\n return f\"{e}\", 500", "def create_message_queue_table(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='message_queue'\")\n if app_process_cursor.fetchone()[0]==1:\n return\n app_process_cursor.execute(\"\"\"\n CREATE TABLE message_queue (\n tstamp real,\n request text,\n dest_ip text,\n dest_port integer,\n next_tstamp real\n )\n \"\"\")\n app_process.commit()\n app_process.close()", "def adc_api_workflow_create():\n workflow_json = request.get_json(force=True)\n\n return jsonify(adc.workflow_create(workflow_json=workflow_json))", "def subject(\n state_store: StateStore,\n command_executor: CommandExecutor,\n) -> QueueWorker:\n return QueueWorker(state_store=state_store, command_executor=command_executor)", "def instantiate_queue(self):\n serialized_queue = self.cache.get('queue')\n queue = ast.literal_eval(serialized_queue.decode('utf-8'))\n return queue", "def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)", "def prepare_queue_after_restart(options):\n if TEST_MODE:\n global task_number\n try:\n task_number\n except NameError:\n task_number = -1\n task_number += 1\n fake_class = SQS_Queue(options['queue']['name'])\n return options['task_data'], fake_class\n # Connection to SQS\n queue = SQS_Queue(\n name=options['queue']['queue_name'],\n region=options['queue']['conn_region']\n )\n # Create a new message\n queue.currentM = queue.q.message_class()\n # Fill message\n 
queue.currentM.body = options['queue']['body']\n queue.currentM.attributes = options['queue']['attributes']\n queue.currentM.md5_message_attributes = \\\n options['queue']['md5_message_attributes']\n queue.currentM.message_attributes = options['queue']['message_attributes']\n queue.currentM.receipt_handle = options['queue']['receipt_handle']\n queue.currentM.id = options['queue']['id']\n queue.currentM.md5 = options['queue']['md5']\n return options['task_data'], queue", "def __init__(self, queue_id):\n self.queue_id = queue_id\n self.action_type = 'set_queue'", "def create_work_item(self):", "def add_a_queue(self, size):\n \tself.queues.append(ContextModalityQueue(size))", "def __init__(self, workflow):\n self.workflow = workflow", "def queue_buildrequest(event):\n get().build_queue.put(event)" ]
[ "0.6942579", "0.66524434", "0.6572549", "0.648934", "0.63773924", "0.63448215", "0.63364196", "0.62668824", "0.6258213", "0.6220978", "0.62111425", "0.6170667", "0.6101163", "0.60690314", "0.60477155", "0.60386324", "0.6012786", "0.6001777", "0.5967533", "0.5931258", "0.59293574", "0.59138167", "0.5871466", "0.58535653", "0.5848829", "0.5829172", "0.5818148", "0.5813537", "0.5798596", "0.5778967" ]
0.6898098
1
Open a workflow queue
def open(queue_id): r = update_queue_state(queue_id, 'open') if r.status_code != 200: click.echo('Open queue failed, please ensure input is correct.') else: click.echo(r.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_workflow(self, **params):\n raise NotImplementedError", "def __init__(self, workflow):\n self.workflow = workflow", "def start_workflow(self, workflow_name, workflow_input, **params):\n raise NotImplementedError", "def on_channel_open(new_channel):\n global channel\n channel = new_channel\n channel.queue_declare(queue=\"test\", durable=True, exclusive=False, auto_delete=False, callback=on_queue_declared)", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def __init__(self):\n self.queue = Queue()", "def open_task(self, instance):\n self.task_manager.load(instance.text)\n\n # Build the task in editor\n for component in self.task_manager.task.graph.V:\n self.add_component(component)\n for tagged_link in self.task_manager.task.graph[component]:\n self.task_manager.add_editor_link(\n component,\n self.task_manager.task.graph.V[tagged_link.vertex_index],\n self.ids.edit_area,\n index=tagged_link.tag)\n self.task_list.dismiss()", "def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next", "def __init__(self, queue_id):\n self.queue_id = queue_id\n self.action_type = 'set_queue'", "def startingNewStep(self):\n with self.__queueLock:\n self.__submittedJobs = []", "def queue(self):\n if not self.parent_node.is_job:\n return\n\n self.winstance.send_event('Queuing job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.queue',\n kwargs={\"name\": self.name})\n result.task.wait_for_terminated()\n if result.task.get_state() == tasks.TASK_FAILED:\n init_state = 'FAILED'\n else:\n self.winstance.send_event('.. 
job queued')\n init_state = 'PENDING'\n self.set_status(init_state)\n return result.task", "def handle(self, sim_manager, state):\n super().handle(sim_manager, state)\n state.queue_job(self.job_index)\n if state.has_free_view_slots():\n # todo: revisit this...\n self.trigger_next = False\n job_idx = state.get_next_job()\n sim_manager.start_new_job(job_idx, state)\n sim_manager.event_heap.update_event_heap_counts('job_arrivals', False)", "def test_OpenCloseSingle(self):\n\n q = Queue(self.path)\n q.put('var1')\n del q\n q = Queue(self.path)\n self.assertEqual(1, q.qsize())\n self.assertEqual('var1', q.get())\n q.task_done()", "def test_queue_enqueue_command(runner, tmpworkdir, queue, target_factory): # pylint: disable=unused-argument\n\n atarget = target_factory.build(queue=queue)\n apath = Path('ips.txt')\n apath.write_text(f'{atarget.target}\\n \\n ', encoding='utf-8')\n\n result = runner.invoke(command, ['queue-enqueue', 'notexist', atarget.target])\n assert result.exit_code == 1\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, atarget.target])\n assert result.exit_code == 0\n assert Queue.query.get(queue.id).targets[0].target == atarget.target\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, '--file', apath])\n assert result.exit_code == 0\n assert len(Queue.query.get(queue.id).targets) == 2", "def new_queue() -> Queue:\n return multiprocessing.Queue()", "def queue(self, sid):\r\n return queues.Queue(self, sid)", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def run_workflow(workflow_log_id):\n outputs = {}\n protocol = \"tcp\"\n\n workflow_log = WorkflowLog.objects.get(id=workflow_log_id)\n worker = workflow_log.performed_on\n\n WORKER_ENDPOINT = \"%s://%s:%s\" % (protocol, worker.ip, str(worker.port))\n WORKER_SECRET_KEY = worker.secret_key\n\n conn = BotConnection(WORKER_ENDPOINT, WORKER_SECRET_KEY)\n conn.connect()\n\n # Make a JSON\n request_header = {'workflow_log_id': workflow_log.id,\n 'workflow': slugify(workflow_log.workflow.title),\n 'workflow_log_time': workflow_log.date_created.strftime('%Y%m%d-%H%M%S'),\n 'script': {},\n 'hooks': {}, # see doc/HOOKS.md\n }\n\n # hooks for this workflow\n if workflow_log.workflow.pre_task:\n request_header['hooks']['pre_task'] = workflow_log.workflow.pre_task\n\n if workflow_log.workflow.post_task:\n request_header['hooks']['post_task'] = workflow_log.workflow.post_task\n\n ordered_workflows = order_workflow_tasks(workflow_log.workflow)\n\n workflow_log.date_started = timezone.now()\n for idx, workflow_task in enumerate(ordered_workflows):\n template = render_template(workflow_log, workflow_task)\n\n if workflow_task.task.is_builtin:\n m = importCode(template, \"test\")\n output = {}\n output['stdout'] = str(m.run())\n output['exit_code'] = workflow_log.SUCCESS\n else:\n request = request_header\n request['script']['id'] = idx\n request['script']['body'] = template\n\n output = send_script(request, conn)\n\n outputs['%i_%s' % (workflow_task.id, workflow_task.task.title)] = output\n\n # loop over all next wf_tasks and add this scripts output to inputs\n current = workflow_task\n while current.next_workflow_task:\n current = current.next_workflow_task\n\n # deepcopy dict to prevent runtime error\n inp = deepcopy(workflow_log.inputs)\n # loop key, value pairs and look if this output needs to be set as input\n for key, value in inp[str(current.id)]['string'].iteritems():\n if 
value == 'output_%s' % str(workflow_task.id):\n workflow_log.inputs[str(current.id)]['string'][key] = output['stdout']\n\n if 'exit_code' not in output or output['exit_code'] is not workflow_log.SUCCESS:\n workflow_log.exit_code = workflow_log.ERROR\n workflow_log.save()\n break\n else:\n workflow_log.exit_code = workflow_log.SUCCESS\n\n conn.close()\n\n workflow_log.date_finished = timezone.now()\n workflow_log.outputs = outputs\n workflow_log.save()\n\n # Notify user in case of failure\n if workflow_log.exit_code == workflow_log.ERROR:\n send_failiure_notification(workflow_log)", "def add(ctx, wf_name, wf_version, wf_owner):\n jess_url = ctx.obj.get('JT_CONFIG').get('jess_server')\n if wf_owner is None:\n wf_owner = ctx.obj.get('JT_CONFIG').get('jt_account')\n\n url = \"%s/queues/owner/%s/workflow/%s/ver/%s\" % (jess_url, wf_owner, wf_name, wf_version)\n\n r = requests.post(url)\n if r.status_code != 200:\n click.echo('Queue creation for: %s failed: %s' % (wf_owner, r.text))\n else:\n click.echo(\"Queue registration succeeded, details as below\")\n click.echo(r.text)", "def init_workflow():\n pass", "def on_open_channel(new_channel):\n # assign new channel to the global channel variable\n global channel\n channel = new_channel\n\n # channel is assigned and declare a queue named scripbox.\n # queue Properties - durable is True so that the queue withstands rabbitmq reboot\n # Pass a callback on_queue_declared which fires when a queue declaration\n # is successful\n channel.queue_declare(queue='scripbox', durable=True,\n auto_delete=False, callback=on_queue_declared)", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def __init__(self, config, queue_name):\n self.work_queue_client = WorkQueueClient(config, queue_name)", "def __init__(self, config, queue_name):\n self.work_queue_client = WorkQueueClient(config, queue_name)", "def on_channel_task_open(self, channel):\n # LOGGER.info('Channel opened')\n self._channel_task = channel\n self._channel_task.add_on_close_callback(self.on_channel_closed)\n channel.queue_declare(\n queue=self.queue_task,\n durable=False,\n exclusive=False\n )\n channel.basic_qos(prefetch_count=self._prefetch_count)\n self._init_ok_task = True", "def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()", "def add_a_queue(self, size):\n \tself.queues.append(ContextModalityQueue(size))", "def add_workflow(self, workflow):\n self.workflow_manager.add_workflow(workflow)", "def __init__(self,\n input_queue: JoinableQueue,\n output_queue: JoinableQueue,\n error_queue: JoinableQueue,\n slack_queue: 'SlackBot.SlackQueue',\n logging_queue: JoinableQueue,\n process_job: Callable[[Type['Task.Task']], Type['Task.Task']],\n name: str =\"PipelineManager\",\n num_processes: int = 1,\n timeout_duration: int = 1) -> None:\n\n self.name = name\n #An attempt to idiot-proof the PipelineManager by instantiating a JoinableQueue() if one didn't exist already.\n self.input_queue = input_queue if input_queue else JoinableQueue()\n self.output_queue = output_queue if output_queue else JoinableQueue()\n self.error_queue = error_queue if error_queue else JoinableQueue()\n self.slack_queue = slack_queue\n self.logging_queue = logging_queue\n self.num_processes = num_processes\n self.process_job = process_job\n self.timeout_duration = timeout_duration\n #A list of active processes comprised of Process objects\n self.process_list: List[Process] = []\n #An internal restart flag 
(used when all processes managed die)\n self.restart_required = False\n self.logger = logging.getLogger(self.name)\n self.logger.setLevel(logging.DEBUG)", "def __init__(self, lsfQueue, jobName=\"\", async=False):\n self.lsfQueue = lsfQueue\n self.jobName = jobName\n self.async = async\n self.jobid = None\n self.cmd = None" ]
[ "0.5858807", "0.5655057", "0.5632103", "0.55643636", "0.5441331", "0.54325885", "0.5398457", "0.5371928", "0.5335743", "0.533102", "0.53260267", "0.5324544", "0.532239", "0.5315641", "0.53023946", "0.5290867", "0.52795565", "0.527409", "0.52734566", "0.52717185", "0.52413005", "0.5202245", "0.5195258", "0.5195258", "0.51935834", "0.5182081", "0.5170067", "0.5163497", "0.51534986", "0.5129187" ]
0.7445993
0
Close a workflow queue
def close(queue_id): r = update_queue_state(queue_id, 'close') if r.status_code != 200: click.echo('Close queue failed, please ensure input is correct.') else: click.echo(r.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n self.input_queue.put(None)\n self.input_queue.join()", "def exit_queue(self, name=None):\r\n if(name):\r\n self.log.debug(\"EXITING queue: (%s)\" % (name))\r\n self._queues[name].release()\r\n self.log.debug(\"SUCCESS EXITING queue: (%s)\" % (name))", "def delete_queue(self):\n self.work_queue_client.delete_queue()", "def _close(self):\n\n #to kill children loop over native children list\n #DO NOT create a custom process list\n #since terminated processes don't get correctly garbage collected\n for lw in multiprocessing.active_children():\n if type(lw) == loop_worker.LoopWorker:\n lw.kill()\n\n logging.info(\"Closing Agent\")\n closemsg = {'process': {\n 'name': multiprocessing.current_process().name, \n 'status': 'closed'}}\n \n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(closemsg)))\n self.msg_queue.put(None)\n if self.broker_process:\n self.broker_process.join()\n logging.info(\"Agent closed\")", "def Close(self):\n self._task_storage_reader.Close()\n self._task_storage_reader = None", "def close(self):\n self.running = False\n self.join()\n\n for i in xrange(len(self.tasks)):\n if (self.tasks[i].status == 'queued'):\n self.tasks[i].set_killed()\n\n if self.pool is not None:\n log_debug(\"closing pool of processes\")\n self.pool.close()", "def _close(self):\n self.write_data(self.write_queue)\n self.write_compound(self.write_compound_queue)", "def close(self):\n\t\tfor _ in range(len(self.worker_processes)):\n\t\t\tself.input_queue.put(None)\n\n\t\tfor p in self.worker_processes:\n\t\t\tp.join()\n\n\t\tself.serialized_examples_queue.put(None)\n\t\tself.writer_process.join()", "def delete_queue(qid):\r\n raise NotImplementedError()", "def close(self):\n\n self.shm_command.write({'cmd': 'close', 'data': {}})\n time.sleep(0.2)", "def test_OpenCloseSingle(self):\n\n q = Queue(self.path)\n q.put('var1')\n del q\n q = Queue(self.path)\n self.assertEqual(1, q.qsize())\n self.assertEqual('var1', q.get())\n q.task_done()", "def _queue_delete(self, queue):\n\n queue.delete()", "def close_worker(self, uri):\n\t\twith self._mutex:\n\t\t\tself._close_worker(uri)", "def close(self):\n if not self.isqueue:\n return self.connection\n return None", "def free_queue(self, sycl_queue_val):\n fn = DpctlCAPIFnBuilder.get_dpctl_queue_delete(\n builder=self.builder, context=self.context\n )\n self.builder.call(fn, [self.builder.load(sycl_queue_val)])", "def close(self):\r\n if self._handle is None:\r\n warnings.warn(\r\n 'Attempted to close NI-DAQmx task of name \"{0}\" but task was '\r\n 'already closed.'.format(self._saved_name), DaqResourceWarning)\r\n return\r\n\r\n cfunc = lib_importer.windll.DAQmxClearTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n lib_importer.task_handle]\r\n\r\n error_code = cfunc(\r\n self._handle)\r\n check_for_error(error_code)\r\n\r\n self._handle = None", "def close(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/close\"\n\n _response = self.connector.http_call(\"post\", _url)\n\n # Update object\n if _response.status_code == 204:\n self.status = \"closed\"", "def close(self) -> None:\n self.relay(\"close\")()", "def close(self):\n self.exit_set = True\n self.sql_queue.put((self.exit_token, \"\", \"\"), timeout=5)\n # Sleep and check that the thread is done before returning.\n while self.thread_running:\n time.sleep(.01) # Don't kill the CPU waiting.", "def close(self):\n self.running.value = False\n # Empty queues or 
processes won't join.\n while not self.pimap_data_queue.empty(): self.pimap_data_queue.get()\n while not self.received_address_queue.empty(): self.received_address_queue.get()\n for worker_process in self.sense_worker_processes:\n worker_process.join()\n for worker_process in self.pimap_worker_processes:\n worker_process.join()\n self.socket.close()", "def close(self):\n full_size = len(self.created_list) # put terminal event to task queue\n while full_size:\n self.q.put(self.StopEvent)\n full_size -= 1", "def close(self):\n\n if self._state == states['open']:\n self._do_close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.queue.channel.rpc(self._basic_cancel)\n self.queue.consuming = False", "def close(self):\r\n self._sendLock.acquire()\r\n try:\r\n self._queue.put(\"CLOSE\")\r\n self._eventQueue.put((time.time(), \"CLOSE\"))\r\n self._closed = 1\r\n self._s.close()\r\n self._thread.join()\r\n self._eventThread.join()\r\n finally:\r\n self._sendLock.release()", "async def _close(self):\n for w in self.workers:\n w.cancel()\n\n await self.session.close()", "def destroy_queue(self):\n response = self.queue.delete()\n if self._is_error_call(response):\n raise RuntimeError('SQS could not delete queue: %s' % response)\n self.queue, self.queue_name = None, None", "def close(self):\n self.call('close')", "def test_queue():\n mq = IPCComm.get_queue()\n key = str(mq.key)\n assert(CommBase.is_registered('IPCComm', key))\n CommBase.unregister_comm('IPCComm', key, dont_close=True)\n nt.assert_raises(KeyError, IPCComm.remove_queue, mq)\n CommBase.register_comm('IPCComm', key, mq)\n IPCComm.remove_queue(mq)\n assert(not CommBase.is_registered('IPCComm', key))", "def close(self):\n try:\n process_id = get_pid(\"broker_mqtts\")\n except subprocess.CalledProcessError:\n pass\n else:\n os.system(\"kill -9 {}\".format(process_id))\n if os.path.isfile(self.log):\n os.system(\"rm -rf \" + self.log)", "def test_cron_workflow_service_terminate_cron_workflow(self):\n pass" ]
[ "0.642828", "0.6139574", "0.6111436", "0.60830665", "0.5898193", "0.58823055", "0.5876393", "0.583515", "0.58262885", "0.57727635", "0.57590497", "0.57177114", "0.5715811", "0.5668772", "0.5659588", "0.56331134", "0.5620639", "0.5619311", "0.56096524", "0.5590741", "0.5587805", "0.5587615", "0.55473125", "0.5539746", "0.55229616", "0.55039614", "0.5492605", "0.5484408", "0.54670924", "0.546635" ]
0.7493398
0
Reads sea ice mask
def read_seaice_mask(file=r'C:\Users\apbarret\Documents\data\sea_ice_index\Arctic_region_mask_Meier_AnnGlaciol2007.msk'):
    cols = 304
    rows = 448
    img = np.fromfile(file, dtype='byte').reshape(rows, cols)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n return niimg.load_img(mask_file)", "def read_region_mask(grid='Nh50km'):\n\n mask_path = ('/oldhome/apbarret/data/seaice_indices/'\n 'Arctic_region_mask_Meier_AnnGlaciol2007_Nh50km.dat')\n nrow = 360\n ncol = 360\n \n result = xr.DataArray(np.fromfile(mask_path, dtype=float).reshape(nrow,ncol),\n dims=['x','y'])\n return result", "def load_ice_data():\n SIC = ci.load_seaice() * 100/250\n LIC = ci.load_landice()\n LIC = LIC.sel(lat=slice(-90, -55))\n LIC = ci.latlon_to_polarstereo(LIC)\n LIC.name = 'lic'\n return SIC, LIC", "def get_mediterranean_sea_unmasked(res='0.25x0.3125'):\n # West South corner (south Jordan) =\n lowerlat = 34\n lowerlon = -6.5\n # East North corner (~Ukraine)\n higherlat = 47\n higherlon = 38\n # Get a mask for lat and lon range, then combine\n mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n higherlat=higherlat)\n mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n higherlon=higherlon)\n mask = np.ma.mask_or(mask1, mask2)\n # Add mask for water\n mask = np.ma.mask_or(mask, ocean_unmasked(res=res)[..., 0])\n # Also remove black sea ( by removing an inverted unmasked mask )\n mask3 = get_black_sea_unmasked(res=res, unmask_water=False)\n mask = np.ma.mask_or(mask, np.logical_not(mask3))\n # Also remove bay of biscay\n # Also remove black sea\n mask4 = get_bay_of_biscay_unmasked(res=res)\n mask = np.ma.mask_or(mask, np.logical_not(mask4))\n return mask", "def ice_unmasked(res='4x5', debug=False):\n # Create a np.ma mask\n m = np.logical_not((land_unmasked(res)*ocean_unmasked(res)))\n if debug:\n print((mask, mask.shape))\n return m", "def sky_noise(sky_file_name):\n fits_file = fits.open(sky_file_name)\n image_data = fits_file[0].data\n return image_data", "def get_water_mask_raw():\n\n j = request.json\n\n use_url = j['use_url']\n region = ee.Geometry(j['region'])\n bands = ['B3', 'B8'] # green, nir\n start = j['start']\n stop = j['stop']\n\n percentile = j['percentile'] if 'percentile' in j else 10\n ndwi_threshold = j['ndwi_threshold'] if 'ndwi_threshold' in j else 0\n scale = j['scale'] if 'scale' in j else 10\n\n # filter Sentinel-2 images\n images = ee.ImageCollection('COPERNICUS/S2') \\\n .select(bands) \\\n .filterBounds(region) \\\n .filterDate(start, stop) \\\n .map(lambda i: i.resample('bilinear'))\n\n # remove noise (clouds, shadows) using percentile composite\n image = images \\\n .reduce(ee.Reducer.percentile([percentile])) \\\n \\\n # computer water mask using NDWI\n water_mask = image \\\n .normalizedDifference() \\\n .gt(ndwi_threshold)\n\n # vectorize\n water_mask_vector = water_mask \\\n .mask(water_mask) \\\n .reduceToVectors(**{\n \"geometry\": region,\n \"scale\": scale / 2\n })\n\n water_mask_vector = water_mask_vector.toList(10000) \\\n .map(lambda o: ee.Feature(o).simplify(scale))\n\n water_mask_vector = ee.FeatureCollection(water_mask_vector)\n\n # create response\n if use_url:\n url = water_mask_vector.getDownloadURL('json')\n data = {'url': url}\n else:\n data = water_mask_vector.getInfo()\n\n return Response(json.dumps(data), status=200, mimetype='application/json')", "def out3ice(self):\n data = self._ftdi.spi_read(self.OUT3ICE_ADDR, len=1, burst='fixed')\n return data[0] & self.OUT3ICE_MASK", "def read_region_mask(grid):\n nreg = len(region_ind)\n region_mask_file = '/home/ivan/Python/data/%s.nc'%grid\n fpreg = Nio.open_file(region_mask_file, 'r')\n region_mask = fpreg.variables['REGION_MASK'][:]\n fpreg.close()\n 
region_lname = (N.take(region_ind.values(),\n N.argsort(N.abs(region_ind.keys()))).tolist())\n region_sname = [region[:3].lower() for region in region_lname]\n\n return region_mask, nreg, region_lname, region_sname", "def get_black_sea_unmasked(res='0.25x0.3125', unmask_water=True):\n # West South corner (south Jordan) =\n lowerlat = 41\n lowerlon = 26.8\n # East North corner (~Ukraine)\n higherlat = 50\n higherlon = 43\n # Get a mask for lat and lon range, then combine\n mask1 = lat2lat_2D_unmasked(res=res, lowerlat=lowerlat,\n higherlat=higherlat)\n mask2 = lon2lon_2D_unmasked(res=res, lowerlon=lowerlon,\n higherlon=higherlon)\n mask = np.ma.mask_or(mask1, mask2)\n # Add mask for water\n if unmask_water:\n mask = np.ma.mask_or(mask, ocean_unmasked(res=res)[..., 0])\n return mask", "def read_BEC_region_mask(grid):\n region_mask_file = '/home/ivan/Python/data/BEC_REGION_MASK_%s.nc'%grid\n fpreg = Nio.open_file(region_mask_file, 'r')\n nreg = fpreg.dimensions['nreg']\n region_mask = fpreg.variables['REGION_MASK'][:]\n\n # get region long names\n region_lname = [''.join(fpreg.variables['REGION_lname'][n,:])\n for n in range(nreg)]\n # get region short names\n region_sname = [''.join(fpreg.variables['REGION_sname'][n,:])\n for n in range(nreg)]\n\n fpreg.close()\n return region_mask, nreg, region_lname, region_sname", "def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'", "def get_regions_mask(self, input):", "def make_western_mask(fname_coords, fname_top):\n top = STEM_parsers.parse_tobspred(fname_top)\n top = STEM_vis.grid_tobspred_data(top, which_data='emi_fac')\n\n lon, lat, topo = STEM_parsers.parse_STEM_coordinates(fname_coords)\n\n mask = np.logical_and(abs(top - 1.0) < 0.001, lon < -95)\n # mask out this little blip in prairie that slips through\n mask[np.logical_and(lat > 40, lon > -100)] = False\n #mask = lon < -110\n\n return(mask)", "def chelsea():\n from skimage import data\n\n return data.chelsea()", "def out2ice(self):\n data = self._ftdi.spi_read(self.OUT2ICE_ADDR, len=1, burst='fixed')\n return data[0] & self.OUT2ICE_MASK", "def read(self, infname):\n InArr = np.loadtxt(infname)\n inlon = InArr[:,0]\n inlat = InArr[:,1]\n inZ = InArr[:,2]\n self.mask = ~self.mask\n for i in xrange(inlon.size):\n if i%10000==0: print i\n lon=inlon[i]\n if lon < 0: lon+=360\n lat=inlat[i]\n index = np.where((self.lonArr==lon)*(self.latArr==lat))\n if inZ[i]==0 or math.isnan(inZ[i]): continue\n self.mask[index[0], index[1]]=False\n self.Zarr[index[0], index[1]]=inZ[i]\n return", "def read_outlier_mask():\n\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\outlier_mask'+\\\n r'Outlier_median\\save_masks_logical_OR\\to_SAO'\n mask_a = np.genfromtxt(file_path + '/' + 'quad_A_outlier_mask.csv',\n delimiter=',')\n mask_b = np.genfromtxt(file_path + '/' + 'quad_B_outlier_mask.csv',\n delimiter=',')\n mask_c = np.genfromtxt(file_path + '/' + 'quad_C_outlier_mask.csv',\n delimiter=',')\n mask_d = np.genfromtxt(file_path + '/' + 'quad_D_outlier_mask.csv',\n delimiter=',')\n outlier_mask = [mask_a, mask_b, mask_c, mask_d]\n #print(outluer)\n return np.array(outlier_mask)", "def sentinel2_(image):\n nubes = image.select(\"QA60\")\n opaque = tools.compute_bits_client(nubes, 10, 10, \"opaque\")\n cirrus = tools.compute_bits_client(nubes, 11, 11, \"cirrus\")\n mask = opaque.Or(cirrus)\n result = 
image.updateMask(mask.Not())\n return result", "def read_exposure(fname, patchralims, patchdeclims, mask=True):\n from astropy.io import fits\n \n hdr = fits.getheader(fname)\n data = fits.getdata(fname)\n unc = fits.getdata(fname.replace(\"sci\", \"unc\"))\n \n s = PostageStamp()\n s.filtername = hdr[\"FILTER\"]\n s.nx, s.ny = hdr[\"NAXIS1\"], hdr[\"NAXIS2\"]\n pixscale = hdr[\"PIXSCALE\"]\n PA = hdr[\"ROT\"]\n npsf = hdr[\"NPSF\"]\n\n # --- WCS ---\n s.scale = 1.0/pixscale * np.eye(2)\n s.dpix_dsky = np.matmul(s.scale, rotation_matrix(np.deg2rad(PA)))\n s.crpix = np.array([hdr[\"CRPIX0\"], hdr[\"CRPIX1\"]])\n s.crval = np.array([hdr[\"CRVAL0\"], hdr[\"CRVAL1\"]])\n\n # --- PSF ---\n s.psf = get_psf(npsf)\n\n # -- PIXEL DATA ---\n # x,y\n # note inversion here\n s.ypix, s.xpix = np.meshgrid(np.arange(s.ny), np.arange(s.nx))\n \n # restrict to pixels in patch, and reshape all images to 1D\n sky = pixelcoords_to_skycoords(s)\n inpatch = ((sky[0] > patchralims[0]) & (sky[0] < patchralims[1]) &\n (sky[1] > patchdeclims[0]) & (sky[1] < patchdeclims[1]))\n assert inpatch.sum() > 0\n\n if not mask:\n s.good_pixel = np.copy(inpatch)\n inpatch = slice(None)\n else:\n s.nx = inpatch.sum()\n s.ny = 1\n\n s.xpix = s.xpix.reshape(-1)[inpatch]\n s.ypix = s.ypix.reshape(-1)[inpatch]\n \n # fluxes and uncertainties within patch\n s.pixel_values = data.reshape(-1)[inpatch]\n s.ierr = 1. / unc.reshape(-1)[inpatch]\n \n return s", "def get_mask(self, dataset_name):\n p = path.join(self.dataset_root, dataset_name + \"/\")\n mask_path = serial.preprocess(p + \"mask.npy\")\n mask = np.load(mask_path)\n if not np.all(np.bitwise_or(mask == 0, mask == 1)):\n raise ValueError(\"Mask has incorrect values.\")\n return mask", "def read_BEC_region_mask_popdiag(grid):\n# region_mask_file = '/CESM/bgcwg/obgc_diag/mapping/model_grid/BEC_REGION_MASK_%s.nc'%grid\n region_mask_file = '/glade/p/cesm/bgcwg/obgc_diag/mapping/model_grid/BEC_REGION_MASK_%s.nc'%grid\n fpreg = Nio.open_file(region_mask_file, 'r')\n nreg = fpreg.dimensions['nreg']\n region_mask = fpreg.variables['REGION_MASK'][:]\n\n # get region long names\n region_lname = [''.join(fpreg.variables['REGION_lname'][n,:])\n for n in range(nreg)]\n # get region short names\n region_sname = [''.join(fpreg.variables['REGION_sname'][n,:])\n for n in range(nreg)]\n\n fpreg.close()\n return region_mask, nreg, region_lname, region_sname", "def out1ice(self):\n data = self._ftdi.spi_read(self.OUT1ICE_ADDR, len=1, burst='fixed')\n return data[0] & self.OUT1ICE_MASK", "def read_new_region_mask():\n nreg = len(region_ind_new)\n# region_mask_file = '/home/ivan/Python/new_REGION_MASK_gx3v5.nc'\n region_mask_file = '/glade/home/emunoz/Python/mapping/model_grid/new_REGION_MASK_gx3v5.nc'\n fpreg = Nio.open_file(region_mask_file, 'r')\n region_mask = fpreg.variables['REGION_MASK'][:]\n fpreg.close()\n region_lname = (N.take(region_ind_new.values(),\n N.argsort(N.abs(region_ind_new.keys()))).tolist())\n region_sname = [region[:3].lower() for region in region_lname]\n\n return region_mask, nreg, region_lname, region_sname", "def cfmask_to_mask(raster):\r\n mask = raster.ReadAsArray()\r\n # A value of 0 is clear of clouds/water. 
Make all other values = 1.\r\n mask[mask != 0] = 1\r\n\r\n # That's it, just return the result...\r\n return mask", "def get_mask(data):\n # saturated CCD count\n saturation_adu = 63000\n\n mask_sat = (data[:, 20:-20] >= saturation_adu)\n\n mask_bad = np.zeros_like(data[:, 20:-20], dtype=np.int16)\n # currently no bad pixels in FOCES CCD\n\n mask = np.int16(mask_sat)*4 + np.int16(mask_bad)*2\n\n return mask", "def read_masks(self):\n structure_mask = self.read_image(\n self.filenames[\"structure_mask\"], grayscale=True\n ).astype(np.bool)\n unknown_mask = self.read_image(self.filenames[\"unknown_mask\"], grayscale=True).astype(\n np.bool\n )\n return structure_mask, unknown_mask", "def get_read_noise(self):\n\n read_noise_adu = self.ccd.read_noise / self.ccd.gain\n return numpy.random.normal(scale=read_noise_adu, size=self.image.shape)", "def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3", "def get_mask ( self, iloc ):\n mask = self._mask[iloc]\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n sat = g.ReadAsArray()\n m3 = sat == 0\n\n the_mask = mask.replace(\"SAT\", \"DIV\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n div = g.ReadAsArray()\n m1 = div == 0\n\n the_mask = mask.replace(\"SAT\", \"NUA\")\n if self.resample_opts is not None:\n the_mask = reproject_cut ( os.path.join ( self.datadir, the_mask ),\n **self.resample_opts )\n\n g = gdal.Open( the_mask )\n nua = g.ReadAsArray()\n m2 = np.logical_not ( np.bitwise_and ( nua, 1 ).astype ( np.bool ) )\n return m1 * m2 * m3" ]
[ "0.60069513", "0.59866613", "0.56588846", "0.5600343", "0.5474177", "0.5440845", "0.543682", "0.53643864", "0.5361416", "0.53063095", "0.5299723", "0.5291374", "0.5290301", "0.5229145", "0.52269757", "0.52161676", "0.5201707", "0.51518387", "0.51318836", "0.5130881", "0.5124453", "0.51020104", "0.5088726", "0.50759727", "0.50403935", "0.5032854", "0.49980062", "0.4970912", "0.49492124", "0.49492124" ]
0.7320507
0
For each observed character, append observed runlengths of that character to a list in a dictionary with key=char
def count_runlength_per_character(sequence):
    character_counts = defaultdict(list)
    current_character = None

    for character in sequence:
        if character != current_character:
            character_counts[character].append(1)
        else:
            character_counts[character][-1] += 1

        current_character = character

    return character_counts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_dictionaries(self, chars):\n dictionary = dict()\n for char in chars:\n dictionary[char] = len(dictionary)\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return dictionary, reverse_dictionary", "def update_char_histogram(s,hist = dict()):\r\n for c in s:\r\n if c.isspace():\r\n continue\r\n if c not in hist:\r\n hist[c] = 0\r\n hist[c] += 1\r\n return hist", "def createCharDict(word):\n d = {}\n for char in word:\n if char not in d:\n d[char] = 1\n else:\n d[char] += 1\n return d", "def tally_letters(string):\n output = dict()\n for char in list(string):\n freq = output.get(char, 0)\n output[char]= freq+1\n return output", "def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1", "def create_dictionaries(chars):\n return dict((c, i) for i, c in enumerate(chars)), dict((i, c) for i, c in enumerate(chars))", "def create_dictionary():\n chars = sorted(ch for ch in string.printable if ch not in (\"\\x0b\", \"\\x0c\", \"\\r\"))\n char2id = dict((ch, i + 1) for i, ch in enumerate(chars))\n char2id.update({\"\": 0})\n id2char = dict((char2id[ch], ch) for ch in char2id)\n vocab_size = len(char2id)\n id2char.update({98:'\\\\unk',99:'\\\\unk'})\n return char2id, id2char, vocab_size,chars", "def character_map(text):\n\n print(f\"Total character count: {len(text)}\\n\")\n\n characters = sorted(list(set(text))) # Get sorted list of individual characters\n n_to_char = {}\n char_to_n = {}\n\n num = 0\n for char in characters:\n n_to_char[num] = char\n char_to_n[char] = num\n num += 1\n\n return characters, n_to_char, char_to_n", "def get_table(text, size = 1):\r\n result = {}\r\n for i in range(len(text)):\r\n chars = text[i:i+size]\r\n try:\r\n out = text[i + size]\r\n except IndexError:\r\n break\r\n char_dict = result.get(chars, {})\r\n if out not in char_dict:\r\n char_dict[out] = 0\r\n char_dict[out] += 1\r\n result[chars] = char_dict\r\n return result", "def get_num_words_spoken_by_character_per_episode(content):\n content = list(csv.reader(content.splitlines(), delimiter=','))\n characters = [name[2] for name in content]\n characters = list(dict.fromkeys(characters))\n del characters[0]\n res = defaultdict()\n for character in characters:\n episode = 1\n dic = {}\n count = 0\n for row in content: \n if row[2] == character:\n if str(episode) == row[1]:\n count += len(row[3].split())\n else:\n dic[str(episode)] = count\n episode = int(row[1])\n count = len(row[3].split())\n if '13' not in dic.keys():\n dic['13'] = count \n dic = Counter(dic)\n res[character] = dic\n return res", "def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))", "def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))", "def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))", "def _get_run_length_ac(self):\n self._run_length_ac = []\n for block in self.data:\n self._run_length_ac.extend(\n encode_run_length(tuple(iter_zig_zag(block))[1:])\n )", "def count_kmers(seq, k=3):\n # Start with an empty 
dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts", "def get_letter_counts(str_):\n return dict(Counter(str_))", "def count_chars(s: str) -> dict:\n count_dict = {}\n\n for c in s:\n if c in count_dict:\n count_dict[c] += 1\n else:\n count_dict[c] = 1\n\n return count_dict", "def str_len():\n strlen_dict = {}\n # Length of ion name\n strlen_dict['ion'] = 6\n # Length of data file name for line source\n strlen_dict['Source'] = 30\n # Return\n return strlen_dict", "def chromosome_lengths(self):\n chr_lens = {}\n for r in self.regions(lazy=True):\n if chr_lens.get(r.chromosome) is None:\n chr_lens[r.chromosome] = r.end\n continue\n if r.end > chr_lens[r.chromosome]:\n chr_lens[r.chromosome] = r.end\n return chr_lens", "def word_lengths(sentence):\n\n word_count_dict = {}\n sentence = sentence.split()\n\n for word in sentence:\n length = len(word)\n if length not in word_count_dict:\n word_count_dict[length] = {word}\n else:\n set = word_count_dict[length]\n set.add(word)\n\n return word_count_dict", "def get_char_counts(string):\n counts = {}\n for char in iter(string):\n counts[char] = 1 if not char in counts.keys() else counts[char] + 1\n return counts", "def calculate_frequencies(cipher_text: str) -> dict:\n cipher_frequencies = dict()\n for character in cipher_text:\n try:\n cipher_frequencies[character] += 1\n except KeyError:\n cipher_frequencies[character] = 1\n \n return cipher_frequencies", "def letterFreq(words):\n dict = {}\n total = 0\n for word in words:#Iterate through words\n for letter in word:#Increment by letter\n count = 0\n for yearCount in words[word]:\n count += yearCount.count#Increment total instances of word\n total += count#Count total letters\n if letter in dict:\n dict[letter] += count#Add to existing entry\n else:\n dict[letter] = count#Create new entry\n \"\"\"CODE FOR THE WHOLE ALPHABET\"\"\"\n list = []\n for letter in ascii_lowercase:\n if letter in dict and dict[letter] != 0:\n list.append(dict[letter] / total)#Convert to relative\n else:\n list.append(0.0)#Fill alphabet\n return list", "def count_nucleotides(strand: str) -> dict:\n return dict(Counter(strand))", "def get_num_words_spoken_by_character_per_episode(content):\n d = defaultdict(Counter)\n reader_list = csv.DictReader(content.splitlines())\n for row in reader_list:\n words = row['Line'].strip().split()\n d[row['Character']][row['Episode']] += len(words)\n return d", "def buildDict(self, dict):\n for word in dict:\n self.s.add(word)\n self.length_set = set([len(word) for word in dict])", "def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result", "def character_frequency(filename):\n try:\n f = open(filename)\n except OSError:\n return None\n\n characcters = {}\n for line in f:\n for char in line:\n characcters[char] = characcters.get(char, 0) +1\n f.close()\n return characcters", "def add_string_char_freqs(s, char_freqs):\n s = 
s.lower()\n for char in s:\n# if char in string.printable and char not in string.whitespace:\n if char in string.letters:\n if char in char_freqs:\n char_freqs[char] += 1\n else:\n char_freqs[char] = 1\n elif char == ' ':\n if ' ' in char_freqs:\n char_freqs[' '] += 1\n else:\n char_freqs[' '] = 1", "def word_length_sorted(words):\n ## initialize empty dictionary to track word lengths as key:val pairs\n word_lengths = {}\n\n ## iterate over the input list on a word by word basis and then char by char\n ## within the word, creating a dictionary with word as key and its length\n ## as value\n for word in words:\n ## initialize empty counter/reset count for each word in words\n count = 0\n ## iterate over all characters being read and increment count accordingly\n for char in word:\n count += 1\n ## create new dict key with empty list if count has not yet been seen\n if count not in word_lengths:\n word_lengths[count] = []\n ## append the current word as new value to existing value list for this\n ## count key in dictionary (existing value list could be empty or could\n ## contain previously seen word(s) with the same # of characters)\n word_lengths[count].append(word)\n\n ## iterate over value lists for word_lengths keys and sort in place\n for counts in word_lengths:\n word_lengths[counts].sort()\n\n ## return word_lengths dictionary in the form of a list of tuples; sorted()\n ## ensures results are sorted by tuple value at index 0\n return sorted(word_lengths.items())" ]
[ "0.6133791", "0.61164045", "0.58835137", "0.58087814", "0.58062124", "0.577302", "0.5753303", "0.574885", "0.5735104", "0.57193017", "0.5699817", "0.56469935", "0.56469935", "0.56187785", "0.56006825", "0.5579962", "0.55670226", "0.55447185", "0.5544331", "0.55127746", "0.55098104", "0.54932195", "0.54573274", "0.54436785", "0.5419248", "0.541347", "0.540477", "0.54003346", "0.54001486", "0.5386123" ]
0.7153474
0
Format output as a row with indices 0-50 (max_count) containing the normalized log probabilities of each length. Each comma-separated row is preceded by a header describing which bases the prior corresponds to. Since observed reads are not directional, counts for complementary bases are summed together (A+T, G+C).
def print_all_counts_as_shasta_matrix(all_counts, max_count=50, pseudocount=1):
    a_t_counts = all_counts["A"] + all_counts["T"]
    g_c_counts = all_counts["G"] + all_counts["C"]

    total = 0
    for i in range(max_count + 1):
        total += max(pseudocount, a_t_counts[i])

    line = list()
    for i in range(max_count + 1):
        count = max(pseudocount, a_t_counts[i])
        line.append("%.9f" % math.log((count/total),10))

    print(">AT prior")
    print(",".join(line))
    print()

    total = 0
    for i in range(max_count + 1):
        total += max(pseudocount, g_c_counts[i])

    line = list()
    for i in range(max_count + 1):
        count = max(pseudocount, g_c_counts[i])
        line.append("%.9f" % math.log((count/total),10))

    print(">GC prior")
    print(",".join(line))
    print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_split_libraries_fastq_log(count_barcode_not_in_map,\r\n count_too_short,\r\n count_too_many_N,\r\n count_bad_illumina_qual_digit,\r\n count_barcode_errors_exceed_max,\r\n input_sequence_count,\r\n sequence_lengths,\r\n seqs_per_sample_counts):\r\n log_out = [\"Quality filter results\"]\r\n log_out.append(\r\n \"Total number of input sequences: %d\" %\r\n input_sequence_count)\r\n log_out.append(\r\n \"Barcode not in mapping file: %d\" %\r\n count_barcode_not_in_map)\r\n log_out.append(\r\n \"Read too short after quality truncation: %d\" %\r\n count_too_short)\r\n log_out.append(\r\n \"Count of N characters exceeds limit: %d\" %\r\n count_too_many_N)\r\n log_out.append(\r\n \"Illumina quality digit = 0: %d\" %\r\n count_bad_illumina_qual_digit)\r\n log_out.append(\r\n \"Barcode errors exceed max: %d\" %\r\n count_barcode_errors_exceed_max)\r\n\r\n log_out.append(\"\")\r\n\r\n log_out.append(\"Result summary (after quality filtering)\")\r\n log_out.append(\"Median sequence length: %1.2f\" % median(sequence_lengths))\r\n counts = sorted([(v, k) for k, v in seqs_per_sample_counts.items()])\r\n counts.reverse()\r\n for sequence_count, sample_id in counts:\r\n log_out.append('%s\\t%d' % (sample_id, sequence_count))\r\n\r\n total_seqs_written = 0\r\n for curr_count in counts:\r\n total_seqs_written += curr_count[0]\r\n\r\n log_out.append('\\nTotal number seqs written\\t%d' % total_seqs_written)\r\n return '\\n'.join(log_out)", "def tabular_report(sample_dictionary, blast_dictionary):\n print(\"Writing the report...\")\n sample_dict = sample_dictionary.copy()\n blast_dict = blast_dictionary.copy()\n\n #creating quick dictionary to pull out trimmed sequence\n trimmed_data_dict={}\n for key in sample_dict.keys():\n try:\n trimmed_sequence=(sample_dict[key]['trimmed_sequence'])\n key = key.strip('@')\n trimmed_data_dict.update({key:trimmed_sequence})\n except KeyError:\n continue\n\n samples = []\n for sequenceID in sample_dict:\n samples.append(sequenceID[1:])\n \n records = blast_dict.keys()\n \n columns = [\"SeqID\", \"Trimmed Sequence\", \"Trimmed Sequence Length\",\"BLAST Sequence\", \n \"BLAST SeqLength\", \"Description\", \"Accession\", \"Db\", \n \"Score\", \"E_value\", \"Percent_Identity\", \"Organism\", \n \"Source\", \"Domain\", \"Taxonomy\"]\n \n #Writing\n OUT = open(\"blast_hits_report.txt\", \"w\")\n OUT.write('\\t'.join(columns) + '\\n')\n \n NO_HITS_OUT = open(\"blast_no_hits_report.txt\", \"w\")\n NO_HITS_OUT.write(\"SeqID\\tOriginal Seq\\tOriginal Seq Length\"\n \"\\tTrimmed Seq\\tTrimmed Seq Length\\tResult\\n\")\n\n for record in blast_dict.keys():\n\n trimmed_sequence = trimmed_data_dict[record]\n length_trimmed_sequence=len(trimmed_data_dict[record])\n \n #Used Brute force coding to be able to manipulate and add new columns to output\n try: \n OUT.write(str(record)\n +'\\t'+str(trimmed_sequence)\n +'\\t'+str(length_trimmed_sequence)\n +'\\t'+str(blast_dict[record]['Sequence'])\n +'\\t'+str(blast_dict[record]['SeqLength'])\n +'\\t'+str(blast_dict[record]['Description'])\n +'\\t'+str(blast_dict[record]['Accession'])\n +'\\t'+str(blast_dict[record]['Db'])\n +'\\t'+str(blast_dict[record]['Score'])\n +'\\t'+str(blast_dict[record]['E_value'])\n +'\\t'+str(blast_dict[record]['Percent_Identity'])\n +'\\t'+str(blast_dict[record]['Organism'])\n +'\\t'+str(blast_dict[record]['Source'])\n +'\\t'+str(blast_dict[record]['Domain'])\n +'\\t'+str(blast_dict[record]['Taxonomy'])+'\\n')\n except KeyError:\n continue\n\n for sample in samples:\n\n if sample not in records:\n 
sample_stripped = sample.split(\"\\t\")[0]\n\n #Get original trimmed sequence for reference\n try:\n trimmed_sequence = trimmed_data_dict[sample_stripped]\n length_trimmed_sequence=len(trimmed_sequence)\n except KeyError:\n trimmed_sequence = '-'\n length_trimmed_sequence=0\n\n NO_HITS_OUT.write(sample_stripped\n #Commend out original data being reported\n + '\\t' + sample_dict['@'+sample]['sequence'] \n + '\\t' + str(len(sample_dict['@'+sample]['sequence'])) \n + '\\t' + str(trimmed_sequence)\n + '\\t' + str(length_trimmed_sequence)\n\n + '\\t' + 'NO HIT OR SEQUENCE QUALITY BELOW THRESHOLD\\n')\n OUT.close()\n NO_HITS_OUT.close()", "def ROutputFormatter():\n from math import log\n from tools import file_importer, file_outporter\n \n fdrN = 0.05\n def p_value_key(protItem):\n \"\"\"mini function returning the last element of a list. just because I do not like unnamed functions\"\"\"\n return protItem[-1]\n \n protList = []\n headerFlag = True\n missCount = 1\n inpF = file_importer(\"bob/processed/OST-24-05-2017_combined_ttest_2.csv\") # read and process the csv with protein names and p values\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n \n for inpI in inpLine:\n if inpI.strip(\"\\\"\\n \") == \"NaN\":\n inpI = 1\n try:\n curLine.append(int(inpI.strip(\"\\\"\\n \")))\n except ValueError:\n try:\n curLine.append(float(inpI.strip(\"\\\"\\n \")))\n except ValueError:\n curLine.append(inpI.strip(\"\\\"\\n \")) # by this point, each line in the entry file is processed into a neat list\n if curLine[2] == \"\" or curLine[2] == \"_\": # if no gene name is given, just add a placeholder\n curLine[2] = \"Noname\" + str(missCount)\n missCount += 1\n\n protList.append(curLine)\n \n protList.sort(key = p_value_key) # sort the whole list on p value (lowest to highest) \n i = 0.0 \n m = float(len(protList))\n print(\"dataset length: \", int(m))\n outputF = file_outporter(\"bob/processed/OST-24-05-2017_combined_ttest_ed_2.csv\")\n outputF.write(\"ID,UniprotID,Gene name,OST1,OST2,OST3,WT1,WT2,WT3,pValue-wilcox,FDR,log2FoldChange\\n\")\n for protListI in protList:\n i += 1\n critVal = (i/m)*fdrN # this is the benjamini-hochberg defined critical value\n protListI.append(critVal)\n try:\n FAvg = (protListI[3] + protListI[4] + protListI[5])/3.0 # OST\n SAvg = (protListI[6] + protListI[7] + protListI[8])/3.0 # OT1\n except TypeError:\n print(curLine)\n raise\n try:\n logFoldChange = log(FAvg/SAvg,2) # so positive numbers are more abundant in the OST cells, negatives number in the OT1 cells, at least for the OST IP mass spec file\n except ZeroDivisionError:\n logFoldChange = log(FAvg/0.5,2)\n protListI.append(logFoldChange)\n \n for outI in protListI:\n outputF.write(str(outI))\n if outI is protListI[-1]:\n outputF.write(\"\\n\")\n else:\n outputF.write(\",\")\n \n print(\"formatting complete\")", "def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")", "def report_csv(self):\n # type: () -> Optional[AnyStr]\n if isinstance(self.report_count, int) and self.report_count <= 0:\n return None\n if self.score is None or self.true is None:\n return None\n pack = list(zip(*[(*pack, )\n for packs in zip(self.score, self.true, *self.meta.values())\n for pack in zip(*packs)]))\n logdata = {key: np.stack(val, axis=0) for key, val in zip([\"pred\", \"target\", *self.meta.keys()], pack)}\n assert all([len(val) == len(logdata[\"target\"]) for val in 
logdata.values()]), \"messed up unpacking\"\n header = \"target_name,target_score\"\n for k in range(self.top_k):\n header += f\",pred_{k + 1}_name,pred_{k + 1}_score\"\n for meta_key in self.log_keys:\n header += f\",{str(meta_key)}\"\n lines = []\n for sample_idx in range(len(logdata[\"target\"])):\n gt_label_idx = int(logdata[\"target\"][sample_idx])\n pred_scores = logdata[\"pred\"][sample_idx]\n sorted_score_idxs = np.argsort(pred_scores)[::-1]\n sorted_scores = pred_scores[sorted_score_idxs]\n if self.conf_threshold is None or gt_label_idx is None or \\\n pred_scores[gt_label_idx] >= self.conf_threshold:\n if gt_label_idx is not None:\n entry = f\"{self.class_names[gt_label_idx]},{pred_scores[gt_label_idx]:2.4f}\"\n else:\n entry = f\"<unknown>,{0.0:2.4f}\"\n for k in range(self.top_k):\n entry += f\",{self.class_names[sorted_score_idxs[k]]},{sorted_scores[k]:2.4f}\"\n for meta_key in self.log_keys:\n entry += f\",{str(logdata[meta_key][sample_idx])}\"\n lines.append(entry)\n if isinstance(self.report_count, int) and len(lines) >= self.report_count:\n break\n return \"\\n\".join([header, *lines])", "def information(counts: list) -> list:\n heights = []\n # magic\n e = (1 / math.log(2)) * ((4 - 1) / (2 * sum([counts[1][base] for base in \"ACGT\"])))\n for column_count in counts:\n relative_frqs = {base: column_count[base] / sum(column_count.values()) for base in \"ACGT\"}\n H = -1 * sum([relative_frqs[base] * math.log2(relative_frqs[base]) for base in \"ACGT\"])\n R = math.log2(4) - (H + e)\n heights.append({base: relative_frqs[base] * R for base in \"ACGT\"})\n # end magic\n return heights", "def report_csv(self):\n # type: () -> Optional[AnyStr]\n report = self.gen_report()\n if not report:\n return None\n\n none_str = \"unknown\"\n\n def patch_none(to_patch, number_format='2.4f'): # type: (Any, str) -> str\n if to_patch is None:\n return none_str\n if isinstance(to_patch, float):\n s = f\"{{:{number_format}}}\"\n return s.format(to_patch)\n return str(to_patch)\n\n header = \"sample,target_name,target_bbox\"\n for meta_key in self.log_keys:\n header += f\",{str(meta_key)}\"\n if self.top_k:\n for k in range(self.top_k):\n header += f\",detect_{k + 1}_name,detect_{k + 1}_bbox,detect_{k + 1}_conf,detect_{k + 1}_iou\"\n else:\n # unknown count total detections (can be variable)\n header += \",detect_name[N],detect_bbox[N],detect_conf[N],detect_iou[N],(...)[N]\"\n lines = [\"\"] * len(report)\n for i, result in enumerate(report):\n target = result[\"target\"]\n detect = result[\"detect\"]\n if not target:\n entry = f\"{none_str},{none_str},{none_str}\"\n else:\n entry = f\"{target['image_id']},{patch_none(target['class_name'])},{patch_none(target['bbox'])}\"\n for meta_key in self.log_keys:\n entry += f\",{str(target[meta_key])}\"\n for det in detect:\n entry += f\",{det['class_name']},{det['bbox']},{patch_none(det['confidence'])},{patch_none(det['iou'])}\"\n lines[i] = entry\n return \"\\n\".join([header, *lines])", "def ascii_histogram(seq) -> None:\n counted = count_elements(seq)\n for k in sorted(counted):\n print('{0:5d} {1}'.format(k, '+' * counted[k]))", "def generate_counthistline(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n bins = np.logspace(0,np.log10(max_size),21)\n bins_freqs = np.float_(bins) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=bins)\n \n fig = plt.figure()\n \n ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n 
ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(bins_freqs,list(hist)+[hist[-1]],color='#e31a1c',drawstyle='steps-post',clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)", "def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")", "def generate_counthist(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n sizes = np.arange(1,max_size+1)\n freqs = np.float_(sizes) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=sizes)\n idxs = hist > 0\n \n fig = plt.figure()\n \n ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(freqs[idxs],hist[idxs],marker='o',linestyle='None',color='#e31a1c',markeredgewidth=0,markersize=4,clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def produce_mirna_unique_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.uniq.bam.mirbase_counts.txt\")", "def format_histogram_one_count(counts, bin_edges):\r\n lines = []\r\n lines.append('Length\\tCount')\r\n for edge, count in zip(bin_edges, counts):\r\n lines.append('\\t'.join(map(str, [edge, count])))\r\n return '\\n'.join(lines)", "def gen_count_style(count, max_count):\n if count == 0:\n percent = 0\n else:\n percent = math.log(count+1, max_count+1)\n red = int(round(RED_MAX - RED_SPREAD*percent))\n green = 
int(round(GREEN_MAX - GREEN_SPREAD*percent))\n blue = int(round(BLUE_MAX - BLUE_SPREAD*percent))\n\n color = \"{0:2x}{1:2x}{2:2x}\".format(red, green, blue)\n style = ' style=\"text-align: right; background-color: #' + color + ';\" '\n\n return style", "def num_54():\n frmt = \"\"\"\n :{}\n :Generate Data that conform to a uniform distribution.\n :\n :Class values: {}\n :Population size: {}\n :Results:\n : values:\n {}\n : table:\n {}\n : histogram: (class, frequency)\n {}\n :Then use NumPyArrayToTable to get your table.\n \"\"\"\n # import numpy as np\n st = 1\n end = 7\n vals = np.arange(st,end)\n reps = 10\n z = np.repeat(vals,reps)\n np.random.shuffle(z)\n ID = np.arange(len(z))\n tbl = np.array(list(zip(ID, z)), \n dtype = [('ID', 'int'), ('Class', 'int')])\n h = np.histogram(z, np.arange(st, end+1))\n h = np.array(list(zip(h[1], h[0])))\n pad = \" \"\n args =[num_54.__doc__, vals, reps*len(vals),\n indent(str(z.reshape(3,20)), pad),\n indent(str(tbl), pad), indent(str(h), pad)]\n print(dedent(frmt).format(*args))", "def log_prob_of_file(filepath, model):\n vocab = set(counts_un.keys())\n tot = 0\n count = 0\n prev_prev = \"<s>\\n\"\n prev = \"<s>\\n\"\n with open(filepath) as f:\n for line in f:\n count += 2\n line = line.strip()+\"\\n\"\n tri_prob = model.get_trigram_prob(prev_prev, prev, line)\n tot += math.log(tri_prob)\n prev_prev = prev\n prev = line \n for line in [\"</s>\\n\", \"</s>\\n\"]:\n tri_prob = model.get_trigram_prob(prev_prev, prev, line)\n tot += math.log(tri_prob)\n prev_prev = prev\n prev = line \n return tot, count", "def test_format_histograms(self):\r\n self.assertEqual(format_histograms(array([0, 1, 0, 2, 2, 3]),\r\n array(\r\n [2, 1, 0, 2, 0, 0]), array(\r\n [0, 0, 0, 2, 0, 1]),\r\n array(\r\n [100, 110, 120, 130, 140, 150, 160])),\r\n \"\"\"# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\nLength\\tRaw\\tBefore\\tAfter\\n100\\t0\\t2\\t0\\n110\\t1\\t1\\t0\\n120\\t0\\t0\\t0\\n130\\t2\\t2\\t2\\n140\\t2\\t0\\t0\\n150\\t3\\t0\\t1\"\"\")", "def Header(nmax):\r\n n = np.arange(1,nmax+1)\r\n return (2*n+1)/(n*(n+1))", "def report_distribution(count):\n # create a list containing tuples of count and word,\n # while summing the total number of word occurrences\n num = 0\n tup_list = []\n\n for key, value in count.items():\n num += int(value)\n tup_list.append((value, key))\n # make me use string formatting smh im gonna use lambas i don't care what we have learned\n #tup_list.sort(key = lambda t: t[0], reverse = True)\n tup_list.sort(reverse = True)\n\n s_list = []\n s_list.append(\"{:>5}\".format(num))\n max = 20\n for tup in tup_list:\n if max == 0:\n break\n else:\n max -= 1\n s_list.append(\"{:>5}\".format(tup[0]) + \" \" + tup[1])\n\n format_string = \"count word\\n\"\n for i in s_list:\n format_string = format_string + i + \"\\n\"\n\n # remove last new line im too lazy to do it right in the for-loop\n #format_string = format_string[:-1]\n # add lines with the title and total word count to the output string\n \n # sort the list from largest number to smallest,\n # add a line to the output for each word in the top 20 containing count and word\n \n # return the string containing the report\n return format_string", "def information_content(self):\n ic = 0\n for row in self.pwm:\n ic += 2.0 + np.sum([row[x] * log(row[x])/log(2) for x in range(4) if row[x] > 0])\n return ic", "def format_histograms(raw_hist, pre_hist, post_hist, bin_edges):\r\n lines = 
[]\r\n lines.append('# bins raw sequence lengths, length of sequences that '\r\n 'pass quality filters before processing, and lengths of sequences that '\r\n 'pass quality filters post processing.')\r\n lines.append('Length\\tRaw\\tBefore\\tAfter')\r\n for edge, raw, pre, post in zip(bin_edges, raw_hist, pre_hist, post_hist):\r\n lines.append('\\t'.join(map(str, [edge, raw, pre, post])))\r\n return '\\n'.join(lines)", "def write_raw_stats(raw_R1_reads, raw_R1_bases, Q20_R1_bp, Q20_R1_percent, Q30_R1_bp, Q30_R1_percent, raw_R2_reads, raw_R2_bases, Q20_R2_bp, Q20_R2_percent, Q30_R2_bp, Q30_R2_percent, output_file, name):\n with open(output_file, 'w') as f:\n f.write('Name\\tR1[reads]\\tR1[bp]\\tR2[reads]\\tR2[bp]\\tQ20_Total_[bp]\\tQ30_Total_[bp]\\tQ20_R1_[bp]\\tQ20_R2_[bp]\\tQ20_R1_[%]\\tQ20_R2_[%]\\tQ30_R1_[bp]\\tQ30_R2_[bp]\\tQ30_R1_[%]\\tQ30_R2_[%]\\tTotal_Sequenced_[bp]\\tTotal_Sequenced_[reads]\\n')\n Q20_Total = str(Q20_R1_bp + Q20_R2_bp)\n Q30_Total = str(Q30_R1_bp + Q30_R2_bp)\n Total_Sequenced_bp = str(raw_R1_bases + raw_R2_bases)\n Total_Sequenced_reads = str(raw_R1_reads + raw_R2_reads)\n Line = name + '\\t' + str(raw_R1_reads) + '\\t' + str(raw_R1_bases) + '\\t' + str(raw_R2_reads) + '\\t' + str(raw_R2_bases) + '\\t' + Q20_Total + '\\t' + Q30_Total + '\\t' + str(Q20_R1_bp) + '\\t' + str(Q20_R2_bp) + '\\t' + str(Q20_R1_percent) + '\\t' + str(Q20_R2_percent) + '\\t' + str(Q30_R1_bp) + '\\t' + str(Q30_R2_bp) + '\\t' +str( Q30_R1_percent) + '\\t' + str(Q30_R2_percent) + '\\t' + Total_Sequenced_bp + '\\t' + Total_Sequenced_reads\n f.write(Line)", "def GenFrequencies(alignment):\n bases = {'A':0,'C':0,'G':0,'T':0,'-':0}\n FreqArray = []\n SeqLen = getLen(alignment)\n for i in range(SeqLen):\n FreqArray.append(bases.copy())\n count = 0\n SeqNum = 0\n with open(alignment,'rU') as F:\n data = 'placeHolder'\n while data:\n data = F.readline().strip()\n if data and not data[0] == '>':\n for char in data:\n FreqArray[count][char] += 1\n count +=1\n elif data:\n count = 0\n SeqNum += 1\n else:\n break\n for position in FreqArray:\n for base in position:\n position[base] /= float(SeqNum)\n return FreqArray", "def format_base_frequencies(self):\n return format_frequencies(self.get_base_frequencies())", "def three_end_distribution(in_fastq, num_bases, to_display):\n counts = collections.defaultdict(int)\n with open(in_fastq) as in_handle:\n for (_, seq, _) in FastqGeneralIterator(in_handle):\n counts[seq[-num_bases:]] += 1\n counts = [(v, k) for (k, v) in counts.iteritems()]\n counts.sort(reverse=True)\n for count, seq in counts[:to_display]:\n print seq, count", "def print_sequence_length_report(directory, base_pair_limit):\n\n for filepath in find_fastq_files(directory):\n print(\"%(filepath)s\\n%(percent)1.2f%% sequences over %(base_pair_limit)i\\n\" %\n {\n 'filepath': filepath,\n 'percent': parse_sequence_lengths(filepath, base_pair_limit),\n 'base_pair_limit': base_pair_limit\n })", "def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return", "def calc_pIdent_hist(infile, outfile):\n data_dict = {}\n\n print(f'Parsing file {infile}')\n parse_magic_blast(infile, data_dict)\n\n\n with open(outfile, 'w') as o:\n\n print(f'File parsed. 
Writing to {outfile}')\n header_order = sorted(data_dict.keys())\n header = 'pIdent\\t' + '\\t'.join(header_order) + '\\n'\n\n o.write(header)\n\n for i in reversed(range(70, 101)):\n buildLine = [str(i)]\n\n for j in header_order:\n buildLine.append(str(data_dict[j][i]))\n\n o.write('\\t'.join(buildLine) + '\\n')", "def _print_stat_rows(title,rows_before,rows_after):\n self.strprint(str(title)+\" : Percent of processed rows = %1.2F\"\\\n %(np.abs(rows_before-rows_after)*100/rows_before))" ]
[ "0.61965764", "0.55557483", "0.55227757", "0.5508419", "0.54903924", "0.54385847", "0.5424107", "0.5422831", "0.5291813", "0.5266468", "0.52504617", "0.5247569", "0.5234036", "0.520332", "0.5193628", "0.51752245", "0.5163459", "0.51611245", "0.51465124", "0.51452625", "0.51264995", "0.5112025", "0.5054759", "0.5048397", "0.50337684", "0.5014511", "0.49760702", "0.49508217", "0.49175513", "0.49134362" ]
0.5927525
1
To make sure we follow the rules for channel opening (see Spectrum doc): 0, 1, 2, 4 or 8 channels per module, and the same number of channels per module if both modules are used. If this is not respected, open unused channels to comply. Also, order the labels correctly to comply with Spectrum multiplexing (alternate on each module in numerical order).
def check_chan(chan):
    chan = sorted(chan)
    assert all([c in range(16) for c in chan]),\
        "All spectrum channels must be between 0 and 15"
    num = len([c for c in chan if c < 8])
    chan = chan[:num],chan[num:]
    nchan = max(len(chan[0]),len(chan[1]))
    while nchan not in [1,2,4,8]:
        nchan += 1
    if len(chan[0]) not in (0,nchan) or len(chan[1]) not in (0,nchan):
        print("[Warning] Cannot open this combination of channels on Spectrum")
        print(" I will add useless channels for you")
        while 0 < len(chan[0]) < nchan:
            left = [i for i in range(8) if i not in chan[0]]
            chan[0].append(left[0])
        while 0 < len(chan[1]) < nchan:
            left = [i for i in range(8,16) if i not in chan[1]]
            chan[1].append(left[0])
    rchan = []
    chan = sorted(chan[0]),sorted(chan[1])
    if chan[0] and chan[1]:
        for c1,c2 in zip(*chan):
            rchan.extend([c1,c2])
    else:
        rchan = chan[0] or chan[1]
    print("DEBUG channel order:",rchan)
    return rchan
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manage_channels(_) -> int:\n return 1 << 4", "def manage_channels(_) -> int:\n return 1 << 4", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def num_channels(self):\n return 3", "def test_channels_first(self):\n\t\tdetails = self.watcher.describe()\n\t\tM = details.iloc[0].M\n\t\tN = details.iloc[0].N\n\t\tself.assertEqual(M, 3)\n\t\tself.assertEqual(N, 64)", "def test_bw40_ch9(self, setUp):\n\n self.common_channel(band='40', channel='9')", "def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]", "def test_bw40_ch140(self, setUp):\n\n self.common_channel(band='40', channel='140')", "def test_num_from_layer(self):\n for i in range(CliffordUtils.NUM_CLIFFORD_2_QUBIT):\n standard = CliffordUtils.clifford_2_qubit(i)\n circ = QuantumCircuit(2)\n for layer, idx in enumerate(_layer_indices_from_num(i)):\n circ.compose(_CLIFFORD_LAYER[layer][idx], inplace=True)\n layered = Clifford(circ)\n self.assertEqual(standard, layered)", "def test_channels(self):\n test_channels = 2\n self.encoder._channels = test_channels\n self.assertEqual(self.encoder._channels, test_channels)", "def get_num_channels():\r\n check_mixer()\r\n return sdl.Mix_GroupCount(-1)", "def channelSelect (i):\n# assert(0<=i<128, \"channel {} does not exist in [0-127]\".format(i))\n assert(0<=i<128)\n closeAll() #<== close all relays\n if i< 1*8 :# bank1\n mask=shift8(i, 1) ; bus.write_byte_data(R1,off,mask)\n elif i< 2*8 :\n mask=shift8(i, 1) ; bus.write_byte_data(R1,mask,off)\n elif i< 3*8 :\n mask=shift8(i, 1) ; bus.write_byte_data(R2,off, mask)\n elif i< 4*8 :\n mask=shift8(i, 1) ; bus.write_byte_data(R2,mask, off)\n elif i< 5*8 :# bank2\n mask=shift8(i, 0) ; bus.write_byte_data(R3,mask,off)\n elif i< 6*8 :\n mask=shift8(i, 1) ; bus.write_byte_data(R3,off,mask)\n elif i< 7*8 :\n mask=shift8(i, 0) ; bus.write_byte_data(R4,mask,off)\n elif i< 8*8 :\n mask=shift8(i, 0) ; bus.write_byte_data(R4,off,mask)\n elif i< 9*8 :# bank3\n mask=shift8(i, 0) ; bus.write_byte_data(R6,mask,off)\n elif i< 10*8 :\n mask=shift8(i, 0) ; bus.write_byte_data(R6,off,mask)\n elif i< 11*8 :\n mask=shift8(i, 1) ; bus.write_byte_data(R5,mask,off)\n elif i< 12*8 :\n mask=shift8(i, 1) ; bus.write_byte_data(R5,off,mask)\n elif i< 13*8 :# bank4\n mask=shift8(i, 0) ; bus.write_byte_data(R8,mask,off)\n elif i< 14*8 :\n mask=shift8(i, 0) ; bus.write_byte_data(R8,off,mask)\n elif i< 15*8 :\n mask=shift8(i, 0) ; bus.write_byte_data(R7,off,mask)\n elif i< 16*8 :\n mask=shift8(i, 0) ; bus.write_byte_data(R7,mask,off)\n return mask", "def rank_chanels():\r\n \r\n all_paths = [['data_bci\\\\row_data\\\\subject1\\\\'], ['data_bci\\\\row_data\\\\subject2\\\\'],['data_bci\\\\row_data\\\\subject3\\\\']]\r\n\r\n train_subjects = ['01']\r\n test_subject = '02'\r\n freq = 512\r\n\r\n cutoff_beggining = 0\r\n columns_to_read = ['Fp1', 'AF3' ,'F7', 'F3', 'FC1', 'FC5', 'T7', 'C3', 'CP1', 'CP5',\r\n 'P7', 'P3', 'Pz', 'PO3', 'O1', 'Oz', 'O2', 'PO4', 'P4', 
'P8', 'CP6',\r\n 'CP2', 'C4', 'T8', 'FC6', 'FC2', 'F4', 'F8', 'AF4', 'Fp2', 'Fz', 'Cz','class']\r\n seq_len = 0\r\n cut_step = 0\r\n num_perseg = freq\r\n num_overlap = int(num_perseg/2)\r\n min_freq=8\r\n max_freq=45\r\n k = 3\r\n\r\n First_iter = True\r\n for path in all_paths:\r\n train_full_data, train_full_data_filtered, train_full_anots, test_full_data, test_sliced_full_filtered, test_full_annoations = read_filter(path, train_subjects,test_subject, columns_to_read, cutoff_beggining, seq_len, cut_step)\r\n\r\n psd_signals = eval_psd_not_modulated(train_full_data, num_perseg, num_overlap, freq, min_freq, max_freq) \r\n chanels_acc = iterate_over_chanels(psd_signals, train_full_anots, k)\r\n if First_iter:\r\n accuracy = chanels_acc\r\n First_iter = False\r\n else:\r\n accuracy += chanels_acc\r\n accuracy = accuracy/len(all_paths)\r\n sorted_indexies = np.argsort(accuracy)[::-1]\r\n\r\n\r\n #indexis_above_treshohld = [idx for idx in sorted_indexies if accuracy[idx]> min_accary]\r\n return sorted_indexies", "def __init__(self, \n samples_per_symbol=_def_samples_per_symbol,\n excess_bw=_def_excess_bw,\n costas_alpha=_def_costas_alpha,\n gain_mu=_def_gain_mu,\n mu=_def_mu,\n omega_relative_limit=_def_omega_relative_limit,\n gray_code=_def_gray_code,\n verbose=_def_verbose,\n log=_def_log):\n\n\tgr.hier_block2.__init__(self, \"dqpsk_demod\",\n\t\t\t gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n\t\t\t gr.io_signature(1, 1, gr.sizeof_char)) # Output signature\n\n self._samples_per_symbol = samples_per_symbol\n self._excess_bw = excess_bw\n self._costas_alpha = costas_alpha\n self._mm_gain_mu = gain_mu\n self._mm_mu = mu\n self._mm_omega_relative_limit = omega_relative_limit\n self._gray_code = gray_code\n\n if samples_per_symbol < 2:\n raise TypeError, \"sbp must be >= 2, is %d\" % samples_per_symbol\n\n arity = pow(2,self.bits_per_symbol())\n \n # Automatic gain control\n scale = (1.0/16384.0)\n self.pre_scaler = gr.multiply_const_cc(scale) # scale the signal from full-range to +-1\n #self.agc = gr.agc2_cc(0.6e-1, 1e-3, 1, 1, 100)\n self.agc = gr.feedforward_agc_cc(16, 2.0)\n \n # RRC data filter\n ntaps = 11 * samples_per_symbol\n self.rrc_taps = gr.firdes.root_raised_cosine(\n 1.0, # gain\n self._samples_per_symbol, # sampling rate\n 1.0, # symbol rate\n self._excess_bw, # excess bandwidth (roll-off factor)\n ntaps)\n self.rrc_filter=gr.interp_fir_filter_ccf(1, self.rrc_taps) \n\n if not self._mm_gain_mu:\n sbs_to_mm = {2: 0.050, 3: 0.075, 4: 0.11, 5: 0.125, 6: 0.15, 7: 0.15}\n self._mm_gain_mu = sbs_to_mm[samples_per_symbol]\n\n self._mm_omega = self._samples_per_symbol\n self._mm_gain_omega = .25 * self._mm_gain_mu * self._mm_gain_mu\n self._costas_beta = 0.25 * self._costas_alpha * self._costas_alpha\n fmin = -0.025\n fmax = 0.025\n \n self.receiver=gr.mpsk_receiver_cc(arity, pi/4.0,\n self._costas_alpha, self._costas_beta,\n fmin, fmax,\n self._mm_mu, self._mm_gain_mu,\n self._mm_omega, self._mm_gain_omega,\n self._mm_omega_relative_limit)\n\n # Perform Differential decoding on the constellation\n self.diffdec = gr.diff_phasor_cc()\n \n # find closest constellation point\n rot = 1\n rotated_const = map(lambda pt: pt * rot, psk.constellation[arity])\n self.slicer = gr.constellation_decoder_cb(rotated_const, range(arity))\n\n if self._gray_code:\n self.symbol_mapper = gr.map_bb(psk.gray_to_binary[arity])\n else:\n self.symbol_mapper = gr.map_bb(psk.ungray_to_binary[arity])\n \n # unpack the k bit vector into a stream of bits\n self.unpack = 
gr.unpack_k_bits_bb(self.bits_per_symbol())\n\n if verbose:\n self._print_verbage()\n \n if log:\n self._setup_logging()\n \n # Connect & Initialize base class\n self.connect(self, self.pre_scaler, self.agc, self.rrc_filter, self.receiver,\n self.diffdec, self.slicer, self.symbol_mapper, self.unpack, self)", "def __init__(self, parent): \n \n self.parent = parent\n \n self.custom_channel_name = _qstring(parent.rhd)\n self.native_channel_name = _qstring(parent.rhd)\n self.native_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.custom_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.signal_type = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.channel_enabled = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.chip_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.board_stream = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_trigger_mode= np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_threshold = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_trigger_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_edge_polarity = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.electrode_impedance_magnitude = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n self.electrode_impedance_phase = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n\n if self.signal_type == 0 and self.channel_enabled:#Add name to the amplifier channel list\n parent._AMPLIFIER_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 1 and self.channel_enabled:#Add name to the aux channel list\n parent._AUX_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 2 and self.channel_enabled:#Supply voltage\n parent._SUPPLY_VOLTAGE_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 3 and self.channel_enabled:#usb board adc input channel\n parent._ADC_INPUT_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 4 and self.channel_enabled:#usb board digital input channel\n parent._DIGITAL_INPUT_CHANNELS.append(self.native_channel_name)", "def test_bw40_auto(self, setUp):\n\n self.common_channel(band='40', channel='0')", "def setup_channels():\n\n # Setup channel encoders\n for c in channels:\n channels[c].setup()\n print()", "def _calc_out_channels(self, in_channels):\n out_channels = min(in_channels * 2, self.max_channels)\n return out_channels", "def set_num_channels(count):\r\n check_mixer()\r\n global _numchanneldata, _channeldata\r\n if count > _numchanneldata:\r\n _channeldata.extend([ChannelData() for i in\r\n range(count - _numchanneldata)])\r\n _numchanneldata = count\r\n sdl.Mix_AllocateChannels(count)", "def comchans(self, nick):\n comchannels = 0\n for chan in self.chandb:\n if nick in chan:\n comchannels += 1\n return comchannels", "def __init__(self,\n samples_per_symbol=_def_samples_per_symbol,\n excess_bw=_def_excess_bw,\n gray_code=_def_gray_code,\n verbose=_def_verbose,\n log=_def_log):\n\n\tgr.hier_block2.__init__(self, \"dqpsk_mod\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_char), # Input signature\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature\n\n self._samples_per_symbol = samples_per_symbol\n self._excess_bw = excess_bw\n self._gray_code = gray_code\n\n if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:\n raise TypeError, (\"sbp must be an integer >= 2, is %d\" % 
samples_per_symbol)\n\n\tntaps = 11 * samples_per_symbol\n \n arity = pow(2,self.bits_per_symbol())\n\n # turn bytes into k-bit vectors\n self.bytes2chunks = \\\n gr.packed_to_unpacked_bb(self.bits_per_symbol(), gr.GR_MSB_FIRST)\n\n if self._gray_code:\n self.symbol_mapper = gr.map_bb(psk.binary_to_gray[arity])\n else:\n self.symbol_mapper = gr.map_bb(psk.binary_to_ungray[arity])\n \n self.diffenc = gr.diff_encoder_bb(arity)\n\n rot = .707 + .707j\n rotated_const = map(lambda pt: pt * rot, psk.constellation[arity])\n self.chunks2symbols = gr.chunks_to_symbols_bc(rotated_const)\n\n # pulse shaping filter\n\tself.rrc_taps = gr.firdes.root_raised_cosine(\n\t self._samples_per_symbol, # gain (sps since we're interpolating by sps)\n self._samples_per_symbol, # sampling rate\n 1.0,\t\t # symbol rate\n self._excess_bw, # excess bandwidth (roll-off factor)\n ntaps)\n\n\tself.rrc_filter = gr.interp_fir_filter_ccf(self._samples_per_symbol, self.rrc_taps)\n\n if verbose:\n self._print_verbage()\n \n if log:\n self._setup_logging()\n \n\t# Connect & Initialize base class\n self.connect(self, self.bytes2chunks, self.symbol_mapper, self.diffenc,\n self.chunks2symbols, self.rrc_filter, self)", "def output_channels(self, input_channels):\n pass", "def getDescriptorChannels(self): # real signature unknown; restored from __doc__\n pass", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def get_n_channels(self): \n return self.n_out_channels", "def PYDSO010SETCHAN(self):\n ctx = self.item_start() # always first line of test\n\n chan = ctx.item.chan\n if not (0 < chan < 5):\n self.logger.error(\"Invalid channel number: {} (1-4 accepted)\".format(chan))\n self.item_end(ResultAPI.RECORD_RESULT_INTERNAL_ERROR)\n return\n\n self.shared_lock(self.DSO).acquire()\n\n # reset the scope to a known state\n self.dso.write('*RST')\n if chan != 1: # after reset, chan 1 is already on\n self.dso.write(':CHANnel1:DISPlay OFF') # turn off channel 1\n self.dso.write(':CHANnel{}:DISPlay ON'.format(chan)) # turn off channel 1\n\n self.dso.write(':CHANnel{}:SCALe 100mV'.format(chan))\n\n vpp = self.dso.query(':MEASure:VPP? 
CHANnel{}'.format(chan))\n value = float(vpp)\n _result, _bullet = ctx.record.measurement(\"VPP{}\".format(chan), value, ResultAPI.UNIT_VOLTS)\n\n self.log_bullet(\"Switched to channel {}\".format(chan))\n self.log_bullet(_bullet)\n time.sleep(0.1) # give it some time to sit here, else its too fast\n self.shared_lock(self.DSO).release()\n self.item_end() # always last line of test", "def channel_names(self):\n header_names = [s.strip() for s in\n self.header['Bias Spectroscopy>Channels'].split(';')]\n\n # 'Bias calc (V)' is in file but not in the header.\n return ['Bias calc (V)', ] + header_names", "def selectChannel(self,asic,chan, hsmode= 1 ):\n pass", "def __init__(self, parent): \n self.signal_group_name = _qstring(parent.rhd)\n self.signal_group_header = _qstring(parent.rhd)\n self.signal_group_enabled = (np.int16(struct.unpack('h', parent.rhd.read(2)))[0] == 1)\n self.number_of_channels = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.number_of_amplifier_channels = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n \n self.channels = {}\n #if there are channels:\n if self.signal_group_enabled and self.number_of_channels != 0: \n for i in range(self.number_of_channels):\n c = Channel(parent)\n self.channels[c.native_channel_name] = c", "def _identify_channels(self, name):\n\n channel_list = []\n if self.nuke_node.Class() == \"Cryptomatte\":\n # nuke_node is a keyer gizmo\n channel_list = self.nuke_node.node('Input1').channels()\n else:\n # nuke_node might a read node\n channel_list = self.nuke_node.channels()\n\n relevant_channels = [x for x in channel_list if x.startswith(name)]\n pure_channels = []\n for channel in relevant_channels:\n suffix = \".red\"\n if not channel.endswith(suffix):\n continue\n # to do: validate this somewhere else\n pure_channel = channel[:-len(suffix)]\n pure_channels.append(pure_channel)\n\n return sorted(pure_channels)" ]
[ "0.6028733", "0.6028733", "0.6014569", "0.5881849", "0.57059354", "0.55728966", "0.55078214", "0.5452189", "0.5366916", "0.53582627", "0.5342205", "0.53277797", "0.52655023", "0.52529305", "0.52440494", "0.5243839", "0.52283365", "0.52131444", "0.51890916", "0.5166996", "0.51583695", "0.51550967", "0.51423365", "0.5138183", "0.5129059", "0.5117629", "0.51093715", "0.509755", "0.50906575", "0.5070543" ]
0.60648745
0
Helper method for sorting tweets in chronological order.
def sort_tweets(tweets):
    tweets.sort(key=lambda x: x.get_date())
    return tweets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)", "def sort_by_popularity(tweets: list) -> list:\n tweets_by_popularity = sorted(tweets, key=lambda x: (x.retweets, -x.time), reverse=True) # Use lambda functions when an anonymous function is required for a short period of time.\n return tweets_by_popularity", "def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)", "def sort_words(words):\n return sorted(words)", "def sort_results(self):\n pass", "def sortFinalFeed(finalFeed):\n\tfinalFeed.sort(key = lambda a: a['datetime'])\n\treturn finalFeed", "def _sort_torrents(ctx, torrent_list, sort_type):\n\n if sort_type == 'seeders':\n return sorted(torrent_list, key=lambda t: t['seeders'], reverse=True)", "def sortby(self):\n ...", "def ascendingTimeOrder(t1, t2):\n return cmp(t1['total_seconds'], t2['total_seconds'])", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def sortByPostText(self,post_text,desc):\n\t\n\treturn self.__sort(post_text,\"\",desc)", "def sorted_by_count_and_word(word_counts):\n\n return sorted(word_counts.items(), key=reversed_tuple)", "def sort_by_unicode(self):\n utils.sort_unicode_word_list(self.words_new)", "def sort(self):\n self.words = set(sorted(self.words))", "def sort_duration(self):\n self.sort('duration')", "def order_by_ftime(tasks_lst):\n return sorted(tasks_lst, key=lambda task: task[1])", "def sorting(tokens: list):\n tokens.sort(key=lambda x: (x[0], x[1]))", "def test_timeline_order_sorted_by_activity_publish_date(self):\n from .mockers import user_status, user_comment\n username = 'messi'\n self.create_user(username)\n activity_0_id = self.create_activity(username, user_status).json['id']\n activity_1_id = self.create_activity(username, user_status).json['id']\n activity_2_id = self.create_activity(username, user_status).json['id']\n res = self.testapp.post('/activities/%s/comments' % str(activity_1_id), json.dumps(user_comment), oauth2Header(username), status=201)\n\n res = self.testapp.get('/people/%s/timeline?sortBy=activities' % username, \"\", oauth2Header(username), status=200)\n self.assertEqual(len(res.json), 3)\n self.assertEqual(res.json[0].get('id', None), activity_2_id)\n self.assertEqual(res.json[1].get('id', None), activity_1_id)\n self.assertEqual(res.json[2].get('id', None), activity_0_id)", "def sort_func(structure):\n return structure.timestamp", "def ordered(cls, objs):\n objs = list(objs)\n try:\n objs.sort(key=lambda o: o.latest_message.created, reverse=True)\n except:\n pass\n return objs", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def sort(self):\n self.notes.sort()", "def set_trec_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:(x.get_score(),x.get_doc()),reverse=True)\n for r in self._run[k]:\n print r.get_str()", "def set_analyzed_tweets(self, tweets):\n slim_tweets = 
[SlimTweet(tweet) for tweet in tweets]\n self.analyzed_tweets = sort_tweets(slim_tweets)", "def sort(self, field='word', order=None):\n self.data = list(self.sorted(field, order))", "def sort_hashtags_by_popularity(tweets: list) -> list:\n hashtags_by_popularity = {}\n pattern = r\"#\\w+\"\n for tweet in tweets:\n find_hashtag = re.findall(pattern, tweet.content)\n if not find_hashtag:\n continue\n else:\n for ht in find_hashtag:\n hashtags_by_popularity.setdefault(ht, []).append(tweet.retweets)\n print(hashtags_by_popularity)\n for k, v in hashtags_by_popularity.items():\n hashtags_by_popularity[k] = sum(v)\n print(hashtags_by_popularity)\n sorted_ht = sorted(hashtags_by_popularity.items(), key=lambda x: x[-1], reverse=True)\n print(hashtags_by_popularity)\n return [ht[0] for ht in sorted_ht]", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def reorder( self ):\n self.sorted.sort(self.compareFunction)" ]
[ "0.74844646", "0.7461296", "0.6951029", "0.67573065", "0.63485456", "0.611729", "0.6102695", "0.60897636", "0.6069729", "0.6059522", "0.6047878", "0.6047267", "0.5914891", "0.5894959", "0.58686423", "0.5860879", "0.5839468", "0.58268726", "0.57980865", "0.5797689", "0.5782641", "0.57772595", "0.57626796", "0.5735805", "0.57306767", "0.57184416", "0.57178116", "0.57131904", "0.57049304", "0.5700069" ]
0.8429142
0
Sets the preferred bin size for output. Input "mode" can be any of "hours", "days", "weeks", 0, 1 or 2.
def set_output_mode(self, mode="minutes"):
    if (mode == "minutes") | (mode == "hours") | (mode == "days") | (mode == "weeks"):
        self.output_mode = mode
    else:
        try:
            self.output_mode = self.output_modes[mode]
        except Exception, e:
            DLOG("Output mode not set correctly: " + str(e))
            self.output_mode = "minutes"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_bw(self, mode):\n self.bandwidth = 0\n if (mode=='a'):\n self.bandwidth = 54\n elif(mode=='b'):\n self.bandwidth = 11\n elif(mode=='g'):\n self.bandwidth = 54 \n elif(mode=='n'):\n self.bandwidth = 600\n elif(mode=='ac'):\n self.bandwidth = 6777 \n \n return self.bandwidth", "def set_size(self, mode):\n if mode in self.data_index:\n return len(self.data_index[mode])\n return 0", "def set_size(self, mode):\n return len(self.data_index[mode])", "def numBinsChanged(self, val):\n self.numBins = val", "def setBarWidthMode(mode='fixed'):\n bdict = {'fixed':'FIXED','variable':'VARIABLE'}\n dislin.barmod(bdict[mode], 'WIDTH')", "def bins(self, value):\n self.num_bins = int(value)", "def set_mode(self, mode: QcQuantizeOpMode):\n self._mode = mode", "def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)", "def onSetToHalfSize(self, evt):\n\t\tself.halfResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\tzf = 1\n\t\t\t\n\t\t\tif self.halfResampleZ.GetValue():\n\t\t\t\tzf = 0.5\n\t\t\tself.currSize = int(0.5 * x), int(0.5 * y), int(zf * z)\n\t\tself.fourthResampleZ.Enable(0)\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def create_binning_file(bin_size,n_bins,lmax=None, file_name=None):\n bins = np.arange(n_bins)\n bin_low = bins * bin_size + 2\n bin_hi = (bins + 1) * bin_size + 1\n bin_cent = (bin_low + bin_hi) / 2\n \n if lmax is not None:\n id = np.where(bin_hi <lmax)\n bin_lo,bin_hi,bin_c=bin_lo[id],bin_hi[id],bin_c[id]\n\n if file_name is None:\n return bin_low, bin_hi, bin_cent\n else:\n f = open('%s'%file_name,mode=\"w\")\n for i in range(n_bins):\n f.write(\"%0.2f %0.2f %0.2f\\n\"%(bin_low[i],bin_hi[i],bin_cent[i]))\n f.close()", "def set_page_mode(self, mode: PagemodeType) -> None:\n if self.output is None:\n raise RuntimeError(ERR_CLOSED_WRITER)\n self.output.set_page_mode(mode)", "def makeKernals(binsize=100):\n global avgWindow\n global slopWindow\n #slop - downstream average minus upstream average\n slopWindow = numpy.ones(binsize, dtype=numpy.float16) / (binsize/2)\n slopWindow[:binsize/2] = -slopWindow[:binsize/2]", "def limit():\n bwc = BandwidthConfigurator()\n bwc.limit()", "def do_set_opt_scan_range(self, val):\n hwp_factor = self._half_noof_points\\\n * self.get_conversion_factor('half') #noof_steps * deg/steps\n qwp_factor = self._quarter_noof_points\\\n * self.get_conversion_factor('quarter') #noof_steps * deg/steps \n\n if np.size(val) == 2:\n self._half_stepsize = val[0]/hwp_factor\n self._quarter_stepsize = val[1]/qwp_factor\n else:\n raise ValueError('Input size must be 2, but has size %d'%size(val))", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n self.set_mode(mode)", "def set_output_bw(self, output_bw: int):\n for output_quantizer in self._output_quantizers:\n output_quantizer.bitwidth = output_bw", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = 
self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def setPixelsPerInchShrinkToFit(self,value):\n self.PDFreactorConfiguration.in1[\"pixelsPerInchShrinkToFit\"] = value", "def update_step_size(self):\n self.setSingleStep(10 ** self.step_exponent)\n self.update_format_string()", "def setScalingMode(mode='down'):\n mdict = {'down':'DOWN','full':'FULL'}\n dislin.sclmod(mode)", "def optimalBins(x,factor=1):\n sz = optimalBinSize(x) * factor\n return np.arange(x.min(), x.max(), sz)", "def set_mode(self, mode):\n\n if not self.closed:\n raise AttributeError(\"Cannot set the mode while the file is open.\")\n\n self._set_h5_mode(mode)\n\n self._wepy_mode = mode", "def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value >= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")", "def chunksize(self, value):\n\n self.data.chunksize = int(value)\n self.mask.chunksize = int(value)", "def SetNumberOfBinsPerAxis(self, arg0: 'unsigned int') -> \"void\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIF2_SetNumberOfBinsPerAxis(self, arg0)", "def __measurement_mode(self):\n self.__measurement_modes = {\"DISCRETE\": 120, \"BATCH\": 1200, \"VIDEO\": 1200, \"STREAMING\": 1200}\n try:\n max_len = self.__measurement_modes[self.measurement_mode]\n except KeyError:\n raise KeyError(\"Invalid measurement mode given\")\n\n self.num_chunks = int(self.video_length / self.chunk_length)\n self.max_chunks = int(max_len / self.chunk_length)", "def mode (self, mode) :\r\n self.mode_ = mode" ]
[ "0.64814997", "0.5791871", "0.5663989", "0.5389112", "0.53047895", "0.52575004", "0.5253156", "0.5176985", "0.51385313", "0.5133968", "0.51074135", "0.50738156", "0.5017597", "0.49853322", "0.49836484", "0.49836484", "0.49836484", "0.49768755", "0.4972224", "0.4947991", "0.49448642", "0.49322587", "0.49295968", "0.49037492", "0.48928097", "0.48845977", "0.4870494", "0.4843496", "0.4831603", "0.48176062" ]
0.5839458
1
Analyze a hashtag given as input. The amount of tweets is 200 by default, but can also be given as an argument. Returns analyzed tweets in a list.
def analyze_hashtag(self, hashtag, count=200):
    tweets = []

    for x in xrange(0, int(count / 100)):
        tweets.extend(self.tweet_fetcher.get_tweets(hashtag))

    analyzed_tweets = sort_tweets(self.sa.classify(tweets))

    self.analyzed_tweets = analyzed_tweets

    return analyzed_tweets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_by_hashtag(tweets: list, hashtag: str) -> list:\n tweets_with_hashtag = {} # findall(): Kui tekstis on rohkem kui üks regulaaravaldisele vastav alamsõne saab kõikide vastete järjendi moodustada funktsiooniga findall()\n pattern = r\"#\\w+\" # \\w : tähed, numbrid, alakriips, + : 1 või rohkem\n for tweet in tweets: # r\"string\" on \"raw\" tüüpi string, mis tähendab, et kurakaldkriipsud(\"\\\") jäetakse teksti alles.\n find_hashtag = re.findall(pattern, tweet.content) # word:\\w\\w\\w. Regulaaravaldisele vastab täpne sõne \"word:\" ning sellele järgnevad 3 suvalist tähte.\n if find_hashtag:\n tweets_with_hashtag.setdefault(ht, []).append(tweet)\n return tweets_with_hashtag[hashtag]", "def get_tweets_by_hashtag_route(hashtag):\n response, code = get_tweets_by_hashtag(\n hashtag, request.args.get('limit', 30))\n return jsonify(response), code", "def extract_hashtags(tweet):\n tknzr = TweetTokenizer()\n hashtags = [token.lower() for token in tknzr.tokenize(tweet) if re.match(hashtag_re, token)]\n return hashtags", "def get_hashtag_tweets(self, hashtag,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/search/tweets.json\")\n response = self.session.get(\n url,\n params={\n \"q\": hashtag,\n \"count\": count,\n \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data['statuses']]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data", "def get_tweets(hashtag):\n api = twitter.Api(consumer_key=TWITTER_API_CONSUMER_KEY,\n consumer_secret=TWITTER_API_CONSUMER_SECRET,\n access_token_key=TWITTER_API_ACCESS_TOKEN_KEY,\n access_token_secret=TWITTER_API_ACCESS_TOKEN_SECRET)\n\n query = (f\"q=%23{HASHTAG}%20-RT\"\n f\"&result_type=recent&since=2019-01-01&count={NUM_TWEETS}\")\n results = api.GetSearch(raw_query=query)\n\n return [\n format_tweet(tweet.AsDict())\n for tweet in results\n ]", "def getHashtagsAndMentions(tweets):\n hashtags = Counter()\n mentions = Counter()\n plain = Counter()\n\n pattern = re.compile(r\"[^#@\\w'-]+\")\n\n for t in tweets:\n words = pattern.split(t.message)\n for word in words:\n # Ignore null strings caused by split characters at the end of a\n # message and remove standalone hyphens.\n if word and not word.startswith(\"-\"):\n # Increment count for the word in the Counter.\n if word.startswith(\"#\"):\n hashtags.update({word: 1})\n elif word.startswith(\"@\"):\n mentions.update({word: 1})\n else:\n # TODO: apply nltk.corpus.stopwords.words() here,\n # across languages. Consider that the stopwords cut off\n # before apostrophe, therefore check if the word\n # starts with the stopword.\n plain.update({word: 1})\n\n return hashtags, mentions, plain", "def getByHashtags(hashtag):\n\n # set page_limits. 
The default is 1 \n pages_limit = request.args.get('pages_limit') or 1\n pages_limit = int(pages_limit)\n\n raw_response = get_response(tw_api, 'search/tweets', { 'q': '#' + hashtag, 'count': 100 }, pages_limit)\n list_response = convert_resp2list(raw_response)\n return jsonify(list_response)", "def searchTweets():\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName='apiConf2.txt'))\n #SEARCHING TWEETS CONTAINING THE HASHTAG \"#bitcoin\" USING TWEEPY LIBRARY\n myTweets= []\n #words=list(map(str,words))\n if words:\n myQuery=' OR '.join(words)\n else:\n myQuery = '*'\n if removeRetweets:\n myQuery += ' - filter:retweets'\n kwargs['q']=myQuery\n kwargs['count']=100\n kwargs['tweet_mode']='extended'\n if 'startingDate' in kwargs:\n kwargs['since']=kwargs['startingDate']\n del(kwargs['startingDate'])\n if 'endingDate' in kwargs:\n kwargs['until']=kwargs['endingDate']\n del(kwargs['endingDate'])\n if 'maxTweets' in kwargs:\n del(kwargs['maxTweets'])\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.search, kwargs).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.search, kwargs).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n return getTopNTweets(myTweets, maxTweets)", "def analyse_tweets(nb_tweets, classifier, Resource, threshold, language='en'):\n return [(bytes(line, 'utf-8'), _minimal_analysis(bytes(line, 'utf-8'), classifier, Resource, threshold, language))\n for line in\n collect_tweet(nb_tweets)]", "def analyze(self, tweet):\n \n # keeping track of the score\n score = 0\n \n # filtering though tweets exstracting the useful words\n # preserve_case = false maks them lowercase\n tokenizer = nltk.tokenize.TweetTokenizer(preserve_case = False)\n tokens = tokenizer.tokenize(tweet)\n \n # checking word for word the intension and keeping score\n for word in tokens:\n if word in self.dic:\n if self.dic[word] == 1:\n score += 1\n else:\n score -= 1\n# score += self.dic[word]\n return score", "def buildHashtagsDict(tweets):\n hashtags = {}\n for tweet in tweets:\n if tweet['entities']['hashtags']:\n for hashtag in tweet['entities']['hashtags']:\n tag = hashtag['text'].lower().strip()\n if tag not in hashtags:\n hashtags[tag] = 1\n else:\n hashtags[tag] += 1\n return hashtags", "def analyze_tweets(tweets, model, w2v_model):\n # TODO DO EVERYTHING HERE\n #tweets = [(\"StarWars\", tc.query_tweets(\"StarWars\"))]\n \n #tweets = tc.query_tweets('starwars')\n df = pd.DataFrame(columns=['pos', 'neu', 'neg'])\n if not os.path.isdir('results'):\n os.mkdir('results')\n for topic, topic_tweets in tweets:\n tokenized_tweets = tp.process_raw_tweets(topic_tweets)\n df.loc[topic], dummy = classify_tweets(tokenized_tweets, model, w2v_model)\n vis.word_cloud_from_frequencies(tp.count_tokens(tokenized_tweets), f\"results/{topic}_cloud.png\", width=800, height=400,)\n \n vis.bar_plot_from_dataframe(df, 'results/results.png')\n print(\"\\n\")\n print(df)", "def get_top_hashtags_from_twitter_api(country='Japan', extended_search=True, debug=False):\n trends = get_top_trends_from_twitter(country=country, exclude_hashtags=False)\n trends = json.loads(trends)\n\n trending_hashtags = [t['label'] for t in trends]\n\n #print(json.dumps(trends, indent=4, ensure_ascii=False))\n\n queries = [t['query'] for t in trends]\n\n if debug:\n #[print(x) for x in trends]\n #[print(x) for x in queries]\n queries = 
[queries[0]]\n\n full_hashtags_list = []\n for query in queries:\n #print(query)\n # there is no country filter, but there is language filter at least\n if country == 'Japan':\n responses = api.GetSearch(term=query, locale='ja', return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n else:\n responses = api.GetSearch(term=query, return_json=True)\n try: responses = responses['statuses']\n except: print(responses)\n\n #print(json.dumps(responses, indent=4, ensure_ascii=False))\n\n trend_hashtags_list = []\n for response in responses:\n if debug: print(json.dumps(response, indent=4, ensure_ascii=False))\n text = response['text']\n\n hashtags_list = response['entities']['hashtags']\n\n if len(hashtags_list) > 0:\n hashtags_list = [h['text'] for h in hashtags_list]\n [trend_hashtags_list.append(h) for h in hashtags_list]\n\n full_hashtags_list.append(trend_hashtags_list)\n\n flat_hashtags_list = [item for sublist in full_hashtags_list for item in sublist]\n\n # turn it into a set to clear duplicates, then append #\n flat_hashtags_list = list(set(flat_hashtags_list))\n flat_hashtags_list = ['#'+h for h in flat_hashtags_list]\n\n flat_tier_list = []\n for h in flat_hashtags_list:\n if h in trending_hashtags:\n flat_tier_list.append(1)\n else:\n flat_tier_list.append(2)\n\n output = []\n for hashtag, tier in zip(flat_hashtags_list, flat_tier_list):\n output.append({\n \"label\": hashtag,\n \"tier\": tier\n })\n\n sorted_output = sorted(output, key=lambda x: x['tier'])\n\n output_json = json.dumps(sorted_output, ensure_ascii=False)\n return output_json", "def hashtags(self, candidate_list):\n if Tweet.hashtagre is None:\n Tweet.hashtagre = re.compile('|'.join(map(re.escape, candidate_list)))\n return [\n [m.group(0).replace('#', '', 1), m.span()]\n for m in Tweet.hashtagre.finditer(self.text)\n ]", "def get_hashtags(self):\n\t\t# Only first level comments should be checked for hashtag. Maybe.\n\t\tpassl", "def get_suggestions(\n user: 'User',\n hashtag: str,\n valid_user: Callable[['User'], bool],\n since: str,\n max_suggestions: int\n ) -> List[tweepy.models.User]:\n api = get_api(user)\n suggestions = []\n seen = set()\n max_iters = 5000\n\n for tweet in tweepy.Cursor(api.search, q=hashtag, lang=\"en\", since=since).items():\n if tweet.user.screen_name not in seen and valid_user(tweet.user):\n suggestions.append(tweet.user)\n seen.add(tweet.user.screen_name)\n if len(suggestions) >= max_suggestions or len(seen) > max_iters:\n break\n\n return suggestions", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. 
{len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def remove_hashtag(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"#\\S+\", \"\", tweet)\n novos_tweets.append(texto)\n\n return novos_tweets", "def separate_hastags_mentions_urls(tweet):\n \n text = tweet.lower()\n hashtag_list = re.findall(\"#([a-zA-Z0-9_]{1,50})\", text)\n \n text = re.sub(r'http\\S+', '', text)\n clean_tweet = re.sub(\"@[A-Za-z0-9_]+\",\"\", text)\n clean_tweet = re.sub(\"#[A-Za-z0-9_]+\",\"\", clean_tweet)\n \n return clean_tweet, hashtag_list", "def extract_hashtag(text):\n if text is not None:\n text = text.replace('\\n', ' ').replace('\\r', '')\n text = text.split(\" \")\n text = [word for word in text if \"#\" in word]\n if len(text) == 0:\n text = [\"no tags\"]\n else:\n text = [\"no tags\"]\n return text", "def clean_tweet(tweet):\n word_out, hashtags = [], []\n for word in tweet.split():\n if word[0] == '#':\n hashtags.append(word)\n elif ((len(word) != 0) and (word[0] != '@')) and (\n len(word) < 4 or ((len(word) > - 4) and (word[:4] != 'http'))):\n word_out.append(word)\n return word_out, hashtags", "def gettweets(request):\n temp = json.loads(request.body)\n print (temp['hashtags'])\n return Response(tw_fetcher.gethashes(temp['hashtags']), status=status.HTTP_201_CREATED)", "def show_search_results():\n\n #Get values from search-box via AJAX\n current_keyword = request.form.get('search').lower()\n print \"**********************\"\n print current_keyword\n print \"**********************\"\n tweets = get_tweets_by_api(term=current_keyword)\n\n result = []\n\n for tweet in tweets:\n # Exclude retweets since they appear as duplicatses to endu ser\n if tweet.retweeted_status is None:\n # Convert tweet text from unicode to text\n tweet_id = tweet.id\n text = unicodedata.normalize('NFKD', tweet.text).encode('ascii', 'ignore')\n # Find URL in text and bind to url\n # url = re.search('((?:http|https)(?::\\\\/{2}[\\\\w]+)(?:[\\\\/|\\\\.]?)(?:[^\\\\s\"]*))', text)\n url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)\n # Remove URL from text\n text_wo_url = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', text, flags=re.MULTILINE)\n # Handle / Name\n user = unicodedata.normalize('NFKD', tweet.user.screen_name).encode('ascii', 'ignore')\n # Count of favorites\n favorite_count = tweet.favorite_count\n #Return dictionary of hashtags with hashtag as key and number of occurances as value\n if tweet.hashtags:\n # Convert hashtags from unicode to string\n ht_list = []\n for hashtag in tweet.hashtags:\n ht_str = unicodedata.normalize('NFKD', hashtag.text).encode('ascii', 'ignore')\n ht_list.append(ht_str.lower())\n hashtags = Counter(ht_list)\n else:\n hashtags = tweet.hashtags\n # Convert tweet from unicode to datetime\n created_at = tweet.created_at\n # format created_at string to ISO 8610\n created_at_str = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))\n # create a moment from the string\n created_at = moment.date(created_at_str, 'YYYY-MM-DD HH:mm:ss')\n result.append({'created_at': created_at_str, 'tweet_text': text_wo_url, 'user': user,\n 'favorite_count': favorite_count, 'hashtags': hashtags,\n 'url': url, 'tweet_id': tweet_id})\n\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n print result\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n\n return jsonify(result=result) #, tweets", "def get_hashtags(text):\n # alternatives\n \"#[^ :\\n\\t\\.,\\?\\/’'!]+\"\n 
\"#[a-zA-Z1-9]+\"\n\n # frankly I\"m happy with this as it's simple and I will go down a rabbit hole on these other ones.\n # it seems to do a decent job\n htag = re.compile(r'#[a-zA-Z0-9\\U0001f3c0]+')\n # tested it on all of these: https://top-hashtags.com/hashtag/basketball/\n # got all of them (the unicode one is the basketball emoji)\n\n return list(set(re.findall(htag, text)))", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_tweet_list(user_handle):\n client = language.LanguageServiceClient()\n\n tweet_list = twitter.get_tweets(handle=user_handle)\n\n if tweet_list[0] == \"34\":\n return tweet_list\n\n for i in range(len(tweet_list)):\n\n content = tweet_list[i].get(\"text\")\n\n document = types.Document(\n content=content, type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n\n # Print the results\n # print_result(annotations)\n\n score = annotations.document_sentiment.score\n magnitude = annotations.document_sentiment.magnitude\n\n tweet_list[i][\"score\"] = score\n tweet_list[i][\"magnitude\"] = magnitude\n\n # print(tweet_list[i])\n\n return tweet_list", "def set_analyzed_tweets(self, tweets):\n slim_tweets = [SlimTweet(tweet) for tweet in tweets]\n self.analyzed_tweets = sort_tweets(slim_tweets)", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets" ]
[ "0.6920718", "0.6833779", "0.675189", "0.66098684", "0.6514208", "0.6490846", "0.6407932", "0.62542325", "0.6223964", "0.6129735", "0.6091279", "0.60504276", "0.5977461", "0.5949544", "0.5946573", "0.59057504", "0.5902749", "0.5883308", "0.58803743", "0.586219", "0.5854543", "0.5848928", "0.5842706", "0.5839371", "0.5800015", "0.57825464", "0.5725198", "0.5712595", "0.5707076", "0.5699683" ]
0.81691366
0
Set analyzed tweets, sorted, e.g. from saved analyzed tweets.
def set_analyzed_tweets(self, tweets):
    slim_tweets = [SlimTweet(tweet) for tweet in tweets]
    self.analyzed_tweets = sort_tweets(slim_tweets)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self):\n self.words = set(sorted(self.words))", "def sort_tweets(tweets):\n tweets.sort(key=lambda x: x.get_date())\n return tweets", "def set_trec_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:(x.get_score(),x.get_doc()),reverse=True)\n for r in self._run[k]:\n print r.get_str()", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def sort_results(self):\n pass", "def analyze_hashtag(self, hashtag, count=200):\n tweets = []\n\n for x in xrange(0, int(count / 100)):\n tweets.extend(self.tweet_fetcher.get_tweets(hashtag))\n\n analyzed_tweets = sort_tweets(self.sa.classify(tweets))\n\n self.analyzed_tweets = analyzed_tweets\n\n return analyzed_tweets", "def sort_by_tokens(self, token_order):\n\n remaining_tokens = list(set(self.dictionary.tokens) - set(token_order))\n token_order = token_order + remaining_tokens\n idx_order = [self.dictionary.add_token(token) for token in token_order]\n\n self.Nx += [0] * (len(token_order) - len(self.Nx))\n self.sort_by_idxs(idx_order)\n\n # We are no longer sorted according to unigram frequencies.\n self.sorted = False", "def sort(self, field='word', order=None):\n self.data = list(self.sorted(field, order))", "def sort(self):\n\t\twith self.AutoSplitlines():\n\t\t\tself.lines = sorted(self.lines)", "def __init__(self):\n self.follow_map = {}\n self.user_tweets = {}\n \n self.sort_id = 0", "def sort(self, reverse=True):\n count_word = list()\n indexs = list()\n for w in self.word2index:\n if w in self.special:\n continue\n count_word.append((self.word_count[w], w))\n indexs.append(self.word2index[w])\n\n count_word.sort(reverse=reverse)\n indexs.sort(reverse=reverse)\n\n for index, (_, word) in zip(indexs, count_word):\n self.word2index[word] = index\n self.index2word[index] = word", "def sort_word_analysis(self):\n\n\t\treverse_word_analysis = [(value,key) for key, value in self.word_analysis.items()]\n\t\treverse_word_analysis.sort(reverse=True)\n\n\t\tvocab_list = [\t(reverse_word_analysis[i][1], #[1]: the word; [0]: the attribute values \n\t\t\t\t\t\t(reverse_word_analysis[i][0][3], #stem: 3rd index \n\t\t\t\t\t\treverse_word_analysis[i][0][2], #frequency; 2nd index\n\t\t\t\t\t\tself.sentence_index[reverse_word_analysis[i][0][4]], #the sentence location index; 4th index \n\t\t\t\t\t\treverse_word_analysis[i][0][5], #selection criteria: 5th index \n\t\t\t\t\t\t)) for i in range(10)]\n\t\t\n\t\tself.vocab_list = vocab_list\n\t\t\n\t\treturn vocab_list", "def sort_by_default(self):\n self.data.sort()", "def sort_vocab(self):\n if len(self.wv.syn0):\n raise RuntimeError(\"cannot sort vocabulary after model weights already initialized.\")\n self.wv.index2word.sort(key=lambda word: self.wv.vocab[word].count, reverse=True)\n for i, word in enumerate(self.wv.index2word):\n self.wv.vocab[word].index = i", "def sort_by_unicode(self):\n utils.sort_unicode_word_list(self.words_new)", "def index(self, suggestions: Sequence[str]) -> 
None:\n for s in suggestions:\n self.data.append(s)\n\n self.data.sort()", "def sort(self):\n self.notes.sort()", "def sort(self):\n\n # momentarily convert into numpy, to take advantage of their easy \n # sorting.\n top_indices = np.argsort([-n for n in self.Nx])\n self.Nx = [self.Nx[i] for i in top_indices]\n self.dictionary = h.dictionary.Dictionary([\n self.dictionary.tokens[i] for i in top_indices])\n\n self.sorted = True\n\n return top_indices", "def sort_terms(self):\n sorted_index = OrderedDict({})\n for k in sorted(self.inverted_index.keys()):\n sorted_index[k] = self.inverted_index[k]\n self.inverted_index = sorted_index", "def sort_by_idxs(self, idx_order):\n self.Nx = [self.Nx[idx] for idx in idx_order]\n self.dictionary = h.dictionary.Dictionary(\n [self.dictionary.tokens[idx] for idx in idx_order])\n\n # We are no longer sorted according to unigram frequencies.\n self.sorted = False", "def sortby(self):\n ...", "def sort_and_reduce(self):\n self.data = sorted(self.data, key=lambda item: item.pubDate)\n if len(self.data) > MAX_SIZE:\n self.data = self.data[-MAX_SIZE:]", "def update_word_stats(self, tweet):\n\n if not self.text:\n return\n\n words = self.text.split()\n\n # process single words\n for word in words:\n self.update_stats('words', word)\n\n # process 2 word lists\n pairs = self.get_phrase_list(words, 2)\n if pairs is not None:\n for word_pair in pairs:\n self.update_stats('word_pairs', self.get_index_from_list(word_pair))\n\n # process 3 word lists\n triples = self.get_phrase_list(words, 3)\n if triples is not None:\n for word_triple in triples:\n self.update_stats('word_triples', self.get_index_from_list(word_triple))", "def sort_by_parser_scores(self):\n self.parses.sort(key=lambda parse: -parse.parser_score)", "def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n top_10_tweets['top_tweets'] = []\n for tweet in Tweet.top_tweets[:10]:\n top_10_tweets['top_tweets'].append({'hashtag': \"#\"+tweet[0], 'count': tweet[1]})\n return top_10_tweets", "def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)", "def sort(self):\n tmp = list(zip(self.user_points, self.user_ids));\n tmp = sorted(tmp, reverse=True);\n self.user_points, self.user_ids = list(zip(*tmp));\n \n self.user_points = list(self.user_points);\n self.user_ids = list(self.user_ids);", "def sort_by_popularity(tweets: list) -> list:\n tweets_by_popularity = sorted(tweets, key=lambda x: (x.retweets, -x.time), reverse=True) # Use lambda functions when an anonymous function is required for a short period of time.\n return tweets_by_popularity", "def sort_hashtags_by_popularity(tweets: list) -> list:\n hashtags_by_popularity = {}\n pattern = r\"#\\w+\"\n for tweet in tweets:\n find_hashtag = re.findall(pattern, tweet.content)\n if not find_hashtag:\n continue\n else:\n for ht in find_hashtag:\n hashtags_by_popularity.setdefault(ht, []).append(tweet.retweets)\n print(hashtags_by_popularity)\n for k, v in hashtags_by_popularity.items():\n hashtags_by_popularity[k] = sum(v)\n print(hashtags_by_popularity)\n sorted_ht = sorted(hashtags_by_popularity.items(), key=lambda x: x[-1], reverse=True)\n print(hashtags_by_popularity)\n return [ht[0] for ht in sorted_ht]" ]
[ "0.6755012", "0.62385756", "0.601142", "0.60002726", "0.5931617", "0.5790273", "0.5523183", "0.55016154", "0.54319304", "0.5425356", "0.5424972", "0.54047394", "0.5361439", "0.5345621", "0.5344535", "0.5337149", "0.53292656", "0.5318276", "0.52926177", "0.52854353", "0.52800477", "0.5273635", "0.5255148", "0.5248311", "0.52408963", "0.5218285", "0.5215652", "0.52151996", "0.5214445", "0.52134836" ]
0.83614945
0
Outputs analyzed tweets in bins. Bin size (hours, days or weeks) is determined by self.output_mode.
def output_tweets(self):
    if self.analyzed_tweets is None:
        return None

    splitter = 0
    if (self.output_mode == "days"):
        splitter = 86400  # 1 day in seconds
        pass
    elif (self.output_mode == "weeks"):
        splitter = 604800  # 1 week in seconds
        pass
    elif (self.output_mode == "hours"):
        splitter = 3600  # 1 hours in seconds
        pass
    else:
        splitter = 300  # 5 minutes in second

    oldest = self.analyzed_tweets[0].get_date()
    newest = self.analyzed_tweets[-1].get_date()
    delta = int(((newest - oldest).total_seconds()) / splitter)

    tweets_for_bins = list(self.analyzed_tweets)
    bins = []
    hour_bin = []
    for x in xrange(1, delta + 2):
        upper_limit = oldest + datetime.timedelta(seconds=splitter * x)
        lower_limit = upper_limit - datetime.timedelta(seconds=splitter)
        hour_bin = []
        for tweet in tweets_for_bins:
            if tweet.get_date() > upper_limit:
                bins.append(hour_bin)
                DLOG("Bin containing " + str(len(hour_bin)) + " tweets")
                break
            elif tweet.get_date() < lower_limit:
                continue
            else:
                hour_bin.append(tweet)
        [tweets_for_bins.remove(t) for t in hour_bin]

    DLOG("Bin containing " + str(len(hour_bin)) + " tweets")
    bins.append(hour_bin)

    self.output_bins = bins
    return bins

    #### Alternate binning ####
    # if len(analyzed_tweets) < 500:
    #     bin_size = 10
    # else:
    #     bin_size = int(len(analyzed_tweets) * .02)
    # bins = []
    # for count in range(0, int(len(analyzed_tweets) / bin_size)):
    #     pol_bin = [tweet.polarity for tweet in analyzed_tweets[(count * bin_size):((count + 1) * bin_size)]]
    #     bins.append(pol_bin)
    # DLOG([sum(bin) for bin in bins])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_histogram(spiketrains, bin_size, t_start=None, t_stop=None,\n output='counts', binary=False):\n # Bin the spike trains and sum across columns\n bs = BinnedSpikeTrain(spiketrains, t_start=t_start, t_stop=t_stop,\n bin_size=bin_size)\n\n if binary:\n bs = bs.binarize(copy=False)\n bin_hist = bs.get_num_of_spikes(axis=0)\n # Flatten array\n bin_hist = np.ravel(bin_hist)\n # Renormalise the histogram\n if output == 'counts':\n # Raw\n bin_hist = pq.Quantity(bin_hist, units=pq.dimensionless, copy=False)\n elif output == 'mean':\n # Divide by number of input spike trains\n bin_hist = pq.Quantity(bin_hist / len(spiketrains),\n units=pq.dimensionless, copy=False)\n elif output == 'rate':\n # Divide by number of input spike trains and bin width\n bin_hist = bin_hist / (len(spiketrains) * bin_size)\n else:\n raise ValueError(f'Parameter output ({output}) is not valid.')\n\n return neo.AnalogSignal(signal=np.expand_dims(bin_hist, axis=1),\n sampling_period=bin_size, units=bin_hist.units,\n t_start=bs.t_start, normalization=output,\n copy=False)", "def _histogram_with_spread(self):\n complexity_hist = np.bincount(\n self.epoch.array_annotations['complexity'])\n num_bins = (self.t_stop - self.t_start).rescale(\n self.bin_size.units).item() / self.bin_size.item()\n num_bins = round_binning_errors(num_bins, tolerance=self.tolerance)\n time_hist = np.zeros(num_bins, dtype=int)\n\n start_bins = (self.epoch.times - self.t_start).rescale(\n self.bin_size.units).magnitude / self.bin_size.item()\n stop_bins = (self.epoch.times + self.epoch.durations - self.t_start\n ).rescale(self.bin_size.units\n ).magnitude / self.bin_size.item()\n\n if self.sampling_rate is not None:\n shift = (.5 / self.sampling_rate / self.bin_size).simplified.item()\n # account for the first bin not being shifted in the epoch creation\n # if the shift would move it past t_start\n if self.epoch.times[0] == self.t_start:\n start_bins[1:] += shift\n else:\n start_bins += shift\n stop_bins += shift\n\n start_bins = round_binning_errors(start_bins, tolerance=self.tolerance)\n stop_bins = round_binning_errors(stop_bins, tolerance=self.tolerance)\n\n for idx, (start, stop) in enumerate(zip(start_bins, stop_bins)):\n time_hist[start:stop] = \\\n self.epoch.array_annotations['complexity'][idx]\n\n time_hist = neo.AnalogSignal(\n signal=np.expand_dims(time_hist, axis=1),\n sampling_period=self.bin_size, units=pq.dimensionless,\n t_start=self.t_start)\n\n empty_bins = (self.t_stop - self.t_start - self.epoch.durations.sum())\n empty_bins = empty_bins.rescale(self.bin_size.units\n ).magnitude / self.bin_size.item()\n empty_bins = round_binning_errors(empty_bins, tolerance=self.tolerance)\n complexity_hist[0] = empty_bins\n\n return time_hist, complexity_hist", "def findBins(): \n\n df = pd.read_csv('significantData.csv')\n df = df.sort_values('RecordingTimestamp')\n df.to_csv('significantData.csv', index=False)\n read_in = pd.read_csv('significantData.csv')\n count = 0\n this = []\n return_bins = {}\n word = (read_in['AOI[Neutral_Left]Hit_0']).tolist()\n \n if word[0] == '1':\n return_bins.update({'start_value': 1})\n else: \n return_bins.update({'start_value': 0})\n for v, w in zip(word[:-1], word[1:]):\n if v == w and v != '': \n print v\n count = count + 1\n else: \n total = count\n this.append(count)\n my_list = sorted(list(set(this)))\n return_bins.update({'my_list': my_list})\n return return_bins", "def histogram(self):\n if np.size(self.stats['Counts']): # don't do anything to an empty list\n if np.size(self.bins) and not 
self.redo:\n return self.bins, self.occs, self.thresh\n elif np.size(self.bin_array) > 0: \n self.occs, self.bins = np.histogram(self.stats['Counts'], self.bin_array) # fixed bins. \n else:\n try:\n lo, hi = min(self.stats['Counts'])*0.97, max(self.stats['Counts'])*1.02\n # scale number of bins with number of files in histogram and with separation of peaks\n num_bins = int(15 + self.ind//100 + (abs(hi - abs(lo))/hi)**2*15) \n self.occs, self.bins = np.histogram(self.stats['Counts'], bins=np.linspace(lo, hi, num_bins+1)) # no bins provided by user\n except: \n self.occs, self.bins = np.histogram(self.stats['Counts'])\n else: self.occs, self.bins = np.zeros(10), np.arange(0,1.1,0.1)\n return self.bins, self.occs, self.thresh", "def bins(self, value):\n self.num_bins = int(value)", "def bins(self):\n return self._bins", "def tweets_statistics():\n tweet_lengths = np.array([])\n\n chosen_tweets = [tweet_dir + cls_train_tweets_pos, tweet_dir + cls_train_tweets_neg] # [tweet_dir + test_tweets]\n\n for fn in chosen_tweets:\n with open(fn) as f:\n count = 0\n for line in f:\n tokens = line.strip().split()\n tweet_lengths = np.append(tweet_lengths, len(tokens))\n count += 1\n\n tweet_lengths = np.sort(tweet_lengths)\n print(tweet_lengths)\n print('total tweets : ' + str(tweet_lengths.size))\n print('Max : ' + str(np.max(tweet_lengths)))\n print('10th bigger : ' + str(tweet_lengths[-10]))\n print('50th bigger : ' + str(tweet_lengths[-50]))\n print('100th bigger : ' + str(tweet_lengths[-100]))\n print('200th bigger : ' + str(tweet_lengths[-200]))\n print('1000th bigger : ' + str(tweet_lengths[-1000]))\n print('Min : ' + str(np.min(tweet_lengths)))\n print('Mean : ' + str(np.mean(tweet_lengths)))\n print('STD : ' + str(np.std(tweet_lengths)))\n plt.hist(tweet_lengths, 50)\n plt.grid(True)\n plt.savefig(ROOT_DIR + 'plots/' + dataset_version + 'tweet_lengths' + ('_train' if len(chosen_tweets) == 2 else '_test'))", "def num_time_bins(self):\n return self.header.time_gate_bin_count * self.header.samples_per_time_bin", "def bins (self):\n return self._bins", "def bins (self):\n return self._bins", "def bin_the_data(neuron_spikes, first, last, bin_size):\n neuron_activity = []\n timebins = range(first, int(last) + int(last) % bin_size, bin_size)\n for spike in neuron_spikes:\n activity = []\n spike_time = spike[0]\n i = 0\n for bin_size in timebins:\n k = 0\n while spike_time < bin_size:\n i += 1\n if i >= np.size(spike):\n break\n spike_time = spike[i]\n k += 1\n activity.append(k)\n neuron_activity.append(activity)\n return neuron_activity, timebins", "def handle_analyzer(f_name, img_name, out_fname):\n # Counting tweets\n tweet_arr, handle_imp = load_pickle(f_name)['dat'], Counter()\n logging.info('Going through tweets now')\n for tweet in tweet_arr:\n handle_imp[tweet['handle']] += 1\n plot_save_dat(handle_imp, out_fname, img_name, 'Number of tweets', 'Probablity')\n logging.info('Saved histogram with number of tweets from handle vs. 
freq to: {}'.format(img_name))", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def binAnalysis(self):\n self.mode = 'binned'\n # --------------------------------------------------------------------------------------------- #\n # Make sure that another working directory is selected\n if self.workpath == self.datapath:\n print(\"\\t=== Variable 'self.workpath' is equal to 'self.datapath', provide another ===\")\n return\n else:\n if os.path.isfile(self.outgtlike):\n print(\"\\t=== Directory {} already contains a complete analysis, remove the .dat file ===\".format(self.workpath))\n return\n else:\n pass\n print(\"\\t=== Binned analysis will be computed in '{}' ===\".format(self.workpath))\n\n # --------------------------------------------------------------------------------------------- #\n # Create a temporary python script and launch the Science Tools\n fil = os.path.join(self.workpath, 'tmp_BinnedAnalysis'+self.suffix+'.py')\n tmp = open(fil, 'w')\n tmp.write(\"import algamma; import os; a=algamma.algamma(); a.ft1='{}';\\\n a.ft2='{}'; a.metstart={}; a.metstop={}; a.emin={}; a.emax={}; a.suffix='{}';\\\n a.workpath='{}'; a._gtSelect(); a._gtMktime();\\\n a._gtLtcube(); a._gtBincube(); a._gtExpmap(); a._gtSrcmap();\\\n a._gtLike(); os.remove('{}')\".format(self.ft1, self.ft2, \n self.metstart, self.metstop, self.emin, self.emax,\n self.suffix, self.workpath, fil))\n # Launch the file\n os.popen(\"nohup python {} &\".format(fil))\n tmp.close()\n\n return", "def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins", "def _printHist(self):\n bins = [0] * self.displayBins\n binSize = self.maxValue / self.displayBins\n displaySamps = self.samples[:]\n displaySamps.sort()\n currLimit = binSize\n binIndex = 0\n partIndex = 0\n while partIndex < self.numParticles:\n part = displaySamps[partIndex]\n if binIndex >= self.displayBins:\n # if value beyond max value, add to last bin and go on to next particle\n bins[19] += 1\n partIndex += 1\n elif part < currLimit:\n # count particle in current bin, go on to next particle\n bins[binIndex] += 1\n partIndex += 1\n else:\n # go on to next bin, don't go on to next particle\n binIndex += 1\n currLimit += binSize\n self.printMapPattern(binSize)\n histStr = \"{0:3d} \"\n printStr = \"\"\n for b in bins:\n printStr += histStr.format(b)\n print(printStr)", "def test_histogram_with_varying_number_of_bin(self):\n # this data use number of bins less than the max limit\n df1 = pd.Series([1, 2, 3, 4]).apply(str)\n profiler1 = FloatColumn(df1.name)\n profiler1.max_histogram_bin = 50\n profiler1.update(df1)\n num_bins = len(profiler1.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 4)\n\n # this data uses large number of bins, which will be set to\n # the max limit\n df2 = pd.Series([3.195103249264023e+18, 9999995.0, 9999999.0,\n 0.0, -10 ** 10]).apply(str)\n profiler2 = FloatColumn(df2.name)\n profiler2.max_histogram_bin = 50\n profiler2.update(df2)\n num_bins = len(profiler2.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 50)\n\n # max number of bin is increased to 10000\n profiler2 = FloatColumn(df2.name)\n profiler2.max_histogram_bin = 10000\n profiler2.update(df2)\n num_bins = 
len(profiler2.profile['histogram']['bin_counts'])\n self.assertEqual(num_bins, 10000)", "def histogram(data, bins=50, nmb_data_to_use=None, ignored_row=0,\n col_to_read=1, output_file=None, verbose=0):\n # prepare arguments\n args = \"-x{} -c{} -b{} -V{}\" \\\n .format(ignored_row, col_to_read, bins, verbose)\n if nmb_data_to_use is not None:\n args += \"-l{}\".format(nmb_data_to_use)\n args = args.split(\" \")\n # run command\n res, msg = tisean('histogram', args, input_data=data,\n output_file=output_file)\n # return\n if msg != \"\":\n print(msg)\n return res", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def __len__(self):\n return 9 # logsfr_ratios has 6 bins", "def test_format_histograms_two_bins(self):\r\n self.assertEqual(format_histograms_two_bins(array([0, 1, 0, 2, 2, 3]),\r\n array(\r\n [2, 1, 0, 2, 0, 0]), array(\r\n [100, 110, 120, 130, 140, 150, 160])),\r\n \"\"\"Length\\tBefore\\tAfter\\n100\\t0\\t2\\n110\\t1\\t1\\n120\\t0\\t0\\n130\\t2\\t2\\n140\\t2\\t0\\n150\\t3\\t0\"\"\")", "def to_bins(filein, fileout, window, numbins, chr=None, generator=None):\n if not generator:\n generator = hg38_generator()\n bam = pysam.AlignmentFile(filein, 'rb')\n cm = []\n for row in generator: # iterate over each chromosome\n if chr is None or (chr is not None and row[0] in chr): # checks for chr #\n count = int(int(row[1]) / window) # number of windows\n res = int(window / numbins)\n chr_i = row[0]\n for i in range(count): # iterate over each window\n win_start = i * window\n win_finish = (i + 1) * window - 1\n cm_i = np.zeros(3 + numbins, dtype=object) # array to hold bin counts info\n cm_i[0] = chr_i\n cm_i[1] = win_start\n cm_i[2] = win_finish\n for j in range(numbins): # iterate over each bin\n bin_start = win_start + j * res\n bin_finish = win_start + (j + 1) * res - 1\n cm_i[j + 3] = bam.count(chr_i, bin_start, bin_finish)\n cm.append(cm_i)\n status_statement(i, count, 20, chr_i)\n np.savetxt(fileout + \".csv\", np.asarray(cm), fmt='%s', delimiter=',')\n bam.close()", "def GetNumberOfBins(self):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUS2_GetNumberOfBins(self)", "def GetNumberOfBins(self):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUC2_GetNumberOfBins(self)", "def GetNumberOfBins(self):\n return _itkStatisticsLabelMapFilterPython.itkStatisticsLabelMapFilterLM2IUL2_GetNumberOfBins(self)", "def construct_bins(self, training_samples, bins_file):\n\n if self.__read_from_bins_file(bins_file):\n return\n n, d = training_samples.shape\n k = self.number_of_bins\n if self.whitening:\n self.training_mean = np.mean(training_samples, axis=0)\n self.training_std = np.std(training_samples, axis=0) + self.ndb_eps\n\n if self.max_dims is None and d > 1000:\n # To ran faster, perform binning on sampled data dimension (i.e. 
don't use all channels of all pixels)\n self.max_dims = d // 6\n\n whitened_samples = (training_samples - self.training_mean) / self.training_std\n d_used = d if self.max_dims is None else min(d, self.max_dims)\n self.used_d_indices = np.random.choice(d, d_used, replace=False)\n\n clusters = KMeans(n_clusters=k, max_iter=100, n_jobs=-1).fit(whitened_samples[:, self.used_d_indices])\n\n bin_centers = np.zeros([k, d])\n for i in range(k):\n bin_centers[i, :] = np.mean(whitened_samples[clusters.labels_ == i, :], axis=0)\n\n # Organize bins by size\n label_vals, label_counts = np.unique(clusters.labels_, return_counts=True)\n bin_order = np.argsort(-label_counts)\n self.bin_proportions = label_counts[bin_order] / np.sum(label_counts)\n self.bin_centers = bin_centers[bin_order, :]\n self.ref_sample_size = n\n self.__write_to_bins_file(bins_file)", "def computeBinWidth(self):\n self.binWidth = (self.data[-1] - self.data[0]) / self.numBins\n # Fill the frequencies array with zero\n for i in range(self.numBins):\n self.frequencies.append(0)", "def EDA_binning_numeric_column_displaying_bins(dft, target, bins=4, test=\"\"):\r\n dft = copy.deepcopy(dft)\r\n _, edges = pd.qcut(dft[target].dropna(axis=0),q=bins, retbins=True, duplicates='drop')\r\n ### now we create artificial labels to match the bins edges ####\r\n ls = []\r\n for i, x in enumerate(edges):\r\n #print('i = %s, next i = %s' %(i,i+1))\r\n if i < len(edges)-1:\r\n ls.append('from_'+str(round(edges[i],3))+'_to_'+str(round(edges[i+1],3)))\r\n ##### Next we add a column to hold the bins created by above ###############\r\n dft['binned_'+target] = pd.cut(dft[target], bins=edges, retbins=False, labels=ls, include_lowest=True).values.tolist()\r\n if not isinstance(test, str):\r\n test['binned_'+target] = pd.cut(test[target], bins=edges, retbins=False, labels=ls, include_lowest=True).values.tolist()\r\n nrows = int(len(edges)/2 + 1)\r\n plt.figure(figsize=(15,nrows*3))\r\n plt.subplots_adjust(hspace=.5)\r\n collect_bins = []\r\n for i in range(len(edges)):\r\n if i == 0:\r\n continue\r\n else:\r\n dftc = dft[(dft[target]>edges[i-1]) & (dft[target]<=edges[i])]\r\n collect_bins.append(dftc)\r\n ax1 = plt.subplot(nrows, 2, i)\r\n dftc[target].hist(bins=30, ax=ax1)\r\n ax1.set_title('bin %d: size: %d, %s %0.2f to %0.2f' %(i, dftc.shape[0], target,\r\n edges[i-1], edges[i]))\r\n return ls, edges, dft, test", "def calc_fft_bins(self, tone_bins, nsamp):\n tone_bins_per_fft_bin = nsamp / float(self.nfft)\n fft_bins = np.round(tone_bins / tone_bins_per_fft_bin).astype('int')\n return fft_bins", "def compute_histogram(self):\n # compute distance between points \n distmatrix = np.sqrt(pdist(self.points))\n if not self.mean_dist:\n self.mean_dist = np.mean(distmatrix)\n distmatrix = distmatrix/self.mean_dist\n distmatrix = squareform(distmatrix)\n #compute angles between points\n angles = compute_angles(self.points)\n #quantize angles to a bin\n tbins = np.floor(angles / (2 * pi / self.nbins_theta))\n lg = np.logspace(self.r1, self.r2, num=5)\n #quantize radious to bins\n rbins = np.ones(angles.shape) * -1\n for r in lg:\n counts = (distmatrix < r) \n rbins = rbins + counts.astype(int) \n return rbins, tbins" ]
[ "0.59255564", "0.59204584", "0.57994825", "0.5772476", "0.5754282", "0.56141067", "0.5580331", "0.5579957", "0.55621636", "0.55621636", "0.55580896", "0.5532407", "0.55076605", "0.5490347", "0.53597844", "0.5341103", "0.53104323", "0.5293909", "0.52776015", "0.5274892", "0.5240212", "0.52351797", "0.5204212", "0.5198126", "0.5192259", "0.51804787", "0.516859", "0.51594156", "0.515746", "0.5156872" ]
0.82005036
0
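A minimal sketch (illustration only, not part of the dataset row above): the binning helpers quoted in the negatives list above count values into hand-built frequency ranges; numpy's histogram does the same job given explicit bin edges. The sample values and edges below are arbitrary assumptions.

    import numpy as np

    values = np.array([61, 75, 130, 131, 200, 450])   # assumed sample data
    edges = [60, 79, 100, 124, 150, 500]               # assumed bin edges
    counts, _ = np.histogram(values, bins=edges)       # one count per [lo, hi) range
    print(counts.tolist())                             # [2, 0, 0, 2, 2]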
Given a json path (using jsonpath notation), get a json patch (RFC 6902) which can be used to remove the document fragment pointed to by the input path. Note that such a conversion is not formally specified anywhere, so the conversion rules are experimentation-based.
def from_path_to_jsonpatch(matchedpath): logging.info("starting. filter path: %s", matchedpath) # First step: path format change # typical input: $['ietf-yang-library:modules-state']['module'][57] # desired output: /ietf-yang-library:modules-state/module/57 matchedpath = matchedpath.replace("$.", "/") matchedpath = matchedpath.replace("$['", "/") matchedpath = matchedpath.replace("']['", "/") matchedpath = matchedpath.replace("']", "/") # this one is for the $[2] pattern if "$[" in matchedpath and "]" in matchedpath: matchedpath = matchedpath.replace("$[", "/") matchedpath = matchedpath.replace("]", "") matchedpath = matchedpath.replace("[", "") matchedpath = matchedpath.replace("]", "") matchedpath = matchedpath.rstrip("/") # Now, for input: /ietf-yang-library:modules-state/module/57 # desired output: [{"op":"remove","path":"/ietf-yang-library:modules-state/module/57"}] logging.info("final filter path: %s", matchedpath) as_patch = '[{{"op":"remove","path":"{0}"}}]'.format(matchedpath) logging.info("generated patch line: %s", as_patch) return as_patch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prefixed(self, path: List[str]) -> \"JsonPatch\":\n return JsonPatch([\n JsonPatchOperation(\n op=op.op,\n path=[*path, *op.path],\n value=op.value)\n for op in self\n ])", "def push_path(path, diffs):\n for key in reversed(path):\n diffs = [op_patch(key, diffs)]\n return diffs", "def apply_filter(json_arg, filtering_line):\n\n logging.info(\"apply_filter:starting. jsonPath filter=[%s]\", filtering_line)\n\n res = jsonpath(json_arg, filtering_line, result_type=\"PATH\")\n if isinstance(res, types.BooleanType) or len(res) == 0:\n logging.info(\"apply_filter: The prefilter [%s] matched nothing\", filtering_line)\n return json_arg\n if len(res) > 1:\n raise AssertionError(\n \"Bad pre-filter [%s] (returned [%d] entries, should return one at most\",\n filtering_line,\n len(res),\n )\n as_json_patch = from_path_to_jsonpatch(res[0])\n logging.info(\"apply_filter: applying patch! resolved patch =%s\", as_json_patch)\n patched_json = jsonpatch.apply_patch(json_arg, as_json_patch)\n\n logging.info(\"apply_filter: json after patching: %s\", patched_json)\n return patched_json", "def removeJsonComment(jsonStr):\n pureJsonStr = jsonStr\n\n # whole line with #\n # # International\n pureJsonStr = re.sub(\"^\\s*#.*$\\n+\", \"\", pureJsonStr, flags=re.M)\n\n # whole line with //\n # // \"mode\": \"SINGLE\",\n pureJsonStr = re.sub(\"^\\s*//.*$\\n+\", \"\", pureJsonStr, flags=re.M)\n\n # line tail with #\n pureJsonStr = re.sub(\"\\s+#.*$\", \"\", pureJsonStr, flags=re.M)\n\n # line tail with //\n pureJsonStr = re.sub(\"\\s+//.*$\", \"\", pureJsonStr, flags=re.M)\n\n return pureJsonStr", "def patch_files():\n args = parser.parse_args()\n doc = json.load(args.ORIGINAL)\n patch = json.load(args.PATCH)\n result = jsonpatch.apply_patch(doc, patch)\n print(json.dumps(result, indent=args.indent))", "def modify_jsonyaml_paths(jsonyaml_file):\n loader = schema_salad.ref_resolver.Loader(\n {\"location\": {\"@type\": \"@id\"}, \"path\": {\"@type\": \"@id\"}}\n )\n input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False)\n basedir = os.path.dirname(jsonyaml_file)\n\n def fixpaths(d):\n \"\"\"Make sure all paths have a URI scheme.\"\"\"\n if isinstance(d, dict):\n if \"path\" in d:\n if \":\" not in d[\"path\"]:\n local_path = os.path.normpath(\n os.path.join(os.getcwd(), basedir, d[\"path\"])\n )\n d[\"location\"] = pathname2url(local_path)\n else:\n d[\"location\"] = d[\"path\"]\n del d[\"path\"]\n\n visit(input_dict, fixpaths)\n return json.dumps(input_dict)", "def jsonpath_to_xpath(path):\n return '/' + path.replace('.', \"/\")", "def clean_path(path: str) -> str:\n previous_path = \"\"\n next_path = path\n while next_path != previous_path:\n previous_path = next_path\n next_path = copy_annotations(path, next_path.replace(\"//\", \"/\"))\n while next_path.endswith(\"/\"):\n next_path = next_path[:-1]\n return next_path", "def decompose(self) -> typing.Generator[\"JsonPatchOperation\", None, None]:\n if self.op == JsonPatchOperation.Operation.remove:\n yield [self]\n return\n # else: add/replace\n\n if isinstance(self.value, dict):\n for k, v in self.value.items():\n sub_op = JsonPatchOperation(self.op, [*self.path, str(k)], v)\n for sub_sub_op in sub_op.decompose():\n yield sub_sub_op\n else:\n yield self", "def jsonpath(self, path, patterns=[], queries=[], use_json=True):\n import hxl.filters\n return hxl.filters.JSONPathFilter(self, path, patterns=patterns, queries=queries, use_json=use_json)", "def fix_path(self):\n paths = self.data['path'].tolist()\n prefixes = [re.findall(r'[A-Z\\-0-9]+', 
path) for path in paths]\n prefix_good = [str(prefix[0]) + \".json\" for prefix in prefixes]\n self.data['path'] = prefix_good", "def repair_path(dict_1):\n dup_dict = dict(dict_1)\n for k,v in dup_dict.items():\n if '\\\\' in k:\n key = k.replace('\\\\', '/')\n val = v.replace('\\\\', '/')\n del dict_1[k]\n dict_1[key] = val\n return dict_1", "def force_json(uri):\n o = urlparse(uri)\n path, extension = os.path.splitext(o.path)\n path = path + \".json\"\n o = (o.scheme, o.netloc, path, o.params, o.query, \"\")\n return urlunparse(o)", "def _pop_path(diffs):\n key = None\n popped_diffs = []\n for d in diffs:\n # Empty diffs can be skipped\n if d is None or len(d) == 0:\n popped_diffs.append(None)\n continue\n # Check that we have only one op, which is a patch op\n if len(d) != 1 or d[0].op != DiffOp.PATCH:\n return\n # Ensure all present diffs have the same key\n if key is None:\n key = d[0].key\n elif key != d[0].key:\n return\n # Ensure the sub diffs of all ops are suitable as outer layer\n # if d[0].diff.length > 1:\n # return\n popped_diffs.append(d[0].diff)\n if key is None:\n return\n return {'key': key, 'diffs': popped_diffs}", "def delete_by_path(data: Dict[str, T], path: Sequence[str]):\n del get_by_path(data, path[:-1])[path[-1]]", "def jpath_to_json(value, strict=False):\n if isinstance(value, d.Boolean):\n return value.get_value()\n if isinstance(value, d.Null):\n return None\n if isinstance(value, d.Number):\n return value.get_as_float()\n if isinstance(value, d.String):\n return value.get_value()\n if isinstance(value, d.Object):\n result = {}\n for k, v in value:\n k = jpath_to_json(k)\n if strict and not isinstance(k, d.String):\n raise Exception(\"Key \" + str(k) + \" of an object was not a \"\n \"string, but strict formatting was requested. JSON \"\n \"object keys must always be strings when strict \"\n \"formatting is on.\")\n result[k] = jpath_to_json(v)\n return result\n if isinstance(value, d.List):\n return [jpath_to_json(v) for v in value]\n raise TypeError(\"Can't convert values of type \" + str(type(value))\n + \" to json\")", "def json_merge_patch(servicedef, source, with_):\n\n if isinstance(source, list) or isinstance(with_, list):\n return with_\n\n if not isinstance(source, dict):\n raise TypeError('source must be a dict, got %s' % (type(source)))\n\n if not isinstance(with_, dict):\n raise TypeError('with_ must be a dict, got %s' % (type(with_)))\n\n isdebug() and logger.debug('JSON merge:\\nsource = %s\\nwith = %s' %\n (json.dumps(source, indent=2),\n json.dumps(with_, indent=2)))\n\n if ( '$ref' in source and\n '$ref' in with_ and\n source == with_):\n # If merging 2 $refs and they have the same target, nothing\n # to do ... but avoid infinite recusion!\n return source\n\n # Need to make a copy of source, as this is going to be modified\n # Only make a shallow copy here - only the shallow properties\n # are modified in this call. 
Recursively deeper calls will make\n # deeper copies as needed.\n source = _eval_shallow(servicedef, source, need_copy=True)\n\n # with_ is only used in a readonly fashion, so no need to copy\n with_ = _eval_shallow(servicedef, with_, need_copy=False)\n\n for key, value in with_.items():\n if value is None:\n # Remove the key if present in the source\n if key in source:\n del source[key]\n elif ( isinstance(value, dict) and\n key in source and\n isinstance(source[key], dict)):\n # If this key is a dict in both source and with_, recurse\n source[key] = json_merge_patch(servicedef, source[key], value)\n else:\n # Otherwise update the source for this key. This may add the\n # key to source if it was not already present\n source[key] = value\n\n isdebug() and logger.debug('JSON merge result:\\n%s' %\n (json.dumps(source, indent=2)))\n\n return source", "def _pretty_json_path(self, path):\r\n segments = path.split('.')\r\n\r\n def builder(prev, cur):\r\n if re.match(cur):\r\n return \"{0}[]\".format(prev)\r\n return \"{0}.{1}\".format(prev, cur)\r\n\r\n segments = reduce(builder, segments)\r\n return segments", "def processed_json_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')", "def clean_path(path):\n return resolved_path(path)", "def shrink(body):\n toremove = []\n for key, value in body.iteritems():\n if isinstance(value, basestring):\n if key.endswith('url'):\n if (value.startswith('https://api.github.com/') or\n value.startswith('https://avatars.githubusercontent.com')):\n toremove.append(key)\n elif isinstance(value, dict):\n shrink(value)\n elif isinstance(value, list):\n for el in value:\n if isinstance(el, dict):\n shrink(el)\n for key in toremove:\n body.pop(key)\n return body", "def resolve_fragment(self, document, fragment):\r\n\r\n fragment = fragment.lstrip(u\"/\")\r\n parts = unquote(fragment).split(u\"/\") if fragment else []\r\n\r\n for part in parts:\r\n part = part.replace(u\"~1\", u\"/\").replace(u\"~0\", u\"~\")\r\n\r\n if isinstance(document, Sequence):\r\n # Array indexes should be turned into integers\r\n try:\r\n part = int(part)\r\n except ValueError:\r\n pass\r\n try:\r\n document = document[part]\r\n except (TypeError, LookupError):\r\n raise RefResolutionError(\r\n \"Unresolvable JSON pointer: %r\" % fragment\r\n )\r\n\r\n return document", "def reconstruct_object(path):\n if len(path) == 0:\n return '%s'\n else:\n # The indexed query on `path` below is the means by which we recurse\n # Every iteration pushes it closer to a length of 0 and, thus, bottoming out\n return '{{%s: {recons}}}'.format(recons=reconstruct_object(path[1:]))", "def replace_ref(schema: T, schema_prefix: str) -> T:\n if isinstance(schema, List):\n return [replace_ref(item, schema_prefix) for item in schema]\n\n if isinstance(schema, Dict):\n if \"$ref\" in schema:\n schema[\"$ref\"] = schema_prefix + schema[\"$ref\"].split(\"/\")[-1]\n return schema\n\n return {key: replace_ref(value, schema_prefix) for key, value in schema.items()}\n\n return schema", "def enhance_doc(line):\n try:\n line = json.loads(line)\n line = flatten_transcript(line)\n line = filter_keys(line)\n return line\n except Exception:\n return None", "def test_normalize_patch_with_deleted_symlink(self):\n self.assertEqual(\n self.tool.normalize_patch(\n patch=(\n b'diff --git a/test b/test\\n'\n b'deleted file mode 120000\\n'\n b'index abc1234..0000000\\n'\n b'--- a/test\\n'\n b'+++ /dev/null\\n'\n b'@@ -1,1 +0,0 @@\\n'\n b'-old_target\\n'\n b'\\\\ No newline at end of file'\n ),\n 
filename='test',\n revision='abc1234'),\n (\n b'diff --git a/test b/test\\n'\n b'deleted file mode 100000\\n'\n b'index abc1234..0000000\\n'\n b'--- a/test\\n'\n b'+++ /dev/null\\n'\n b'@@ -1,1 +0,0 @@\\n'\n b'-old_target\\n'\n b'\\\\ No newline at end of file'\n ))", "def _cleanup_spec(spec):\n\n nonreferenced_data = []\n for data in spec[\"data\"]:\n name = data[\"name\"]\n # create a vesion of the spec where this data is removed\n without_this_data = copy.deepcopy(spec)\n without_this_data[\"data\"].remove(data)\n has_reference = name in str(without_this_data)\n if not has_reference:\n nonreferenced_data.append(data)\n\n new = copy.deepcopy(spec)\n new[\"data\"] = [data for data in new[\"data\"] if data not in nonreferenced_data]\n return new", "def simulate_patch(self, path='/', **kwargs):\n return self.simulate_request('PATCH', path, **kwargs)", "def _fix_old_syntax(tree):\n for key in list(tree.keys()):\n if 'object' in list(tree[key].keys()):\n # if no name is present and the object name is the old syntax we\n # need to be creative and pull the object name and use it\n if 'name' not in list(tree[key].keys()) and \\\n tree[key]['object'].find(':') >= 0:\n tree[key]['name'] = tree[key]['object'].replace(':', '_')\n\n # strip the old syntax from the object name\n tree[key]['object'] = tree[key]['object'].split(':')[0]\n\n # for the remaining syntax we will replace ':' with '_'\n for line in tree[key]:\n try:\n tree[key][line] = tree[key][line].replace(':', '_')\n except AttributeError:\n # If we've hit a dict, recurse.\n if isinstance(tree[key][line], dict):\n # Since dicts are mutable, and tree[key][line]\n # is a dict, this should work just fine for\n # updating in place.\n _fix_old_syntax(tree={line: tree[key][line]})\n else:\n raise TypeError(\"Something weird is going on.\")\n\n # if we are working with fuses let's set the mean replace time to 1\n # hour if not specified. Then we aviod a warning!\n if tree[key]['object'] == 'fuse' \\\n and 'mean_replacement_time' not in list(tree[key].keys()):\n tree[key]['mean_replacement_time'] = 3600.0\n\n # # FNCS is not able to handle names that include \"-\" so we will\n # # replace that with \"_\".\n # for prop in RENAME:\n # try:\n # # Attempt to fix the property.\n # tree[key][prop] = tree[key][prop].replace('-', '_')\n # except KeyError:\n # # Property isn't present - move along.\n # pass\n\n # No return, as we're modifying in place.\n return None", "def preprocess(self, newpath):\n return dict()" ]
[ "0.57485837", "0.54352486", "0.5391403", "0.5389081", "0.538902", "0.53637743", "0.53559333", "0.53352666", "0.53110945", "0.52599907", "0.52183264", "0.5205005", "0.5175974", "0.5173113", "0.5170655", "0.5149336", "0.51125824", "0.51051855", "0.5088135", "0.50403416", "0.5012543", "0.4991242", "0.49263558", "0.49066406", "0.49010426", "0.49000233", "0.48751533", "0.4850749", "0.48288444", "0.48212975" ]
0.71641725
0
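A minimal usage sketch for the row above (assumes the PyPI `jsonpatch` package; the document keys and the index are made-up examples): the remove-operation string produced by a path-to-patch conversion can be fed directly to jsonpatch.apply_patch.

    import json
    import jsonpatch

    doc = {"ietf-yang-library:modules-state": {"module": ["m0", "m1", "m2"]}}
    # Hypothetical patch such a conversion would emit for the element at index 1:
    patch_str = '[{"op": "remove", "path": "/ietf-yang-library:modules-state/module/1"}]'
    patched = jsonpatch.apply_patch(doc, patch_str)   # parses the string and applies the remove op
    print(json.dumps(patched))
    # {"ietf-yang-library:modules-state": {"module": ["m0", "m2"]}}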
Performs the prefiltering of a json file
def prefilter(json_arg, initial_prefilter): if not initial_prefilter: logging.info("prefilter not found!") # whether it is filtered or not, return as json so it can be handled uniformly from now on return json.loads(json_arg) with open(initial_prefilter) as f: lines = f.read().splitlines() logging.info("prefilter:lines in prefilter file: %d ", len(lines)) lines = filter(lambda k: not k.startswith("#"), lines) logging.info("prefilter:lines after removing comments: %d ", len(lines)) json_args_as_json = json.loads(json_arg) for filtering_line in lines: json_args_as_json = apply_filter(json_args_as_json, filtering_line) return json_args_as_json
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_json_file(filename, filters_list):\n filename = filename.replace(\" \", \"\")\n print(\"Filters List {}\".format(filters_list))\n if isinstance(filters_list, list) is False:\n filters_list = [filters_list]\n with open(robot_dir + \"/output/original/{}_orig.json\".format(filename), \"r\") as file_orig:\n # Get the root Json Object\n json_root_object = json.load(file_orig)\n\n if \"commitId\" in json_root_object:\n del json_root_object[\"commitId\"]\n\n # Store the original json object\n orig_json_object = json.dumps(json_root_object)\n root_key = json_root_object.keys()[0]\n\n if root_key == \"message\" and len(json_root_object.keys()) > 1:\n root_key = json_root_object.keys()[1]\n\n # Get the array of objects under the root\n try:\n if len(filters_list) > 0:\n json_root_object = delete_keys_from_dict(json_root_object, filters_list)\n except:\n print('Failed to delete keys from json_root_object before iteration')\n\n if root_key in ROOT_RETURN_ELEMENTS:\n json_array = json_root_object[root_key]\n\n if isinstance(json_array, list):\n # Clear the original array\n json_root_object[root_key] = []\n\n # For each object remove any of the keys that are specified in the filter list then put the object back into\n # the array\n for json_object in json_array:\n if len(filters_list) > 0:\n json_object = delete_keys_from_dict(json_object, filters_list)\n json_root_object[root_key].append(json_object)\n # json_root_object = sorted(json_root_object)\n elif isinstance(json_array, str):\n json_root_object = json_array\n\n # Serialize the JSON object before writing it to the file\n json_object = json.dumps(json_root_object, sort_keys=True, indent=4, separators=(',', ': '))\n # Write to result .json file:\n filtered_filename = robot_dir + \"/output/results/{}.json\".format(filename)\n filtered_file = open(filtered_filename, \"w\")\n # print(json_object)\n filtered_file.writelines(json_object)\n filtered_file.close()\n\n # return filtered_output, orig_output\n return json_object, orig_json_object", "def parse_json_files(self, filter_fn=None):\n def filter_function(f):\n return f is not None and f.endswith(\".json\")\n if not filter_fn:\n filter_fn = filter_function\n files = self.filter_files(None,filter_fn)\n dicts = []\n for f in files:\n with open(f) as fh:\n dicts.append(json.load(fh))\n return dicts", "def main():\n parser = ArgumentParser(\n description='Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n\n ann_file = open(args.json_file_path)\n category_names = [\"sports ball\", \"cell phone\", \"couch\", \"elephant\", \"tie\", \"spoon\", \"skis\", \"apple\", \"giraffe\", \"laptop\", \"tennis racket\", \"sink\", \"dog\", \"fork\", \"cat\", \"teddy bear\", \"train\", \"skateboard\", \"toilet\", \"sandwich\", \"bed\", \"keyboard\", \"baseball glove\", \"baseball bat\", \"airplane\", \"oven\", \"hot dog\", \"refrigerator\", \"frisbee\", \"mouse\", \"fire hydrant\", \"stop sign\", \"bear\", \"snowboard\", \"parking meter\", \"toothbrush\", \"microwave\", \"scissors\", \"hair drier\", \"toaster\"]\n\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n\n output = 
open(args.out_file, \"w\")\n json.dump(new_json, output)\n output.close()", "def apply_filter(json_arg, filtering_line):\n\n logging.info(\"apply_filter:starting. jsonPath filter=[%s]\", filtering_line)\n\n res = jsonpath(json_arg, filtering_line, result_type=\"PATH\")\n if isinstance(res, types.BooleanType) or len(res) == 0:\n logging.info(\"apply_filter: The prefilter [%s] matched nothing\", filtering_line)\n return json_arg\n if len(res) > 1:\n raise AssertionError(\n \"Bad pre-filter [%s] (returned [%d] entries, should return one at most\",\n filtering_line,\n len(res),\n )\n as_json_patch = from_path_to_jsonpatch(res[0])\n logging.info(\"apply_filter: applying patch! resolved patch =%s\", as_json_patch)\n patched_json = jsonpatch.apply_patch(json_arg, as_json_patch)\n\n logging.info(\"apply_filter: json after patching: %s\", patched_json)\n return patched_json", "def prefilter_json_files_then_compare(args):\n\n logging.info(\"prefilter_json_files_then_compare: starting!\")\n with open(args.initialFile) as f:\n json_initial = file.read(f)\n with open(args.finalFile) as f2:\n json_final = file.read(f2)\n\n patch = jsonpatch.JsonPatch.from_diff(json_initial, json_final)\n logging.info(\n \"prefilter_json_files_then_compare:differences before patching: %d\",\n len(list(patch)),\n )\n\n json_initial_filtered = prefilter(json_initial, args.initial_prefilter)\n json_final_filtered = prefilter(json_final, args.finalPreFilter)\n\n patch_after_filtering = jsonpatch.JsonPatch.from_diff(\n json_initial_filtered, json_final_filtered\n )\n differences_after_patching = list(patch_after_filtering)\n logging.info(\n \"prefilter_json_files_then_compare: differences after patching: %d\",\n len(differences_after_patching),\n )\n\n if args.printDifferences:\n for patchline in differences_after_patching:\n print(json.dumps(patchline))\n\n print(len(differences_after_patching))\n return len(differences_after_patching)", "def main():\n filters_obj = _gauth.gmail_filters()\n for json_path in flag_import_from_json():\n if not os.path.isfile(json_path):\n _log.logger.error(\"missing json file: %s\" % (json_path))\n continue\n new_filter = _gmail.create_filter_from_json(filters_obj, json_path, \"me\")\n _log.logger.debug(new_filter)\n filter_path = os.path.join(\n _cfg.filters_json_basepath(),\n \"%s.json\" % (new_filter['id']))\n if not _db.check_and_write_json(filter_path, new_filter):\n print(\"~ skipping removal as failed saving json for filter id: %s\" % (new_filter['id']))\n continue\n if flag_override_filter():\n delete_filter_of_json(filters_obj, json_path)", "def filter_corpus():\n result = []\n for f in glob.glob(f\"{DATA_DIR}/COP*.json\"):\n with open(f, \"r\") as infile:\n data = json.load(infile)\n articles = flatten(data)\n result.append(articles)\n\n with open(f\"{DATA_DIR}/filtered_data.json\", \"w\", encoding='utf-8') as f:\n json.dump(result, f, indent=2)", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be 
made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def read_json(self):\n # read in all json files in the input_path, that match the\n # algorithm_name and are not outputs\n for f in os.listdir(self.input_path):\n if(os.path.splitext(f)[1] == \".json\") and (os.path.basename(f).startswith(self.algorithm_name)) and (not os.path.basename(f).startswith(\"_\")):\n self.__input_jsons += [json.load(open(self.input_path + f))]", "def open_psort_json(json_file_name):\n with open(json_file_name) as f:\n jf = json.load(f)\n return jf", "def filterAll(media_list_file, in_movie_dir, out_movie_dir):\n with open(media_list_file) as f:\n names = json.load(f)\n\n count_tweets = []\n count_kept_tweets = []\n for name in names:\n with open(\"{}/{}.json\".format(in_movie_dir, name)) as f:\n tweets = json.load(f)\n kept_tweets = [t for t in tweets.values() if keep(t)]\n print(\"total = {}\\tkeep = {}\\tName = {}\".format(\n len(tweets), len(kept_tweets), name))\n count_tweets.append(len(tweets))\n count_kept_tweets.append(len(kept_tweets))\n with open(\"{}/{}.json\".format(out_movie_dir, name), \"w\") as f:\n json.dump(kept_tweets, f)\n print(\"Mean tweets = {}\".format(np.mean(count_tweets)))\n print(\"Mean kept tweets = {}\".format(np.mean(count_kept_tweets)))\n print(\"Mean fraction kept tweets = {}\".format(\n np.mean(count_kept_tweets) / np.mean(count_tweets)))", "def office_prefilter_data(parser, args, params):\n local_args = parser.parse_known_args(args)\n \n control.prefilter_data(params)", "def filter_json_by_category(self, new_json_path):\n # {'supercategory': 'person', 'id': 1, 'name': 'person'}\n ### Filter images:\n print(\"Filtering the annotations ... 
\")\n json_parent = os.path.split(new_json_path)[0]\n os.makedirs(json_parent, exist_ok=True)\n imgs_ids = [x['id'] for x in self.images] # get img_ids of imgs with the category\n new_imgs = [x for x in self.coco.dataset['images'] if x['id'] in imgs_ids]\n catIds = self.catIds\n ### Filter annotations\n new_annots = [x for x in self.coco.dataset['annotations'] if x['category_id'] in catIds]\n ### Reorganize the ids\n new_imgs, annotations = self.modify_ids(new_imgs, new_annots)\n ### Filter categories\n new_categories = [x for x in self.coco.dataset['categories'] if x['id'] in catIds]\n print(\"new_categories: \", new_categories)\n data = {\n \"info\": self.coco.dataset['info'],\n \"licenses\": self.coco.dataset['licenses'],\n \"images\": new_imgs, \n \"annotations\": new_annots,\n \"categories\": new_categories \n }\n print(\"saving json: \")\n with open(new_json_path, 'w') as f:\n json.dump(data, f)", "def sanitize_json_files(unsanitized_json_file):\n with open(develop_baseline_dir + unsanitized_json_file) as f:\n lines = f.readlines()\n\n with open('sanitizedJson/sanitized_{}'.format(unsanitized_json_file), 'w') as w:\n w.writelines([item for item in lines[:-1]])", "def _json_probe(srcfile):\n return json.loads(__run(srcfile))", "def main():\n\n # Set up argument parser.\n parser = argparse.ArgumentParser(\n description='Removes duplicate key-value pairs from JSON files.')\n parser.add_argument('--suffix', default='',\n help='optional suffix for output files; '\n 'if empty, files will be changed in place')\n parser.add_argument('files', nargs='+', help='input files')\n args = parser.parse_args()\n\n # Iterate over files.\n for filename in args.files:\n # Read in json using Python libraries. This eliminates duplicates.\n print('Processing ' + filename + '...')\n try:\n with codecs.open(filename, 'r', 'utf-8') as infile:\n j = json.load(infile)\n except ValueError as e:\n print('Error reading ' + filename)\n raise InputError(filename, str(e))\n\n # Built up output strings as an array to make output of delimiters easier.\n output = []\n for key in j:\n if key != '@metadata':\n output.append('\\t\"' + key + '\": \"' +\n j[key].replace('\\n', '\\\\n') + '\"')\n\n # Output results.\n with codecs.open(filename + args.suffix, 'w', 'utf-8') as outfile:\n outfile.write('{\\n')\n outfile.write(',\\n'.join(output))\n outfile.write('\\n}\\n')", "def filter(self, filter_dict):\n pass", "def load_priors(self, json_file):\n\n with open(json_file, 'r') as jf:\n self.priors_dict = json.load(jf)", "def get_from_file(filepath, **kwargs):\n tweets=[]\n with open(filepath, \"r\") as file:\n for line in file.readlines():\n tweet=json.loads(line)\n tweet[\"user\"]=util.filter_keys(tweet[\"user\"], [\"name\", \"location\"])\n tweet = util.filter_keys(tweet, [\"created_at\", \"user\"])\n tweets.append(tweet)\n return {\"tweets\":tweets}", "def loadAndClean(jfile):\n with open(jfile) as json_file:\n data = json.load(json_file)[\"G15\"]\n newDict = {}\n # Print the type of data variable\n \n for entry in data:\n\n if \"version\" in data[entry] and data[entry][\"assessed_by\"] not in filterList:\n\n if data[entry][\"assessed_by\"] in dictTries:\n dictTries[data[entry][\"assessed_by\"]].append(data[entry])\n\n \n\n if len(dictTries[data[entry][\"assessed_by\"]]) == 2:\n ml = dictTries[data[entry][\"assessed_by\"]]\n #vou querer calcular o maior \n # comparisson = \"accuracy\"\n comparisson = \"target_w_penalty\"\n \n if ml[0][comparisson] <= ml[1][comparisson]:\n if ml[0][\"accuracy\"] > 90:\n printSingle(ml[0])\n 
newDict[entry] = ml[0]\n\n else:\n if ml[1][\"accuracy\"] > 90:\n printSingle(ml[1])\n newDict[entry] = ml[1]\n dictTries[data[entry][\"assessed_by\"]] = []\n\n else:\n dictTries[data[entry][\"assessed_by\"]] = [data[entry]]\n\n\n\n # dictTries[data[\"assessed_by\"]]\n\n # newDict[entry] = data[entry]\n # print(data[entry])\n # print()\n # printDict(newDict)\n return newDict", "def filter_json(json, param, param_range):\n filtered_json = []\n\n for element in json:\n if element[param]:\n try:\n value = int(element[param])\n if param_range[0] <= value <= param_range[1]:\n filtered_json.append(element)\n except:\n pass\n\n\n return filtered_json", "def trim_json_files():\n # List directory for training data files\n file_list = os.listdir(settings.PATH_TRAINING_DATA)\n file_names = []\n for file in file_list:\n file_name = file.split('.')[0]\n file_names.append(file_name)\n\n for idx, file in enumerate(file_list):\n # Import json file\n with open(settings.PATH_TRAINING_DATA + file_list[idx], 'r') as json_file:\n data = json.load(json_file)\n json_file.close()\n # Trim training set for items with no classification\n for article in list(data['TrainingData']):\n if data['TrainingData'][article]['topics'] == []:\n data['TrainingData'].pop(article)\n\n # Save trimmed training data\n with open('E:\\Python\\MultiLabel\\data\\TrimmedTrainingData\\{}.json'.format(file_names[idx]), 'w') as outfile:\n json.dump(data, outfile, indent=4, sort_keys=True)\n outfile.close()", "def read_and_write_file(json_file_path, new_json_file_path):\n rests = list()\n with open(json_file_path, \"r\") as old:\n for line in old:\n line_contents = json.loads(line)\n #print line_contents['categories']\n if 'Restaurants' in line_contents['categories']:\n rests.append(line_contents)\n #print \"True\"\n\n json.JSONEncoder().encode(rests)\n with open(new_json_file_path, \"w+\") as newf:\n json.dump(rests, newf)", "def stich_from_file(jsonname):\n with open(jsonname) as f:\n data = json.loads(f.read())\n stich(data)", "def extract_matches_from_json(json_file_path, **kwargs):\n matches = []\n with open(json_file_path, 'r', encoding='utf8') as f:\n for match in json.load(f, object_pairs_hook=OrderedDict):\n matches.append(MatchStat(match, **kwargs))\n if matches[-1].invalid:\n matches.pop()\n continue\n return matches", "def filter_data(self, json_data):\n\n\t\tdata = json_data['data']\n\t\tlocal_time_convertor = time_convertor.TimeConvertor()\n\n\n\t\tfor event_data in data:\n\t\t\t# go through each event and save data\n\n\t\t\t# first need to get data for all avalible sites\n\t\t\tevent_h2h_odds = []\n\t\t\tevent_site_names = []\n\t\t\tfor i, sites_data in enumerate(event_data['sites']):\n\t\t\t\tif len(sites_data['odds']['h2h']) > 2:\n\t\t\t\t\t# if more the 3 odds values (draw odds given) only take win loss odds\n\t\t\t\t\tevent_h2h_odds.append([sites_data['odds']['h2h'][0], sites_data['odds']['h2h'][1]])\n\t\t\t\telse:\n\t\t\t\t\tevent_h2h_odds.append(sites_data['odds']['h2h'])\n\t\t\t\tevent_site_names.append(sites_data['site_nice'])\n\t\t\t\n\t\t\t# append event data\n\t\t\tself.teams.append(event_data['teams'])\n\t\t\tself.h2h_odds.append(event_h2h_odds)\n\t\t\tself.betting_sites.append(event_site_names)\n\n\t\t\tlocal_time_convertor.convert_to_AEST(event_data['commence_time'])\n\t\t\tself.start_time['string format'].append(local_time_convertor.local_time_string)\n\t\t\tself.start_time['datetime format'].append(local_time_convertor.local_time)\n\n\t\t# debug helper code\n\t\t# print(self.teams)\n\t\t# 
print(self.betting_sites)\n\t\t# print(self.h2h_odds)", "def filterCollections(path, context):\n filteredObj = []\n for obj in os.listdir(path):\n if os.path.splitext(obj)[-1] == '.json':\n if obj.split('_', 1)[0] == context:\n filteredObj.append(obj.encode('utf8'))\n baseC = context + '_baseCollection.json'\n filteredObj = sorted(filteredObj, key=str.lower)\n filteredObj.insert(0, filteredObj.pop(filteredObj.index(baseC)))\n return filteredObj", "def load_filter_file(self, filter_path):\n logger.debug(\"Adding filter file {}\", filter_path)\n try:\n with open(filter_path, \"r\") as filter_file:\n try:\n json_filter_data = json.load(filter_file)\n except Exception as err:\n msg = \"Unable to parse filter file {} as a json file. {!r}\".format(\n filter_path, err)\n logger.debug(msg)\n raise errors.ParserError(msg)\n except IOError:\n raise errors.ParserError(\n \"Unable to access filter path '{}'\".format(filter_path))\n\n if \"version\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'version' key.\".format(\n filter_path))\n\n if \"filters\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'filters' key.\".format(\n filter_path))\n\n if not isinstance(json_filter_data[\"version\"], dict):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting value of 'version' entry to be a dictionary \"\n \"but instead its a {}.\".format(filter_path,\n type(json_filter_data[\"version\"])))\n\n version_info = json_filter_data[\"version\"]\n\n if \"major\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'major' key in 'version' value.\".format(filter_path))\n\n if \"minor\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'minor' key in 'version' value.\".format(filter_path))\n\n if not isinstance(version_info[\"major\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting int for major version found {} instead.\".format(\n filter_path, type(version_info[\"major\"])))\n\n if not isinstance(version_info[\"minor\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting int for minor version found {} instead.\".format(\n filter_path, type(version_info[\"minor\"])))\n\n if version_info[\"major\"] != FILTER_JSON_FORMAT_MAJOR_VERSION:\n raise errors.ParserError(\n \"Loading filter-file {} failed. 
\"\n \"Found unexpected major version in JSON filter file.\".format(\n filter_path))\n\n self._add_filters(json_filter_data[\"filters\"], filter_path)", "def minify_json(self) -> None:\n print(f\"[FileManager: INFO] Minifing file {self.path}\")\n file_data = open(self.path, \"r\", encoding=\"utf-8\").read()\n json_data = json.loads(file_data) \n json_string = json.dumps(json_data, separators=(',', \":\")) \n path = str(self.path).replace(\".json\", \"\")\n new_path = \"{0}_min.json\".format(path)\n open(new_path, \"w+\", encoding=\"utf-8\").write(json_string)", "def _load_filter_directory(self, filter_path):\n try:\n logger.debug(\"Adding filters from directory {}\", filter_path)\n for filter_file in os.listdir(filter_path):\n if filter_file.endswith(\".json\"):\n filter_file_path = os.path.join(filter_path, filter_file)\n self.load_filter_file(filter_file_path)\n else:\n logger.debug(\"Skipping file {} missing .json extension\", filter_path)\n except OSError:\n raise errors.ParserError(\n \"Unable to access filter path '{}'\".format(filter_path))" ]
[ "0.7008582", "0.64491796", "0.63941675", "0.63912183", "0.6214865", "0.5879361", "0.5761348", "0.5693378", "0.5604411", "0.56026095", "0.5592775", "0.5553474", "0.55489975", "0.55380446", "0.5530673", "0.55257964", "0.5513254", "0.5474688", "0.5462171", "0.54521024", "0.5415046", "0.5399681", "0.536148", "0.53452086", "0.5344873", "0.53045523", "0.5294984", "0.5293742", "0.52893895", "0.5281711" ]
0.77999103
0
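A minimal sketch of the same prefiltering idea under a simplifying assumption: here each non-comment line of the filter is already a JSON Pointer, whereas the function above first resolves jsonpath expressions against the document. Only the `jsonpatch` package is used; the function name and sample data are illustrative.

    import json
    import jsonpatch

    def prefilter_with_pointers(json_text, filter_lines):
        doc = json.loads(json_text)
        for line in filter_lines:
            line = line.strip()
            if not line or line.startswith("#"):
                continue                  # comment and blank lines are ignored, as above
            doc = jsonpatch.apply_patch(doc, [{"op": "remove", "path": line}])
        return doc

    print(prefilter_with_pointers(
        '{"a": {"volatile": 1, "stable": 2}}',
        ["# drop fields that differ between snapshots", "/a/volatile"],
    ))
    # {'a': {'stable': 2}}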
Main function. Prefilters the input files using the provided prefiltering patterns, then returns the number of differences (and the differences themselves, when requested).
def prefilter_json_files_then_compare(args): logging.info("prefilter_json_files_then_compare: starting!") with open(args.initialFile) as f: json_initial = file.read(f) with open(args.finalFile) as f2: json_final = file.read(f2) patch = jsonpatch.JsonPatch.from_diff(json_initial, json_final) logging.info( "prefilter_json_files_then_compare:differences before patching: %d", len(list(patch)), ) json_initial_filtered = prefilter(json_initial, args.initial_prefilter) json_final_filtered = prefilter(json_final, args.finalPreFilter) patch_after_filtering = jsonpatch.JsonPatch.from_diff( json_initial_filtered, json_final_filtered ) differences_after_patching = list(patch_after_filtering) logging.info( "prefilter_json_files_then_compare: differences after patching: %d", len(differences_after_patching), ) if args.printDifferences: for patchline in differences_after_patching: print(json.dumps(patchline)) print(len(differences_after_patching)) return len(differences_after_patching)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(\n file_pattern=INFILE_PATTERN,\n # folder_pattern=INFOLDER_PATTERN,\n tol_td=TOLERANCE_TIMEDELTA,\n outlier=OUTLIER_THRESHOLD,\n args=ARGS,\n):\n # Initialize IO-directories and setup logging\n path_in, path_out = initialize_io()\n\n # path_diffs = path_out / \"diff_imgs\"\n # if args.export:\n # # Folder not needed otherwise, but variable needs to be passed\n # if not path_diffs.is_dir():\n # path_diffs.mkdir()\n # logging.info(f\"Created folder '{path_diffs}'\")\n\n # Find matching files\n # NOTE: This can take potentially long\n # A folderwise sorting would be much faster\n # t0 = time.time()\n filelist = sorted(path_in.rglob(file_pattern))\n # dur = time.time() - t0\n\n n_files = len(filelist)\n logging.info(f\"Found {n_files} matching files in '{path_in}'\")\n # f\"(took {dur:.4} seconds)\")\n\n # act_list = []\n # df_agg = None\n df_list = []\n med_list = []\n for csv_path in filelist:\n logging.info(f\"Reading '{csv_path.name}'\")\n\n hive, rpi, method, day_str = parse_filename(csv_path.name)\n name = f\"RPi{rpi}_{day_str}_{method}\"\n # Read CSV\n # header = [\n # \"time_central\", \"duration\", \"activity\",\n # \"time1\", \"time2\",\n # \"file1\", \"file2\"\n # ]\n # See https://pandas.pydata.org/pandas-docs/stable/reference/\n # api/pandas.read_csv.html\n # df = pd.read_csv(csv_path, index_col=\"time\", parse_dates=True,\n # date_parser=my_date_parser)\n # Works only with the default pandas time format:\n df = pd.read_csv(\n csv_path,\n index_col=\"time_central\",\n parse_dates=[\"time_central\", \"time1\", \"time2\"],\n # converters={\"path\": my_path_parser}),\n )\n df[\"hour\"] = df.index.hour\n df[\"hive\"] = [hive] * len(df)\n df[\"rpi\"] = [rpi] * len(df)\n df[\"method\"] = [method] * len(df)\n\n # if df_agg is None:\n # df_agg = df\n # else:\n # df_agg = pd.concat([df_agg])\n\n # act_dict = {name: df[\"activity\"]}\n #\n # act_list.append(act_dict)\n\n # Plot_single_activity day\n h_median = plot_single_activity(df[\"activity\"], name, path_out)[1]\n\n # series = df.activity\n # series.index = series.index.hour\n hourly_bxpl_single(df, name, path_out)\n\n # Remove outliers\n if any(df.activity >= outlier):\n logging.warning(\n f\"Found {sum(df.activity >= outlier)} outliers \"\n f\"in {csv_path.name}, filtering them out.\")\n\n # Crop df to plausible measurements\n df = df[df.activity < outlier]\n\n if len(df) > 0:\n name += \"_removed-ols\"\n\n # Plot_single_activity day\n h_median = plot_single_activity(\n df[\"activity\"], name, path_out)[1]\n else:\n logging.warning(f\"All data in {csv_path.name} are outliers, \"\n \"skipping..\")\n continue\n\n df_list.append(df)\n med_list.append(h_median)\n\n df_agg = pd.concat(df_list)\n\n name = \"aggregated\"\n # name_euc = name + \"_euclidean\"\n # name_man = name + \"_manhattan\"\n\n # df_agg_euc = df_agg[df_agg.method == \"euclidean\"]\n # df_agg_man = df_agg[df_agg.method == \"manhattan\"]\n\n # Plot_single_activity day\n # plot_single_activity(df_agg_euc[\"activity\"], name_euc, path_out)\n plot_single_activity(df_agg[\"activity\"], name, path_out)\n\n # series = df.activity\n # series.index = series.index.hour\n\n # hourly_bxpl_single(df_agg_euc, name_euc, path_out)\n hourly_bxpl_single(df_agg, name, path_out)\n\n # Plot all medians\n plot_median_days(med_list, \"median-days\", path_out)\n\n # Plot functional median boxplot\n\n try:\n pass\n\n except KeyboardInterrupt:\n logging.info(\"Manually interrupted script\")\n\n finally:\n # if len(rows) > 0:\n # logging.info(f\"Exporting {len(rows)} rows to CSV\")\n # 
export_csv(rows, row_cols, path_out, hive, rpi, method)\n\n logging.info(\"Done.\")", "def run(args):\r\n print(\"Looking for files...\")\r\n true = sorted(glob(args.true_pattern))\r\n pred = sorted(glob(args.pred_pattern))\r\n if not true:\r\n raise OSError(\"Did not find any 'true' files matching \"\r\n \"pattern {}\".format(args.true_pattern))\r\n if not pred:\r\n raise OSError(\"Did not find any 'true' files matching \"\r\n \"pattern {}\".format(args.pred_pattern))\r\n if len(true) != len(pred):\r\n raise OSError(\"Did not find a matching number \"\r\n \"of true and pred files ({} and {})\"\r\n \"\".format(len(true), len(pred)))\r\n if len(true) != len(set(true)):\r\n raise ValueError(\"Two or more identical file names in the set \"\r\n \"of 'true' files. Cannot uniquely match true/pred \"\r\n \"files\")\r\n if len(pred) != len(set(pred)):\r\n raise ValueError(\"Two or more identical file names in the set \"\r\n \"of 'pred' files. Cannot uniquely match true/pred \"\r\n \"files\")\r\n\r\n pairs = list(zip(true, pred))\r\n if args.show_pairs:\r\n print(\"PAIRS:\\n{}\".format(pairs))\r\n # Load the pairs\r\n print(\"Loading {} pairs...\".format(len(pairs)))\r\n l = lambda x: [np.load(f)[\"arr_0\"] if os.path.splitext(f)[-1] == \".npz\" else np.load(f) for f in x]\r\n np_pairs = list(map(l, pairs))\r\n for i, (p1, p2) in enumerate(np_pairs):\r\n if len(p1) != len(p2):\r\n print(\"Not equal lengths: \", pairs[i], \"{}/{}\".format(len(p1),\r\n len(p2)))\r\n np_pairs[i] = trim(p1, p2)\r\n if args.wake_trim_min:\r\n print(\"OBS: Wake trimming of {} minutes (period length {} sec)\"\r\n \"\".format(args.wake_trim_min, args.period_length_sec))\r\n np_pairs = wake_trim(np_pairs,\r\n args.wake_trim_min,\r\n args.period_length_sec)\r\n true, pred = map(lambda x: x.astype(np.uint8).reshape(-1, 1), concatenate_true_pred_pairs(pairs=np_pairs))\r\n labels = None\r\n if args.ignore_classes:\r\n print(\"OBS: Ignoring class(es): {}\".format(args.ignore_classes))\r\n labels = list((set(np.unique(true)) | set(np.unique(pred))) - set(args.ignore_classes))\r\n\r\n if args.group_non_rem:\r\n ones = np.ones_like(true)\r\n true = np.where(np.isin(true, [1, 2, 3]), ones, true)\r\n pred = np.where(np.isin(pred, [1, 2, 3]), ones, pred)\r\n labels.pop(labels.index(2))\r\n labels.pop(labels.index(3))\r\n\r\n cm = confusion_matrix(true, pred, labels=labels)\r\n if args.normalized:\r\n cm = cm.astype(np.float64)\r\n cm /= cm.sum(axis=1, keepdims=True)\r\n\r\n # Pretty print\r\n classes = len(cm)\r\n cm = pd.DataFrame(data=cm,\r\n index=[\"True {}\".format(i) for i in range(classes)],\r\n columns=[\"Pred {}\".format(i) for i in range(classes)])\r\n p = \"Raw\" if not args.normalized else \"Normed\"\r\n print(f\"\\n{p} Confusion Matrix:\\n\")\r\n print(cm.round(args.round))\r\n\r\n # Print metrics\r\n f1 = f1_scores_from_cm(cm)\r\n prec = precision_scores_from_cm(cm)\r\n recall = recall_scores_from_cm(cm)\r\n metrics = pd.DataFrame({\r\n \"F1\": f1,\r\n \"Precision\": prec,\r\n \"Recall/Sens.\": recall\r\n }, index=[\"Class {}\".format(i) for i in range(classes)])\r\n metrics = metrics.T\r\n metrics[\"mean\"] = metrics.mean(axis=1)\r\n print(f\"\\n{p} Metrics:\\n\")\r\n print(np.round(metrics.T, args.round), \"\\n\")", "def compare_files(input_index_file, output_index_file ):\n \n # -------------\n # open the input index file for reading\n # -------------\n input_set = open_read_file(input_index_file)\n\n # -------------\n # open the output index file for reading\n # -------------\n output_set = 
open_read_file(output_index_file)\n\n # -------------\n # get the difference in the files where\n # the input_set is the larger set\n # -------------\n unproc_files = set_difference(output_set, input_set)\n #print unproc_files\n\n return unproc_files", "def main():\n\n parser = argparse.ArgumentParser(\n description=\"Compare the metadata content of two files\"\n )\n\n parser.add_argument(\n \"files\",\n nargs=2,\n metavar=\"FILE\",\n help=\"The names of two files to compare\",\n )\n\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"print detailed output on screen\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--ordered\",\n action=\"store_true\",\n help=\"When comparing lists, check the element order too.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--drop\",\n nargs=\"*\",\n default=None,\n metavar=\"KEY\",\n help=\"Keys to drop from metadata retrieved from file\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--mode\",\n default=\"lite\",\n metavar=\"MODE\",\n type=str,\n choices=[\"tiny\", \"lite\", \"full\", \"peeker\"],\n help=\"\"\"\\\n This flag provides the user capability to select the amount of\n metadata retrieved. There three options:\n tiny (only those values used in PyJobTransforms),\n lite (same output as dump-athfile)\n and full ( all available data found)\n \"\"\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--type\",\n default=None,\n metavar=\"TYPE\",\n type=str,\n choices=[\"POOL\", \"BS\"],\n help=\"\"\"\\\n The file type of the input filename. By default, it tries to\n determine itself the file type of the input.\n \"\"\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--filter\",\n default=[],\n metavar=\"FILTER\",\n nargs=\"+\",\n type=str,\n help=\"Expression to select specific metadata fields to retrieve.\",\n )\n\n parser.add_argument(\n \"-x\",\n \"--diff-format\",\n default=\"simple\",\n type=str,\n choices=[\"simple\", \"diff\"],\n help=\"Switch between 'simple' or 'diff' style differences \",\n )\n\n parser.add_argument(\n \"--promote\",\n default=None,\n type=bool,\n help=\"Force promotion or not of the metadata keys \",\n )\n\n args = parser.parse_args()\n\n try:\n diff = meta_diff(\n args.files,\n verbose=args.verbose,\n ordered=args.ordered,\n drop=args.drop,\n mode=args.mode,\n meta_key_filter=args.filter,\n file_type=args.type,\n promote=args.promote,\n diff_format=args.diff_format,\n )\n except (ValueError, IndexError):\n print(\"you must supply two files to compare\")\n sys.exit(1)\n except ReferenceError:\n print(\"no such file\")\n sys.exit(1)\n\n if diff:\n print(\"\\n\".join(diff))\n sys.exit(1)\n\n sys.exit(0)", "def get_resulting_diffs():\n diff_dirpath = application.join_abs_path(\n EMPTY_TEST_DIR, application.OUTPUT_DIR_NAME)\n diffleft_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_LEFT_FILENAME)\n diffright_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_RIGHT_FILENAME)\n\n diff_left = read_gzip_file_lines_into_set(diffleft_filename)\n diff_right = read_gzip_file_lines_into_set(diffright_filename)\n\n return diff_left, diff_right", "def DiffResults(marker, new_results, old_results, diff_results, strip_reason):\n old_file = open(old_results, \"r\")\n new_file = open(new_results, \"r\")\n diff_file = open(diff_results, \"a\") \n\n # Read lines from each file\n ndict = new_file.readlines()\n cdict = old_file.readlines()\n\n # Write marker to diff file\n diff_file.writelines(marker + \"\\n\")\n diff_file.writelines(\"###############\\n\")\n\n # Strip reason 
from result lines\n if strip_reason is True:\n for i in range(0, len(ndict)):\n ndict[i] = ndict[i].split(' ')[0] + \"\\n\"\n for i in range(0, len(cdict)):\n cdict[i] = cdict[i].split(' ')[0] + \"\\n\"\n\n # Find results in new_results missing in old_results\n new_count=0\n for line in ndict:\n if line not in cdict:\n diff_file.writelines(\"+ \" + line)\n new_count += 1\n\n # Find results in old_results missing in new_results\n missing_count=0\n for line in cdict:\n if line not in ndict:\n diff_file.writelines(\"- \" + line)\n missing_count += 1\n\n logging.info(marker + \" >>> \" + str(new_count) + \" new, \" + str(missing_count) + \" misses\")\n\n diff_file.writelines(\"\\n\\n\")\n\n old_file.close()\n new_file.close()\n diff_file.close()\n return", "def diffcuv(ctx, input_files):\n assert len(input_files) == 2\n diff_coverage_files(input_files[0].name, input_files[1].name, ctx.obj)", "def main():\n filter_freq = 1.e4\n re_sample_freq = 1.e5\n glob_search = '*.log'\n\n # parse the command line arguments\n parser = argparse.ArgumentParser(description=\"Filters files in a directory based on a file extension.\")\n parser.add_argument('-d', '--directory', type=str, nargs=1,\n help=\"directory of files to filter. Default is the current directory.\")\n parser.add_argument('-ff', '--filter-freq', type=float, nargs=1,\n help=\"low-pass filter frequency cutoff. Default is {0} Hz\".format(filter_freq))\n parser.add_argument('-osr', '--out-sample-rate', type=float, nargs=1,\n help=\"output sample rate. Default is {0} Hz\".format(re_sample_freq))\n parser.add_argument('-g', '--glob', type=str, nargs=1,\n help=\"Unix pattern to search for files in the directory. Default is \\'*.log\\', which finds all\"\n \" files with a '.log' extension. Must surround with quotes.\")\n parser.add_argument('-r', '--recursive', action='store_true',\n help=\"search for files recursively.\")\n args = parser.parse_args()\n\n directory = '.'\n # Use the command line arguments to set our variables, if necessary.\n if args.directory is not None:\n directory = args.directory[0]\n\n if args.filter_freq is not None:\n filter_freq = args.filter_freq[0]\n\n if args.out_sample_rate is not None:\n re_sample_freq = args.out_sample_rate[0]\n\n if args.glob is not None:\n glob_search = args.glob[0]\n print glob_search\n\n # find all of the files in the current directory with .log extension.\n files = []\n for root, dirname, filenames in os.walk(directory):\n for filename in fnmatch.filter(filenames, glob_search):\n files.append(os.path.join(root, filename))\n # Only do top level directory, unless recursive is specified.\n if not args.recursive:\n break\n\n print \"Filter frequency: {0} Hz\".format(filter_freq)\n print \"Output sample frequency: {0} Hz\".format(re_sample_freq)\n print \"Glob search: {0}\".format(glob_search)\n print \"Recursive: {0}\".format(args.recursive)\n print \"Filtering these files:\", files\n print \"\\n----------------------------\\n\"\n\n p = Pool()\n\n # add the file names and filter frequency and output sample rate to a tuple to pass in multiprocessing\n pool_args = []\n for filename in files:\n tup = (filename, filter_freq, re_sample_freq)\n pool_args.append(tup)\n\n # filter each file\n output_file_names = p.map(_filter_wrap, pool_args)\n\n print \"\\n----------------------------\\n\"\n print \"Output files:\", output_file_names", "def main():\n\targs = get_args()\n\tfiles=args.file\n\n\tlogging.basicConfig(\n\t\tfilename='.log',\n\t\tfilemode='w',\n\t\tlevel=logging.DEBUG if args.debug else 
logging.CRITICAL)\n\t\n\tfor txt in files:\n\t\tif not os.path.isfile(txt):\n\t\t\tdie(msg='\"{}\" is not a file'.format(txt))\n\n\t\n\tlogging.debug('file1={}, file2={}'.format(files[0], files[1]))\n\n# Can't use below because spaces screw up hamming distance, wonder if you could do it to skip space\n#\tstr1=''\n#\tstr2=''\n#\twith open(files[0]) as fh1:\n#\t\tstr1=fh1.read().replace('\\n', '')\n#\n#\twith open(files[1]) as fh2:\n#\t\tstr2=fh2.read().replace('\\n', '')\n\n#\tprint(str1)\n#\tprint(str2)\n\n\tword1=[]\n\tword2=[]\n\twith open(files[0]) as fh1:\n\t\tword1=[word for line in fh1 for word in line.split()]\n\n\twith open(files[1]) as fh2:\n\t\tword2=[word for line in fh2 for word in line.split()]\n\t\n\tcombo=list(zip(word1, word2))\n\thamm=0\n\tfor word1, word2 in combo:\n\t\td=dist(word1, word2)\n\t\thamm+=d\n\t\tlogging.debug(msg='s1= {}, s2= {}, d= {}'.format(word1, word2, d))\n\tprint(hamm)", "def main():\n\n args = get_args()\n file = args.file\n\n for line in args.file:\n word1, word2 = line.split()\n change = abs(len(word1)-len(word2))\n for char1, char2 in zip(word1, word2):\n if char1 != char2:\n change += 1\n if change >= args.min:\n print(f'{change:8}:{word1:20}{word2:20}')\n\n # for line in args.file:\n # word1, word2 = line.rstrip().split()\n #\n # l1, l2 = len(word1), len(word2)\n # distance = abs(l1 - l2)\n #\n # for i in range(min(l1, l2)):\n # if word1[i] != word2[i]:\n # distance += 1\n #\n # if distance >= args.min:\n # print(f'{distance:8}:{word1:20}{word2:20}')\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # for line in args.file:\n # word1, word2 = line.split()\n # n_change = abs(len(word1) - len(word2))\n # for char1, char2 in zip(word1, word2):\n # if char1 != char2:\n # n_change += 1\n # if n_change >= args.min:\n # print(f'{n_change:8}:{word1:20}{word2:20}')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # for line in args.file:\n # word1, word2 = line.split()\n # n_change = abs(len(word1) - len(word2))\n # for char1, char2 in zip(word1, word2):\n # if char1 != char2:\n # n_change += 1\n # if n_change >= args.min:\n # print(f'{n_change:8}:{word1:20}{word2:20}')", "def run():\n parser = argparse.ArgumentParser(description='This script will count the number of unique headers (1st line) amongs all files')\n\n # positional args\n parser.add_argument(\"files\", help=\"Paths to input files\", nargs=\"+\")\n\n # optional args\n parser.add_argument(\"--keep-none\", default = False, action='store_true', dest = 'keep_none', help=\"Whether or not to count empty files\")\n parser.add_argument(\"-v\", \"--verbose\", default = True, action='store_false', dest = 'verbose', help=\"Whether or not to print the number of lines to console\")\n\n args = parser.parse_args()\n\n main(**vars(args))", "def process_files(path, patterns, callback):\r\n stats = dict.fromkeys(patterns, 0)\r\n for pattern, line_count in match_filenames(\r\n path, patterns, callback):\r\n stats[pattern] += line_count\r\n return stats", "def compare_mc():\n\n mc_file_base = sys.argv[1]\n da_file_base = sys.argv[2]\n dst_mc_base = sys.argv[3]\n dst_da_base = sys.argv[4]\n\n run_number = da_file_base[da_file_base.find('/r')+2:da_file_base.find('/r')+6]\n\n mc_sorter = sorter_func(mc_file_base)\n mc_file_list = sorted(glob(mc_file_base + '*.h5'), key=mc_sorter)\n da_sorter = sorter_func(da_file_base)\n da_file_list = sorted(glob(da_file_base + '*.h5'), key=da_sorter)\n\n mc_hit = load_dsts(glob(dst_mc_base + '*.h5'), 'RECO', 'Events')\n da_hit = load_dsts(glob(dst_da_base + '*.h5'), 'RECO', 'Events')\n \n dfcols = 
['wf_sum', 'p0', 'cp0', 'p1', 'cp1', 'p2', 'cp2', 'p3', 'cp3', 'p4', 'cp4', 'p5', 'cp5', 'p6', 'cp6', 'p7', 'cp7', 'p8', 'cp8', 'p9', 'cp9', 'p10', 'cp10', 'p11', 'cp11']\n pmt_scales = [1, 0.79, 1, 0.80, 0.72, 1.11, 1.03, 0.82, 0.82, 1.03, 0.89, 0.95, 0.82]\n mc_sums = pd.DataFrame(columns=dfcols)\n for fn in mc_file_list:\n print('Reading mc file ', fn)\n pmaps = load_pmaps(fn)\n print('...data got')\n for evt, pmap in pmaps.items():\n\n if len(pmap.s2s) == 1 and len(pmap.s1s) == 1:\n try:\n mc_hit[mc_hit.event==evt].X.values[0]\n hx = mc_hit[mc_hit.event==evt].X.values\n hy = mc_hit[mc_hit.event==evt].Y.values\n hz = mc_hit[mc_hit.event==evt].Z.values\n hq = mc_hit[mc_hit.event==evt].Q.values\n #for s2 in pmap.s2s:\n s2 = pmap.s2s[0]\n rs2 = pmf.rebin_peak(s2, 2)\n p_z = (rs2.times - pmap.s1s[0].time_at_max_energy)/1000\n #if hz.shape[0] == len(rs2.times):\n new_row = [s2.pmts.waveform(x).sum() for x in range(12)]\n cn_row = [life_correction(hx, hy, hz, hq, p_z, rs2.pmts.waveform(x)) for x in range(12)]\n new_row = np.column_stack((new_row, cn_row)).flatten()\n ## new_row.insert(0, s2.total_energy)\n new_row = np.insert(new_row, 0, s2.total_energy)\n \n mc_sums.loc[len(mc_sums)] = list(new_row)\n except IndexError:\n continue\n\n da_sums = pd.DataFrame(columns=dfcols)\n for fn in da_file_list:\n print('Reading data file ', fn)\n pmaps = load_pmaps(fn)\n print('...data got')\n for evt, pmap in pmaps.items():\n\n if len(pmap.s2s) == 1 and len(pmap.s1s) == 1:\n try:\n da_hit[da_hit.event==evt].X.values[0]\n hx = da_hit[da_hit.event==evt].X.values\n hy = da_hit[da_hit.event==evt].Y.values\n hz = da_hit[da_hit.event==evt].Z.values\n hq = da_hit[da_hit.event==evt].Q.values\n #for s2 in pmap.s2s:\n s2 = pmap.s2s[0]\n rs2 = pmf.rebin_peak(s2, 1)\n p_z = (rs2.times - pmap.s1s[0].time_at_max_energy)/1000\n #print('Check: ', hz.shape[0], len(rs2.times))\n #if hz.shape[0] == len(rs2.times):\n new_row = [s2.pmts.waveform(x).sum() for x in range(12)]\n cn_row = [life_correction(hx, hy, hz, hq, p_z, rs2.pmts.waveform(x)) for x in range(12)]\n new_row = np.column_stack((new_row, cn_row)).flatten()\n #new_row.insert(0, s2.total_energy)\n new_row = np.insert(new_row, 0, s2.total_energy)\n \n da_sums.loc[len(da_sums)] = list(new_row)\n except IndexError:\n continue\n\n trg0 = mc_sums['p0'] * pmt_scales[1] > 8835\n trg2 = mc_sums['p2'] * pmt_scales[3] > 7836\n ## Make some plots\n mc_sums[trg0 & trg2].wf_sum.plot.hist(bins=np.linspace(0, 1.2e6, 100),\n label='MC',\n density=True, histtype='step')\n da_sums.wf_sum.plot.hist(bins=np.linspace(0, 1.2e6, 100), label='data',\n density=True, histtype='step')\n plt.title('PMT sum')\n plt.xlabel('Summed PMT charge (pe)')\n plt.yscale('log')\n plt.show()\n\n ## Attempt big fit. 
(only lifetime corrected [1::2] done in function)\n efunc = general_chi2(mc_sums.drop('wf_sum', axis=1).values.T)\n ## full_dats = np.apply_along_axis(np.histogram, 1,\n ## da_sums.drop('wf_sum', axis=1).values.T,\n ## bins=np.linspace(0, 120000, 100),\n ## density=True)[:, 0]\n full_dats = np.apply_along_axis(np.histogram, 1,\n da_sums.drop('wf_sum', axis=1).values.T[1::2],\n bins=np.linspace(0, 120000, 100))[:, 0]\n dat_norms = np.fromiter((s.sum() for s in full_dats), np.int)\n full_dats = np.concatenate(full_dats)\n errs = np.sqrt(full_dats)\n errs[errs<=0] = 3\n par_seed = pmt_scales[1:]\n pfit, cov, infodict, msg, ier = leastsq(efunc, par_seed,\n args=(full_dats, errs, dat_norms),\n full_output=True)\n print('Fit res: ', pfit, ier, infodict, msg)\n trg0 = mc_sums['p0'] * pfit[1] * pfit[0] > 8835\n trg2 = mc_sums['p2'] * pfit[1] * pfit[2] > 7836\n fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(20,6))\n #mc_sums['new_sum'] = mc_sums.drop('wf_sum', axis=1).sum(axis=1)\n mc_sums['new_sum'] = mc_sums[dfcols[1::2]].multiply(pfit).sum(axis=1)\n mc_sums['new_csum'] = mc_sums[dfcols[2::2]].multiply(pfit).sum(axis=1)\n da_sums['csum'] = da_sums[dfcols[2::2]].sum(axis=1)\n for cname, ax, p in zip(dfcols[2::2], axes.flatten(), pfit):\n ax.set_title('PMT '+cname[2:]+' pe distribution')\n ax.set_xlabel('Photoelectrons')\n ax.set_ylabel('AU')\n mc_sums[trg0 & trg2][cname].multiply(p).plot.hist(ax=ax,\n bins=np.linspace(0, 120000, 100),\n label='MC', density=True,\n histtype='step')\n da_sums[cname].plot.hist(ax=ax, bins=np.linspace(0, 120000, 100),\n label='data', density=True, histtype='step')\n ## if 'p1' == cname:\n ## mc_vals = mc_sums[trg0 & trg2][cname].values\n ## da_vals = da_sums[cname].values\n ## ffunc = simple_pmt1_fit(mc_vals)\n ## dcv, hbins = np.histogram(da_vals, density=True,\n ## bins=np.linspace(0, 120000, 100))\n ## hbins = shift_to_bin_centers(hbins)\n ## errs = np.sqrt(dcv)\n ## errs[errs==0] = 3\n ## fvals = fitf.fit(ffunc, hbins, dcv, seed=(1), sigma=errs)\n ## ax.plot(hbins, fvals.fn(hbins), label='fit attempt')\n ## print('fit result: ', fvals.values, fvals.errors)\n \n ax.legend()\n plt.tight_layout()\n fig.show()\n plt.show()\n\n mc_sums[trg0 & trg2].new_sum.plot.hist(bins=np.linspace(0, 1.2e6, 100),\n label='MC',\n density=True, histtype='step')\n da_sums.wf_sum.plot.hist(bins=np.linspace(0, 1.2e6, 100), label='data',\n density=True, histtype='step')\n plt.title('PMT sum')\n plt.xlabel('Summed PMT charge (pe)')\n plt.yscale('log')\n plt.show()\n\n mc_sums[trg0 & trg2].new_csum.plot.hist(bins=np.linspace(0, 1.2e6, 100),\n label='MC',\n density=True, histtype='step')\n da_sums.csum.plot.hist(bins=np.linspace(0, 1.2e6, 100), label='data',\n density=True, histtype='step')\n plt.title('PMT sum')\n plt.xlabel('Summed PMT charge (pe)')\n plt.yscale('log')\n plt.show()\n\n ## mc_sums.new_sum.plot.hist(bins=100, label='MC',\n ## density=True, histtype='step')\n ## da_sums.wf_sum.plot.hist(bins=100, label='data',\n ## density=True, histtype='step')\n ## plt.title('PMT sum')\n ## plt.xlabel('Summed PMT charge (pe)')\n ## plt.yscale('log')\n ## plt.show()", "def process_files(path, patterns, callback):\n stats = dict.fromkeys(patterns, 0)\n for pattern, line_count in match_filenames(\n path, patterns, callback):\n stats[pattern] += line_count\n return stats", "def clippingcounter(clipping_list, input_dir):\n\t\texcludelist=[]\n\t\t\n\t\t#dicts to store 
results\n\t\tdicti=defaultdict(float)\n\t\tmatchesdicti=defaultdict(list)\n\t\tresults=[]\n\t\t\n\t\tclipping_list=[re.compile(\"[^web|i]\\W(\"+i+\")\\W\") if i in [\"cams?\", \"sites?\"] else re.compile(\"\\W(\"+i+\")\\W\") for i in clipping_list]\n\t\t#clipping_list=[re.compile(\"\\W(\"+i+\")\\W\") for i in clipping_list]\n\t\tclipping_list=set(clipping_list)\n\t\tprint [i.pattern for i in clipping_list]\n\t\t#iterate and match\n\t\tfor dir in [i for i in os.listdir(input_dir) if not i.startswith(\".\")]:\n\t\t\tprint dir\n\t\t\tfor fili in [i for i in os.listdir(os.path.join(input_dir, dir)) if not i.startswith(\".\")]:\n\t\t\t\twith codecs.open(os.path.join(input_dir, dir, fili), \"r\", \"utf-8\") as inputtext:\n\t\t\t\t\tinputad=ct.adtextextractor(inputtext.read(), fili).lower()\n\t\t\t\t#result is a list of lists which contain matches for each regex/acronym\n\t\t\t\tresult=[([m for m in i.findall(inputad) if not m in excludelist], i.pattern) for i in clipping_list] \n\t\t\t\t# o=[(r,os.path.join(input_dir, dir, fili)) for r in result if len(r[0]) > 2]\n# \t\t\t\tif o:\n# \t\t\t\t\tprint o\n\t\t\t\tresults.append([len(matches) for matches, pattern in result])\n\t\t\t\tfor matches, pattern in result:\n \t\t\t\t\t#the dicti is {pattern:count, pattern: count, ...}\n \t\t\t\t\tdicti[pattern]=dicti[pattern]+len(matches)\n \t\t\t\t\tmatchesdicti[pattern]=matchesdicti[pattern]+matches\n\t\tprint \"\\n\".join([\":\".join((i, str(dicti[i]), \"|\".join(set(matchesdicti[i])))) for i in sorted(dicti, key=dicti.get, reverse=True)])\t\n\t\tfor entry in {k:v for k,v in matchesdicti.items() if v > 10}:\n\t\t\tprint entry\n\t\t\ttk.tokenfinder([re.sub(\"[\\(\\)]\", \"\", entry)], \"/Users/ps22344/Downloads/craig_0208\")\n\t\treturn results", "def ParseFilterFile(input_lines):\n # Strip comments and whitespace from each line and filter non-empty lines.\n stripped_lines = (l.split('#', 1)[0].strip() for l in input_lines)\n filter_lines = [l for l in stripped_lines if l]\n\n # Split the tests into positive and negative patterns (gtest treats\n # every pattern after the first '-' sign as an exclusion).\n positive_patterns = [l for l in filter_lines if l[0] != '-']\n negative_patterns = [l[1:] for l in filter_lines if l[0] == '-']\n return positive_patterns, negative_patterns", "def compare_predictions():\n validation_labels = np.array(pd.read_csv(val_true_labels_dir + dataset_version + 'validation_labels.csv', index_col=0))\n validation_labels = np.reshape(validation_labels, (-1))\n\n diff_between_files = []\n also1s = []\n also2s = []\n for filename1 in os.listdir(val_predictions_dir):\n if filename1.endswith(\".csv\"):\n for filename2 in os.listdir(val_predictions_dir):\n if filename2.endswith(\".csv\"):\n if filename1 < filename2:\n wrong1 = 0\n wrong2 = 0\n diff_between = 0\n also1 = 0\n also2 = 0\n diff_corr1 = 0\n diff_corr2 = 0\n f1 = np.array(pd.read_csv(val_predictions_dir + filename1, index_col=0))\n f1 = np.reshape(f1, (-1))\n f2 = np.array(pd.read_csv(val_predictions_dir + filename2, index_col=0))\n f2 = np.reshape(f2, (-1))\n for line in range(f1.shape[0]):\n if f1[line] != validation_labels[line]:\n wrong1 += 1\n if f2[line] != validation_labels[line]:\n wrong2 += 1\n if f1[line] != f2[line]:\n diff_between += 1\n if f1[line] == validation_labels[line]:\n diff_corr1 += 1\n if f2[line] == validation_labels[line]:\n diff_corr2 += 1\n if f1[line] != validation_labels[line]:\n if f2[line] != validation_labels[line]:\n also2 += 1\n if f2[line] != validation_labels[line]:\n if f1[line] != 
validation_labels[line]:\n also1 += 1\n\n diff_between_files.append(diff_between)\n print(filename1)\n print('Wrongly predicted by 1: ' + str(100 * wrong1 / f1.shape[0]) + '%')\n print(filename2)\n print('Wrongly predicted by 2: ' + str(100 * wrong2 / f1.shape[0]) + '%')\n print()\n print('Differences between files: ' + str(100 * diff_between / f1.shape[0]) + '%')\n print(f'\\t of which correct by 1 {100 * diff_corr1 / diff_between}%, by 2 {100 * diff_corr2 / diff_between}%')\n also1s.append(also1 / wrong2)\n also2s.append(also2 / wrong1)\n print('Wrongly predicted by other among wrong ones: ' + str(100 * also2 / wrong1) + '%, ' + str(\n 100 * also1 / wrong2) + '%\\n\\n\\n')\n\n print('Max, min and avg differences between files:')\n print(str(100 * max(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * min(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * np.mean(diff_between_files) / validation_labels.shape[0]) + '%')\n\n print('\\nWrongly predicted by first that were also wrongly predicted by second:')\n print('Max: ' + str(100 * max(also2s)) + '%')\n print('Min: ' + str(100 * min(also2s)) + '%')\n print('Avg: ' + str(100 * np.mean(also2s)) + '%')\n\n print('\\nWrongly predicted by second that were also wrongly predicted by first:')\n print('Max: ' + str(100 * max(also1s)) + '%')\n print('Min: ' + str(100 * min(also1s)) + '%')\n print('Avg: ' + str(100 * np.mean(also1s)) + '%')", "def prescient_output_diff(results_dir_a, results_dir_b, numeric_fields, tolerances):\n # Compiles the names of output files based on results dir a and collects the DataFrames for each.\n csv_files_results_a = []\n dfs_results_a = {}\n files_to_exclude = {'Options.csv', }\n\n has_difference = False\n\n for csv_file in os.scandir(results_dir_a):\n if csv_file.name[-3:] == 'csv' and csv_file.name not in files_to_exclude:\n csv_file_root = csv_file.name[:-4]\n csv_file_path = csv_file.path\n\n csv_files_results_a.append(csv_file)\n dfs_results_a[csv_file_root] = pd.read_csv(csv_file_path)\n \n # Compiles the corresponding analog DataFrames for results_dir_b.\n dfs_results_b = {}\n\n for csv_file in csv_files_results_a:\n csv_file_root = csv_file.name[:-4]\n csv_path_results_b = os.path.join(results_dir_b, csv_file.name)\n\n try:\n df = pd.read_csv(csv_path_results_b)\n except FileNotFoundError:\n # No equivalent csv file found for results set b.\n raise(FileNotFoundError('No matching file for {0} found in the results_dir_b directory.'.format(csv_file.name)))\n else:\n dfs_results_b[csv_file_root] = df\n \n # Static definitions for numeric fields in each output file and the diff tolerance for each. 
\n # TODO: Why is there extra whitespace on certain column names?\n\n # Function used to check differences between results sets.\n def _check_all_differences(row):\n \"\"\"Compares all numerical fields and returns True if any numeric column's difference exceeds its tolerance.\"\"\"\n diff = False\n\n numeric_field_tolerances = tolerances.get(csv_file_root, {})\n\n for field in numeric_fields[csv_file_root]:\n # Use some default value if no tolerance for the field is specified.\n tol = numeric_field_tolerances.get(field, 1e-2)\n\n if abs(row['{0}_A'.format(field)] - row['{0}_B'.format(field)]) > tol:\n # \"any\" -> short-circuit\n diff = True\n return diff\n\n return diff\n\n # Get the root directory name of each set of results.\n results_a_root = os.path.split(results_dir_a)[-1]\n results_b_root = os.path.split(results_dir_b)[-1]\n\n # Create the diff output directory if necessary. This goes in the same parent directory as results_dir_a.\n output_dir = os.path.join(\n os.path.split(results_dir_a)[0], \n '{0}_{1}_diff'.format(results_a_root, results_b_root)\n )\n os.makedirs(output_dir, exist_ok=True)\n\n for csv_file in csv_files_results_a:\n # Iterate over each output file and generate a difference report.\n csv_file_root = csv_file.name[:-4]\n\n df_a = dfs_results_a[csv_file_root]\n df_b = dfs_results_b[csv_file_root]\n\n # Skip if DataFrame (a) is empty.\n if df_a.empty:\n continue\n\n # Join corresponding DataFrames and determine if any numeric column has a difference.\n df_joined = df_a.join(df_b, how='left', lsuffix='_A', rsuffix='_B')\n df_joined['has_difference'] = df_joined.apply(lambda row: _check_all_differences(row), axis=1)\n\n # Filter flagged differences.\n df_diff_report = df_joined.loc[df_joined['has_difference'] == True]\n\n if not df_diff_report.empty:\n # At least one difference exceeded its tolerance level.\n has_difference = True\n print('df_diff_report')\n print(csv_file)\n\n # Output diff results to csv files.\n df_diff_report.to_csv(os.path.join(output_dir, csv_file.name))\n \n return has_difference", "def do_diff(sourcelist):\n for source in sourcelist:\n dc = filecmp.dircmp('output-pandoc/'+source, 'output-panzer/'+source)\n if dc.right_only or dc.left_only or dc.diff_files:\n print(pretty_title(source))\n if dc.right_only:\n print('* only in output-panzer/%s:' % source)\n for line in pretty_list(dc.right_only):\n print(' ' + line)\n if dc.left_only:\n print('* only in output-pandoc/%s:' % source)\n for line in pretty_list(dc.left_only):\n print(' ' + line)\n if dc.diff_files:\n print('* differing:')\n for line in pretty_list(dc.diff_files):\n print(' ' + line)", "def compareFiles(baseFile_path, testTempFile):\n baseFile = open(baseFile_path, \"r\")\n testTempFile.seek(0) \n## only lines that have changed\n testoutput = []\n testTempFile.seek(0) \n baseFile.seek(0)\n m_base = baseFile.readlines()\n clean_base = []\n m_temp = testTempFile.readlines() \n clean_temp = []\n ignore_chars = '\\n\\t '\n for line in m_base:\n if not line == '\\n':\n clean_base += [line.strip(ignore_chars)]\n for line in m_temp: \n if not line == '\\n':\n clean_temp += [line.strip(ignore_chars)] \t\n for line in difflib.context_diff(clean_base, clean_temp):\n testoutput += [line] \n \n## all lines diff \n# diff = difflib.ndiff(baseFile.readlines(), testTempFile.readlines())\n# print ''.join(diff)\n baseFile.close() \n diffFile_name = baseFile_path.replace(\"_Base.output\",\".diff\")\n diffFile = open(diffFile_name, \"w\")\n \n if len(testoutput) > 1:\n for line in 
difflib.context_diff(m_base, m_temp):\n print line\n diffFile.write(line)\n diffFile.close() \n assert ( len(testoutput) == 1 )", "def main():\n args = get_args()\n debugging = args.debug\n f1, f2 = args.files\n\n logging.basicConfig(\n filename='.log',\n filemode='w',\n level=logging.DEBUG if args.debug else logging.CRITICAL\n )\n\n if f1 is None or not os.path.isfile(f1):\n print(f'\\\"{f1}\\\" is not a file')\n exit(1)\n if f2 is None or not os.path.isfile(f2):\n print(f'\\\"{f2}\\\" is not a file')\n exit(1)\n\n logging.debug('file1 = {}, file1 = {}'.format(f1,f2))\n\n in1 = open(f1, 'r').read().split()\n in2 = open(f2, 'r').read().split()\n\n dist_sum = 0\n for i in range(len(in1)):\n dist_sum += dist(in1[i], in2[i])\n print(dist_sum)", "def main(**kwargs):\n # get the args that were passed\n files = kwargs.pop('files', [])\n keep_none = kwargs.pop('keep_none', False)\n verbose = kwargs.pop('verbose', False)\n\n # hold all the headers in a dict with counter\n headers = defaultdict(int)\n\n # get all the headers\n for f in files:\n headers[get_header(f)] += 1\n\n # remove a 'None' key, if present (means there were empty files passed)\n if not keep_none:\n headers.pop(None, None)\n\n num_headers = len(headers.keys())\n\n if verbose:\n print(num_headers)\n return(num_headers)", "def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)", "def main():\n\tHOST = \"192.168.56.103\"\n\tPORT = 53338\n\tr = remote(HOST,PORT)\n\tfor i in xrange(1,12):\n\t\t#First option lets us get a file\n\t\tprint r.recvuntil(\"option: \")\n\t\tr.sendline(\"1\")\n\t\tprint r.recvuntil(\"Filename:\")\n\n\t\t#Get frequency analysis from flag.txt\n\t\tfileName = \"../flag.txt\" #length ~ 11\n\t\tprint len(fileName)\n\t\tdirName = \"./data/\" #length ~ 7\n\t\tprint len(dirName)\n\n\t\t#Null bytes can be removed by using up all the memory for name\n\t\tf = \"/\"*(255-7-11)+fileName\n\t\tprint len(f)\n\t\tr.sendline(f)\t\n\t\tprint r.recvuntil(\"option: \")\n\n\t\t#Now exploit the fact they haven't cleaned up the memory\n\t\tpayload = \"\\x01\"*(4095-i)\n\t\tpayload += \"\\n\"*i\n\t\tr.sendline(\"2\")\n\n\t\t#Grab analysis of 1 byte difference between corrupted memory and flag.txt\n\t\tprint r.recvuntil(\"data:\")\n\t\tr.sendline(payload)\n\t\tbuf = r.recvuntil(\"option:\")\n\t\tprint buf\n\t\t\n\t\t#Write freq analysis difference to temporary file\n\t\tfilename = \"temp\"+str(i)\n\t\tfp = open(filename,\"wb\")\n\t\tfp.write(buf)\n\n\t\t#Now diff temp<i> temp><i+1>\n\t\t#Except for temp0\n\tpass", "def test(self, filename):\n hit = 0\n total = 0\n n = self.n\n for sent in open(filename):\n samp = sent.rstrip('\\n')\n# samp = '~' + samp + '~' \n for i in range(len(samp) - n):\n total = total + 1\n prev = samp[i:i + n - 1]\n pred = self.pred(prev)\n if pred == samp[i + n - 1]:\n 
hit = hit + 1\n \n return hit/total", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' % file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def analyze_files(proj_files, patterns):\n sec_in_projects = defaultdict(list)\n counter = 0\n for project, files in proj_files.items():\n\n if counter % 1000 == 0:\n print('Progress: {:.2%}'.format(counter/len(proj_files)))\n counter += 1\n\n for file in files:\n with open(file) as infile:\n content = infile.read()\n\n for _, tools in patterns.items():\n\n for tool, details in tools.items():\n sec = False\n for pattern in details['Patterns']:\n if pattern.lower() in content.lower():\n sec = True\n if sec:\n sec_in_projects[project].append(tool)\n for project, tools in sec_in_projects.items():\n sec_in_projects[project] = list(set(tools))\n return sec_in_projects", "def test_process_file():\r\n\r\n ## Once you have process_file \"working\", uncomment the 5 lines starting with\r\n ## file1 = \"\"\r\n ## Set kdiffexe to the path to kdiff3.exe on your computer\r\n ## Set file1 and file2 to the original and fixed eversion of the files\r\n ## This should provide a nice visual comparison of the file with and\r\n ## without the fix. In general, this is not a good practice for testing.\r\n ## That is, launching an external applicaiton to show the results.\r\n ##\r\n # file1 = \"\"\r\n # file2 = \"\"\r\n # kdiffexe = '\"C:\\Program Files\\KDiff3\\kdiff3.exe\"'\r\n # cmd = r'{} {} {}'.format(kdiffexe, file1, file2)\r\n # os.system(cmd)\r\n fh.process_file()", "def Number_elements(file1,file2):\n start = time.time()\n\n verified_element = np.intersect1d(np.array(file1), np.array(file2)) \n\n print(len(verified_element))\n print(f'Duration: {time.time() - start} seconds')", "def main(args=None):\n args = parse_args(args)\n\n with multiprocessing.Pool(8) as pool:\n printer = StatusPrinter()\n names = generate_paths(args.input, args.recursive)\n names = printer.set_input(names)\n written = itertools.chain.from_iterable(\n pool.imap_unordered(\n partial(process_file, args.output), names, 1000))\n written = printer.set_output(written)\n\n unique_count, dupe_count, invalid_count = 0, 0, 0\n invalids = []\n for item in written:\n if item == '__duplicate__':\n dupe_count += 1\n elif item.startswith(args.input):\n invalids.append(item)\n invalid_count += 1\n else:\n unique_count += 1\n print('{} unique, {} duplicates, {} invalid ({} total)'.format(\n unique_count, dupe_count, invalid_count,\n invalid_count + unique_count + dupe_count))\n\n print('invalid files: \\n{}'.format('\\n'.join(invalids)))" ]
[ "0.618691", "0.61734617", "0.6023388", "0.5770834", "0.57630765", "0.5635864", "0.56322664", "0.5628903", "0.56088793", "0.5608539", "0.5580531", "0.55753785", "0.5561271", "0.55429035", "0.552018", "0.5514539", "0.5488725", "0.542185", "0.5407473", "0.5395535", "0.53643066", "0.5335616", "0.5331108", "0.532571", "0.5319877", "0.5318068", "0.5305101", "0.5297951", "0.5293746", "0.5293469" ]
0.6542992
0
Return the sum of the first n numbers of sequence S
def linear_sum(S, n):
    if n == 0:
        return 0
    else:
        return linear_sum(S, n - 1) + S[n - 1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_sum(S, n):\n if n == 0:\n return 0\n else:\n return linear_sum(S, n-1) + S[n-1]", "def sum_of_nth( n ):\n if n > 0:\n return sum( range(n + 1) )\n else:\n return 0", "def sum_series(n, first=0, second=1):\n if n == 1:\n return first\n elif n == 2:\n return second\n else:\n return sum_series(n-2, first, second) + sum_series(n-1, first, second)", "def sum_series(n, first=0, second=1):\n\n\tif n == 0:\n\t\treturn 0\n\telif n == 1:\n\t\treturn first\n\telif n == 2:\n\t\treturn second\n\telse: \n\t\treturn sum_series(n-1, first, second) + sum_series(n-2, first, second)", "def sum_series(n, first_value=0, second_value=1):\n for i in range(n):\n first_value, second_value = second_value, first_value + second_value\n return first_value", "def sum_series(n, zeroth, oneth):\n\n if (n == 0):\n return zeroth\n elif (n == 1):\n return oneth\n else:\n return sum_series(n - 1, zeroth, oneth) + \\\n sum_series(n - 2, zeroth, oneth)", "def sum_n_numbers():\n n=int(input(\"Enter a number:\"))\n s=0\n for i in range(n+1):\n s+=i\n print(\"sum=\",s)", "def sum_to(n):\n the_sum = 0\n for counter in range(n+1):\n the_sum = the_sum + counter\n return the_sum", "def sumTo(n):\n \n the_sum = 0 #current sum\n a_number = 1 #where we are\n while a_number <= n:\n the_sum += a_number\n a_number += 1\n return the_sum", "def sumTo(n):\n\n sum_all = (n * (n+1))/2\n\n return sum_all", "def sum_series(n, first=0, second=1):\n if n <= 0 :\n return None\n elif n == 1:\n return first\n elif n == 2:\n return second\n\n return sum_series(n-2, first, second) + sum_series(n-1, first, second)", "def sum_series(n, first_val = 0, second_val = 1):\n if n == 0:\n return first_val\n elif n == 1:\n return second_val\n else:\n return (sum_series(n-1,first_val,second_val) +\n sum_series(n-2, first_val, second_val))", "def sum_series(n, a=0, b=1):\n\tseq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tseq.append(a)\n\t\tif i == 1:\n\t\t\tseq.append(b)\n\t\tif i > 1:\n\t\t\tnth_term = seq[-1] + seq[-2]\n\t\t\tseq.append(nth_term)\n\t\n\tprint(seq)\n\tprint(seq[n])\n\treturn(seq[n])", "def sum_to_n(n):\n total = 0\n for i in range(1,n+1):\n total += i\n return total", "def sumOfSeq(a, d, n):\n return a * n + n * (n - 1) * d / 2", "def integer_sum(n):\n\n sum = 0\n k = 0\n\n # INVARIANT\n # The sum of far is equal to the sum of the first k integer numbers\n # VARIANT: n-k\n #\n while (k!=n):\n k += 1\n sum += k\n\n return sum", "def sum(n):\n if n == 0:\n return 0\n return sum(n - 1) + n", "def sumTotal(n):\n\n sum_total = 0\n\n for i in range(1, n+1):\n sum_total = sum_total + i\n\n return sum_total", "def sum_series(n,zero_val=0,one_val=1):\n if n==0:\n return zero_val\n elif n==1:\n return one_val\n else:\n return sum_series(n-1,zero_val,one_val) + sum_series(n-2,zero_val,one_val)", "def sum_numbers(sequence):\r\n\r\n total = 0\r\n seq = get_numbers(sequence)\r\n for element in seq:\r\n total += element\r\n\r\n return total", "def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result", "def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result", "def sum_series(n,v1=0,v2=1):\n\tL1=v2\n\tL2=v1\n\tif n<0:\n\t\tprint(\"please enter positive int value\")\n\n\telif n==0:\n\t\treturn v1\n\n\telif n==1:\n\t\treturn v2\n\n\telse:\n\t\tfor i in range(n-1):\n\t\t\tC=L1+L2\n\t\t\tL2=L1\n\t\t\tL1=C\n\t\treturn C", "def _smooth_sum(x, n):\n m = len(x)\n result = _np.zeros(m - (n-1))\n for i in 
range(n):\n result += x[i:(m-n+i+1)]\n return result", "def sum_series(n, x = 0, y = 1):\n\n lst = []\n # Append the list at the i index\n for i in range(n):\n if(i == 0):\n lst.append(x)\n elif(i == 1):\n lst.append(y)\n elif(i > 1):\n lst.append((lst[i - 2] + lst[i - 1]))\n nth = lst[i]\n return nth", "def sum_natural(n):\n total, curr = 0 , 1\n\n while curr <= n:\n total, curr = total + curr, curr + 1\n return total", "def consecutiveNumbersSum(self, N):\n\n count = 0\n # nmax = int(-1 + sqrt(1+8*N)/2)\n # print(nmax)\n n = 1\n n2 = n*(n-1)/2 + n\n while n2 <= N:\n if (N-n2) % n == 0:\n # print(n)\n count += 1\n n += 1\n n2 = n*(n-1)/2 + n\n\n # Note N-(n2-n) % n == N-n2 % n\n return count", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def sum_series(n, x=0, y=1):\n if n == 0:\n return x\n elif n == 1:\n return y\n return sum_series(n - 1, x, y) + sum_series(n - 2, x, y)", "def sum_series(n, x=0, y=1):\n if n == 0:\n return x\n elif n == 1:\n return y\n else:\n return sum_series(n - 1, x, y) + sum_series(n - 2, x, y)" ]
[ "0.76841", "0.7444881", "0.72330695", "0.72233206", "0.71733654", "0.7087734", "0.7078644", "0.70632255", "0.7057611", "0.7053824", "0.70530355", "0.70193213", "0.6922918", "0.68946594", "0.6832259", "0.6748849", "0.67027134", "0.6694901", "0.6611888", "0.6601319", "0.6569598", "0.6569598", "0.65098536", "0.64840317", "0.64721006", "0.64710337", "0.64520174", "0.6450918", "0.645038", "0.64476436" ]
0.766399
1
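For reference, a minimal usage sketch of the linear_sum document in the record above; the sample sequence and expected sums are illustrative values, not part of the dataset:

# Illustrative only: linear_sum(S, n) sums the first n elements by peeling off S[n - 1].
S = [4, 3, 6, 2, 8]            # hypothetical input sequence
assert linear_sum(S, 5) == 23  # 4 + 3 + 6 + 2 + 8
assert linear_sum(S, 0) == 0   # base case: an empty prefix sums to 0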
Update class Update a class, identified by "class_name". It permits partial updates (keeping the rest of the class info unchanged). The attributes to update are passed in kwargs. To support a partial update, the first step is to get the current class info, because the service directory only supports full updates.
def update(self, class_name, **kwargs):
    class_obj = self.get(class_name)
    class_obj.update(kwargs)
    response = self.client.post(Classes.PATH_CLASS_TEMPLATE.format(class_name=class_name), body=class_obj)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")", "def do_update(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n return\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n elif len(coms) < 3:\n print(\"** attribute name missing **\")\n elif len(coms) < 4:\n print(\"** value missing **\")\n else:\n typecast = type(eval(coms[3]))\n form = coms[3].strip('\"')\n form = form.strip(\"'\")\n setattr(storage.all()[obj], coms[2], typecast(form))", "def do_update(self, arg):\n args = arg.split()\n object_dict = storage.all()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if args[0] in self.class_dict:\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n return\n elif len(args) == 3:\n print(\"** value missing **\")\n return\n else:\n print(\"** class doesn't exist **\")\n return\n\n for i in range(len(args)):\n if args[i].startswith('\"') and args[i].endswith('\"'):\n args[i] = args[i][1:-1]\n\n for full_key in object_dict.keys():\n key = full_key.split('.')\n key_id = key[1]\n if args[0] in self.class_dict:\n if args[1] == object_dict[full_key].id:\n setattr(object_dict[full_key], args[2], args[3])\n setattr(object_dict[full_key], \"updated_at\",\n datetime.now())\n storage.save()\n return\n else:\n print(\"** class doesn't exist **\")\n return\n print(\"** no instance found **\")", "def update_css_class(kwargs, class_name):\n if \"className\" in kwargs:\n kwargs[\"className\"] += f\" {class_name}\"\n else:\n kwargs[\"className\"] = class_name", "def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n elif args[0] in classes:\n if len(args) > 1:\n k = args[0] + \".\" + args[1]\n if k in models.storage.all():\n if len(args) > 2:\n if len(args) > 3:\n try:\n if isinstance(args[2], datetime) is True:\n pass\n if args[0] in classes:\n if isinstance(args[2], ints) is True:\n args[3] = int(args[3])\n elif isinstance(args[2], floats) is True:\n args[3] = float(args[3])\n except:\n pass\n setattr(models.storage.all()[k], args[2], args[3])\n models.storage.all()[k].save()\n else:\n print(\"** value missing **\")\n else:\n print(\"** attribute name missing **\")\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def do_update(self, args):\n args = 
shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif not \"{}.{}\".format(args[0], args[1]) in dicti:\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n key = dicti[\"{}.{}\".format(args[0], args[1])]\n setattr(key, args[2], args[3])\n key.save()", "def do_update(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if len(args) == 2:\n print(\"** attribute name missing **\")\n return\n if len(args) == 3:\n print(\"** value missing **\")\n return\n if args[0] not in HBNBCommand.valid_classes.keys():\n print(\"** class doesn't exist **\")\n return\n all_objs = storage.all(args[0])\n for k, v in all_objs.items():\n if k == args[1]:\n setattr(v, args[2], args[3])\n storage.save()\n return\n print(\"** no instance found **\")", "def do_update(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif not args[0] in class_type:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif (\"{}.{}\".format(args[0], args[1]) not in storage.all().keys()):\n print(\"** no instance found **\")\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n new_dict = models.storage.all()\n tmp = \"{}.{}\".format(args[0], args[1])\n if tmp in new_dict.keys():\n attr = getattr(new_dict[tmp], args[2], \"\")\n setattr(new_dict[tmp], args[2], type(attr)(args[3]))\n new_dict[tmp].save()", "def update(self, cls, spec, fields, **kwargs):\n m = mapper(cls)\n return m.update_partial(self, spec, fields, **kwargs)", "def update_class(self, class_info):\n SchemaValidator(self.schema_extension_only, self.schema_nx).validate_class_schema(class_info)\n self.schema[\"@graph\"].append(class_info)\n self.load_schema(self.schema)\n print(\"Updated the class {} successfully!\".format(class_info[\"rdfs:label\"]))", "def do_update(self, arg):\n arg = arg.split()\n try:\n h = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif h not in objects.keys():\n print(\"** no instance found **\")\n elif len(arg) <= 2:\n print(\"** attribute name missing **\")\n elif len(arg) <= 3:\n print(\"** value missing **\")\n else:\n setattr(objects[h], arg[2], arg[3])\n storage.save()", "def update(self, **kwargs):\n print(\"Updating model\")\n print(kwargs)\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def update(self, *args, **kwargs):\n if args:\n self.__update(*args)\n elif kwargs:\n self.__update(**kwargs)", "def do_quota_class_update(cs, args):\n utils.print_dict(cs.quota_classes.update(\n args.quota_class_name,\n containers=args.containers,\n memory=args.memory,\n cpu=args.cpu,\n disk=args.disk)._info)", "def update(self, **kwargs):\n return self.parent.update_instance(self.name, kwargs)", "def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, 
key, value)", "def changeClass(self, newClass):\n\t\turl = \"https://habitica.com/api/v3/user/change-class?class=\" + newClass\n\t\treturn(postUrl(url, self.credentials))", "def salesforce_update(self, obj_name, obj_id, **kwargs):\n self.builtin.log(\n \"Updating {} {} with values {}\".format(obj_name, obj_id, kwargs)\n )\n obj_class = getattr(self.cumulusci.sf, obj_name)\n return obj_class.update(obj_id, kwargs)", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, **kwargs):\n for k, v in kwargs.iteritems():\n if hasattr(self, k):\n setattr(self, k, v)", "def update(self, **kwargs):\n return self._update_data(self.put(None, data=kwargs))", "def update(self, *args, **kwargs):\n raise NotImplementedError" ]
[ "0.66147435", "0.6480898", "0.6371645", "0.6344272", "0.6316442", "0.625787", "0.6200667", "0.61956793", "0.6190176", "0.61770034", "0.61499053", "0.60016674", "0.5948246", "0.59319293", "0.5903669", "0.57902175", "0.5737991", "0.5720873", "0.56708974", "0.56708974", "0.56708974", "0.56708974", "0.56708974", "0.56708974", "0.56635696", "0.56635696", "0.56635696", "0.56541514", "0.56439567", "0.56198514" ]
0.8564456
0
Delete a class Delete a class identified by "class_name"
def delete(self, class_name):
    return self.client.delete(Classes.PATH_CLASS_TEMPLATE.format(class_name=class_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_destroy(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n else:\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n else:\n del storage.all()[obj]\n storage.save()", "def remove_class(self, name):\n if self.has_class(name):\n self._cached_class.remove(name)\n self._update_class()", "def delete_obj_class(self, obj_class_name: str) -> ProjectMeta:\n return self.delete_obj_classes([obj_class_name])", "def do_destroy(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n del dict_objs[key]\n storage.save()\n else:\n print(\"** no instance found **\")", "def drop_class(self, cls, ignore_instances=False):\n if ignore_instances:\n self.client.command(\n 'DROP CLASS {} UNSAFE'.format(cls.registry_name))\n else:\n self.client.command(\n 'DROP CLASS {}'.format(cls.registry_name))", "def delete_vs_class(vs_class_name, created_objects):\r\n if keep_objects:\r\n return\r\n custom_object_api_instance = client.CustomObjectsApi()\r\n try:\r\n custom_object_api_response = custom_object_api_instance.delete_cluster_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1\",\r\n plural=\"volumesnapshotclasses\",\r\n name=vs_class_name\r\n )\r\n LOGGER.debug(custom_object_api_response)\r\n LOGGER.info(f\"Volume Snapshot Class Delete : {vs_class_name} deleted\")\r\n created_objects[\"vsclass\"].remove(vs_class_name)\r\n except ApiException as e:\r\n LOGGER.error(f\"Exception when calling CustomObjectsApi->delete_cluster_custom_object_0: {e}\")\r\n clean_with_created_objects(created_objects)\r\n assert False", "def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def do_destroy(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in classes:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")", "def do_destroy(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n del objects[args]\n 
storage.save()", "def do_destroy(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n elif len(args) < 2:\n print(\"** class name missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n del object_dict[full_key]\n storage.save()\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")", "def do_destroy(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' + args[1]\n if key in all_objs:\n all_objs.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")", "def delete_storage_class(sc_name, created_objects):\r\n if sc_name == \"\" or keep_objects:\r\n return\r\n api_instance = client.StorageV1Api()\r\n try:\r\n LOGGER.info(f'SC Delete : deleting storage class {sc_name}')\r\n api_response = api_instance.delete_storage_class(\r\n name=sc_name, pretty=True, grace_period_seconds=0)\r\n LOGGER.debug(str(api_response))\r\n created_objects[\"sc\"].remove(sc_name)\r\n except ApiException as e:\r\n LOGGER.error(\r\n f\"Exception when calling StorageV1Api->delete_storage_class: {e}\")\r\n clean_with_created_objects(created_objects)\r\n assert False", "def delete_fixture_class(self, class_id):\n with self._class_lock:\n existing_fix = self.instances.find_one({'class_id': class_id, 'status': {'$ne': InstanceStatus.DELETED}})\n if existing_fix:\n raise AXIllegalOperationException(\"Fixtures belonging to class {} should be deleted prior to removal\"\n .format(existing_fix['class_name']))\n self.axdb_client.delete_fixture_class(class_id)\n logger.info(\"Deleted %s\", class_id)\n return class_id", "def delete_from(class_reference, custom_condition='', **attr_dict):\n _entries = select_from(class_reference, custom_condition=custom_condition, **attr_dict) \n _del = 0\n for _entry in _entries:\n _entry.delete()\n _del += 1\n return _del", "def do_destroy(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n if len(tokens) < 1:\n print(\"** class name missing **\")\n else:\n objects = models.storage.all()\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n elif len(tokens) < 2:\n print(\"** instance id missing **\")\n elif \".\".join(tokens[:2]) not in objects:\n print(\"** no instance found **\")\n else:\n del objects[\".\".join(tokens[:2])]\n models.storage.save()", "def do_destroy(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n dicti.pop(\"{}.{}\".format(args[0], args[1]))\n storage.save()\n else:\n print(\"** no instance found **\")", "def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** 
instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")", "def tearDownClass(cls):\n cls.object.delete()", "def help_destroy(self):\n print(\"delete an instance based on the class name and id\")", "def do_destroy(self, line):\n args = line.split()\n\n if not args:\n print(\"** class name missing **\")\n elif args[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n elif len(args) < 2:\n print(\"** instance id missing **\")\n else:\n key = args[0] + \".\" + args[1]\n dict_objects = storage.all()\n obj = dict_objects.get(key)\n if obj:\n dict_objects.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")", "def do_destroy(self, line):\n list_line = line.split(' ')\n if line == \"\":\n print(\"** class name missing **\")\n elif list_line[0] not in HBNBCommand.classes.keys():\n print(\"** class doesn't exist **\")\n elif len(list_line) < 2:\n print(\"** instance id missing **\")\n elif list_line[0] + '.' + list_line[1] not in \\\n models.storage.all().keys():\n print(\"** no instance found **\")\n else:\n models.storage.all().pop(list_line[0] + '.' + list_line[1], None)\n models.storage.save()", "def deleteClass():\r\n displayClassList(True)\r\n if len(classes) == 0:\r\n return\r\n \r\n print(\"\\nWhich class would you like to delete?\")\r\n classIndex = input(\"Choice: \")\r\n while not properMenuChoice(classIndex):\r\n classIndex = input(\"Please enter a valid menu choice: \") \r\n \r\n if int(classIndex) == len(classes) + 1: #Return if choice is None from displayClassList\r\n return \r\n classIndex = int(classIndex)\r\n className = classes[classIndex-1].getName()\r\n classDay = classes[classIndex-1].getDay()\r\n del classes[classIndex-1]\r\n print(\"\\nDeleted \" + className + \" on \" + str(classDay))\r\n delay()", "def destroy(self, class_name, inst_id, stored_objects):\n instance = \"{}.{}\".format(class_name, inst_id)\n if instance not in stored_objects:\n print(\"** no instance found **\")\n else:\n del stored_objects[instance]", "def delete(self, cls, id):\n pass", "def delete(self, name):\n\n pass", "def deregister_class(self, name):\n isbase = name in self.base_types\n if not isbase and name not in self.template_types:\n _raise_type_error(name)\n if isbase:\n self.base_types.remove(name)\n else:\n self.template_types.pop(name, None)\n\n self.cython_ctypes.pop(name, None)\n self.cython_cytypes.pop(name, None)\n self.cython_pytypes.pop(name, None)\n self.from_pytypes.pop(name, None)\n self.cpp_types.pop(name, None)\n self.humannames.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_classnames.pop(name, None)\n\n self.clearmemo()", "def __delattr__(cls, name):\n raise TypeError('May not delete attributes on definition class')", "def remove(self, name: str) -> None:\n try:\n del self.instances[name]\n except KeyError:\n try:\n del self.classes[name]\n except KeyError:\n raise KeyError(f'{name} is not found in the library')\n return", "def delete(self):\n ...", "def delete(cls, type_obj):\n DB.session.delete(type_obj)\n DB.session.commit()" ]
[ "0.71252465", "0.69804424", "0.6953482", "0.6947389", "0.69018453", "0.68608165", "0.68567073", "0.6838112", "0.6836245", "0.68336", "0.6813786", "0.6794098", "0.6673387", "0.6561163", "0.654488", "0.6510977", "0.6471283", "0.63775885", "0.628025", "0.6266746", "0.6247461", "0.6145403", "0.61233795", "0.6064611", "0.6007088", "0.59758115", "0.59528184", "0.5930443", "0.5918605", "0.5899104" ]
0.84382033
0
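A minimal, self-contained sketch of the read-modify-write flow behind the update and delete documents in the two records above; the path template and the in-memory stand-in for the service directory are assumptions made for illustration, not values taken from the dataset:

# Assumed endpoint template; the real Classes.PATH_CLASS_TEMPLATE is not shown in the records above.
PATH_CLASS_TEMPLATE = '/classes/{class_name}'

# In-memory stand-in for the service directory, which only accepts full replacements.
service = {'/classes/worker': {'cpu': 1, 'memory': '1Gi'}}

def get(class_name):
    return dict(service[PATH_CLASS_TEMPLATE.format(class_name=class_name)])

def update(class_name, **kwargs):
    class_obj = get(class_name)    # fetch the full current definition first
    class_obj.update(kwargs)       # merge only the attributes being changed
    service[PATH_CLASS_TEMPLATE.format(class_name=class_name)] = class_obj  # post the full object back
    return class_obj

def delete(class_name):
    return service.pop(PATH_CLASS_TEMPLATE.format(class_name=class_name))

print(update('worker', cpu=2))     # {'cpu': 2, 'memory': '1Gi'}
print(delete('worker'))            # removes the class entry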
Returns the required resources for the given training method.
def required_resources_for_method(method, uses_pretrained_r_net):
    if method == 'ppo_plus_eco':
        # We need to rent 2 GPUs, because with this amount of RAM, GCP won't allow
        # us to rent only one.
        return (105472, 16, 2)
    if method == 'ppo_plus_ec' and not uses_pretrained_r_net:
        return (52224, 12, 1)
    return (32768, 12, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resources(self):\n return list(self.get_inputs()) + list(self.get_outputs())", "def get_resources(self):\n return []", "def resources(self):\n return self.__resources", "def _GetResourceLoaders():\n loaders = []\n\n # Add all paths to list if they are specified on the command line (will warn\n # if any are invalid).\n # Otherwise add members of the default list iff they exist.\n if FLAGS['data_search_paths'].present:\n for path in FLAGS.data_search_paths:\n loaders.append(FileResourceLoader(path))\n else:\n for path in FLAGS.data_search_paths:\n if os.path.isdir(path):\n loaders.append(FileResourceLoader(path))\n loaders.extend(DEFAULT_RESOURCE_LOADERS)\n return loaders", "def resource_requests(self) -> Optional[pulumi.Input['BuildResourceRequestsArgs']]:\n return pulumi.get(self, \"resource_requests\")", "def get_resource_params():\n return Parameter.list()", "def get_resources(self, **extra_args):\n return [lrms for lrms in self.resources.itervalues()]", "def resources(self):", "def resources(ctx, job, gpu):\n\n def get_experiment_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment.resources(\n user, project_name, _experiment, message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n def get_experiment_job_resources():\n try:\n message_handler = Printer.gpu_resources if gpu else Printer.resources\n PolyaxonClient().experiment_job.resources(user,\n project_name,\n _experiment,\n _job,\n message_handler=message_handler)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get resources for job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_resources()\n else:\n get_experiment_resources()", "def getloader(self):\n\t\treturn self.train_loader, self.test_loader", "def resource_requests(self) -> Optional[pulumi.Input['ResourceRequestsArgs']]:\n return pulumi.get(self, \"resource_requests\")", "def test_get_cloud_resources(self):\n pass", "def get_benchmark_requirements(cls):\n pass", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def resources(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resources\")", "def required_resource_keys(self) -> AbstractSet[str]:\n return self._required_resource_keys", "def resource_requests(self) -> Optional[pulumi.Input['GatewayResourceRequestsArgs']]:\n return pulumi.get(self, \"resource_requests\")", "def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))", "def resources(self) -> pulumi.Output[Sequence['outputs.MachineExtensionResponse']]:\n return pulumi.get(self, \"resources\")", "def get_train_files(self):\n raise NotImplementedError", "def get_resources(self):\n res = set()\n res.update(self.get_inputs())\n 
res.update(self.get_outputs())\n return res", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def resources(self) -> Sequence['outputs.GetResourcesResourceResult']:\n return pulumi.get(self, \"resources\")", "def resources(self):\n return [self]", "def _get_all_resources(self):\n all_resources = []\n for resource in ResourceModel.scan():\n all_resources.append(resource)\n return all_resources", "def _validate_resources(self):\n resources = self.options.resources\n\n for key in ['num_machines', 'num_mpiprocs_per_machine', 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: '\n 'parallelization is not supported, only a value of `1` is accepted.'\n )" ]
[ "0.59075975", "0.58460414", "0.5659363", "0.5576517", "0.55648386", "0.5517262", "0.5499891", "0.5462048", "0.5450394", "0.54441565", "0.5433045", "0.54239994", "0.54126656", "0.54065466", "0.54039216", "0.54039216", "0.54039216", "0.54039216", "0.5390593", "0.53260726", "0.5321036", "0.53089315", "0.52986264", "0.52852166", "0.5247571", "0.5247571", "0.5247294", "0.5238903", "0.52282953", "0.52224135" ]
0.74345803
0
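As a quick illustration of the tuple ordering returned above (RAM in MB, CPU count, GPU count) and how it feeds the custom machine type built by the launch_vm document in the next record; the values shown are the 'ppo_plus_ec' case without a pretrained R-network:

# Unpack the resources tuple and build the machine-type string the same way launch_vm does below.
ram_mbs, num_cpus, num_gpus = (52224, 12, 1)
machine_type = 'custom-{num_cpus}-{ram_mbs}'.format(num_cpus=num_cpus, ram_mbs=ram_mbs)
print(machine_type)   # custom-12-52224
print(num_gpus)       # count of nvidia-tesla-p100 accelerators requested for the instance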
Creates and launches a VM on Google Cloud Compute Engine.
def launch_vm(vm_id, vm_metadata):
    print('\nCreating disk and vm with ID:', vm_id)
    vm_metadata['vm_id'] = vm_id
    ram_mbs, num_cpus, num_gpus = required_resources_for_method(
        vm_metadata['method'], bool(vm_metadata['pretrained_r_nets_path']))
    create_disk_cmd = (
        'gcloud compute disks create '
        '"{disk_name}" --zone "{zone}" --source-snapshot "{source_snapshot}" '
        '--type "pd-standard" --project="{gcloud_project}" '
        '--size=200GB'.format(
            disk_name=vm_id,
            zone=ZONE,
            source_snapshot=SOURCE_SNAPSHOT,
            gcloud_project=GCLOUD_PROJECT,
        ))
    print('Calling', create_disk_cmd)
    # Don't fail if disk already exists.
    subprocess.call(create_disk_cmd, shell=True)
    create_instance_cmd = (
        'gcloud compute --project={gcloud_project} instances create '
        '{instance_name} --zone={zone} --machine-type={machine_type} '
        '--subnet=default --network-tier=PREMIUM --maintenance-policy=TERMINATE '
        '--service-account={service_account} '
        '--scopes=storage-full,compute-rw '
        '--accelerator=type=nvidia-tesla-p100,count={gpu_count} '
        '--disk=name={disk_name},device-name={disk_name},mode=rw,boot=yes,'
        'auto-delete=yes --restart-on-failure '
        '--metadata-from-file startup-script=./scripts/vm_drop_root.sh '
        '--metadata {vm_metadata} --async'.format(
            instance_name=vm_id,
            zone=ZONE,
            machine_type='custom-{num_cpus}-{ram_mbs}'.format(
                num_cpus=num_cpus, ram_mbs=ram_mbs),
            gpu_count=num_gpus,
            disk_name=vm_id,
            vm_metadata=(
                ','.join('{}={}'.format(k, v) for k, v in vm_metadata.items())),
            gcloud_project=GCLOUD_PROJECT,
            service_account=SERVICE_ACCOUNT,
        ))
    print('Calling', create_instance_cmd)
    subprocess.check_call(create_instance_cmd, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gce_instance(args, ip_address):\n Print.GN('Creating GCE VM.')\n instance_name = GCE_INSTANCE_NAME.lower()\n firewall_tag = 'https-server' if args.https_only else LEO_FIREWALL_RULE\n cmd = ['gcloud', 'compute', 'instances', 'create',\n instance_name,\n '--image-family', 'ubuntu-1604-lts',\n '--image-project', 'ubuntu-os-cloud',\n '--project', args.project,\n '--scopes', 'cloud-platform',\n '--zone', args.zone,\n '--address', ip_address,\n '--machine-type', args.gce_instance_type,\n '--service-account', args.service_account,\n '--boot-disk-size', str(args.gce_disk_size),\n '--labels', 'instance-creator=leonardo-easy-deploy',\n '--tags', firewall_tag,\n '--boot-disk-auto-delete',\n # 'metadata-from-file' must be the last argument.\n '--metadata-from-file']\n with tempfile.NamedTemporaryFile(mode='w') as startup_file:\n gce_vars = GCE_INIT_SCRIPT_VARS.format(\n server_host=args.host,\n user=LOCAL_USER,\n docker_image=_to_gcr_path(args.project, 'leonardo', args.branch),\n server_ssl_key=args.ssl_key,\n server_ssl_cert=args.ssl_cert,\n server_ca_bundle=args.ssl_ca_bundle,\n rootca_key=args.rootca_key,\n kms_key=args.kms_key,\n kms_keyring=args.kms_keyring,\n kms_project=args.kms_project,\n kms_location=args.kms_location,\n ssl_test_file=args.ssl_test_file,\n )\n startup_file.write(gce_vars + '\\n' + GCE_INIT_SCRIPT_LOGIC)\n startup_file.flush()\n cmd.append('startup-script=%s' % startup_file.name)\n subprocess.check_call(cmd)\n # Startup script always takes time during which the instance\n # is unavailable.\n time.sleep(15)\n print('Successfully created instance: %s' % instance_name)\n return instance_name", "def create_vm(cmd, client, resource_group_name, vm_name,\n private_cloud, template, resource_pool,\n amount_of_ram=None, number_of_cores=None,\n location=None, expose_to_guest_vm=None,\n nics=None, disks=None):\n from .vendored_sdks.models import VirtualMachine\n from .vendored_sdks.models import ResourcePool\n from ._config import PATH_CHAR\n\n resource_pool = ResourcePool(id=resource_pool)\n\n # Extracting template and private cloud name from the resource id\n template_name = template.rsplit(PATH_CHAR, 1)[-1]\n private_cloud_name = private_cloud.rsplit(PATH_CHAR, 1)[-1]\n vm_template = client.virtual_machine_templates.get(location, private_cloud_name, template_name)\n\n cores = number_of_cores or vm_template.number_of_cores\n ram = amount_of_ram or vm_template.amount_of_ram\n\n expose = vm_template.expose_to_guest_vm\n if expose_to_guest_vm is not None:\n expose = expose_to_guest_vm\n\n final_disks = vm_template.disks\n if disks is not None:\n final_disks = _modify_template_disks_according_to_input(final_disks, disks)\n\n final_nics = vm_template.nics\n if nics is not None:\n final_nics = _modify_template_nics_according_to_input(final_nics, nics, cmd, client,\n resource_group_name, vm_name,\n location, private_cloud)\n\n virtual_machine = VirtualMachine(location=location,\n amount_of_ram=ram,\n disks=final_disks,\n expose_to_guest_vm=expose,\n nics=final_nics,\n number_of_cores=cores,\n private_cloud_id=private_cloud,\n resource_pool=resource_pool,\n template_id=template)\n\n return client.virtual_machines.create_or_update(resource_group_name, vm_name, virtual_machine)", "def create_vm(self, imagefilename, name, compute_resource, datastore, disksize, nics,\r\n memory, num_cpus, guest_id, host=None):\r\n # Convenience variable\r\n client = self.client\r\n\r\n self.log.debug(\"Creating VM %s\" % name)\r\n # If the host is not set, use the ComputeResource as the target\r\n if 
host is None:\r\n target = client.find_entity_view(\"ComputeResource\",\r\n filter={\"name\": compute_resource})\r\n resource_pool = target.resourcePool\r\n else:\r\n target = client.find_entity_view(\"HostSystem\", filter={\"name\": host})\r\n resource_pool = target.parent.resourcePool\r\n\r\n disksize_pattern = re.compile(\"^\\d+[KMG]B\")\r\n if disksize_pattern.match(disksize) is None:\r\n raise Exception(\"Disk size %s is invalid. Try \\\"12G\\\" or similar\" % disksize)\r\n\r\n if disksize.endswith(\"GB\"):\r\n disksize_kb = int(disksize[:-2]) * 1024 * 1024\r\n elif disksize.endswith(\"MB\"):\r\n disksize_kb = int(disksize[:-2]) * 1024\r\n elif disksize.endswith(\"KB\"):\r\n disksize_kb = int(disksize[:-2])\r\n else:\r\n raise Exception(\"Disk size %s is invalid. Try \\\"12G\\\" or similar\" % disksize)\r\n\r\n memory_pattern = re.compile(\"^\\d+[KMG]B\")\r\n if memory_pattern.match(memory) is None:\r\n raise Exception(\"Memory size %s is invalid. Try \\\"12G\\\" or similar\" % memory)\r\n\r\n if memory.endswith(\"GB\"):\r\n memory_mb = int(memory[:-2]) * 1024\r\n elif memory.endswith(\"MB\"):\r\n memory_mb = int(memory[:-2])\r\n elif memory.endswith(\"KB\"):\r\n memory_mb = int(memory[:-2]) / 1024\r\n else:\r\n raise Exception(\"Memory size %s is invalid. Try \\\"12G\\\" or similar\" % memory)\r\n\r\n # A list of devices to be assigned to the VM\r\n vm_devices = []\r\n\r\n # Create a disk controller\r\n controller = self.create_controller(\"VirtualLsiLogicController\")\r\n vm_devices.append(controller)\r\n\r\n ds_to_use = None\r\n for ds in target.datastore:\r\n if ds.name == datastore:\r\n ds_to_use = ds\r\n break\r\n\r\n if ds_to_use is None:\r\n raise Exception(\"Could not find datastore on %s with name %s\" %\r\n (target.name, datastore))\r\n\r\n # Ensure the datastore is accessible and has enough space\r\n if ds_to_use.summary.accessible is not True:\r\n raise Exception(\"Datastore (%s) exists, but is not accessible\" %\r\n ds_to_use.summary.name)\r\n if ds_to_use.summary.freeSpace < disksize_kb * 1024:\r\n raise Exception(\"Datastore (%s) exists, but does not have sufficient\"\r\n \" free space.\" % ds_to_use.summary.name)\r\n\r\n disk = self.create_disk(datastore=ds_to_use, disksize_kb=disksize_kb)\r\n vm_devices.append(disk)\r\n\r\n cdrom = self.create_cdrom(datastore=ds_to_use)\r\n vm_devices.append(cdrom)\r\n \r\n for nic in nics:\r\n nic_spec = self.create_nic(target, nic)\r\n if nic_spec is None:\r\n raise Exception(\"Could not create spec for NIC\")\r\n\r\n # Append the nic spec to the vm_devices list\r\n vm_devices.append(nic_spec)\r\n\r\n vmfi = client.create(\"VirtualMachineFileInfo\")\r\n vmfi.vmPathName = \"[%s]\" % ds_to_use.summary.name\r\n vm_config_spec = client.create(\"VirtualMachineConfigSpec\")\r\n vm_config_spec.name = name\r\n vm_config_spec.memoryMB = memory_mb\r\n vm_config_spec.files = vmfi\r\n vm_config_spec.annotation = \"Auto-provisioned by psphere\"\r\n vm_config_spec.numCPUs = num_cpus\r\n vm_config_spec.guestId = guest_id\r\n vm_config_spec.deviceChange = vm_devices\r\n\r\n # Find the datacenter of the target\r\n if target.__class__.__name__ == \"HostSystem\":\r\n datacenter = target.parent.parent.parent\r\n else:\r\n datacenter = target.parent.parent\r\n\r\n importspec = client.create('VirtualMachineImportSpec')\r\n\r\n importspec.configSpec = vm_config_spec\r\n importspec.resPoolEntity = None\r\n\r\n lease = resource_pool.ImportVApp(spec = importspec, folder = datacenter.vmFolder)\r\n self.lease = lease\r\n\r\n # Lease takes a bit of time to 
initialize\r\n for i in range(1000):\r\n #print lease.error\r\n if lease.state == \"ready\":\r\n break\r\n if lease.state == \"error\":\r\n raise Exception(\"Our HttpNFCLease failed to initialize\")\r\n sleep(5)\r\n lease.update_view_data(properties=[\"state\"])\r\n\r\n #print \"For debug and general info, here is the lease info\"\r\n #pprint(lease.info)\r\n\r\n upload_url = None\r\n for url_candidate in lease.info.deviceUrl:\r\n if url_candidate['disk']:\r\n upload_url = str(url_candidate['url'])\r\n\r\n if not upload_url:\r\n raise Exception(\"Unable to extract disk upload URL from HttpNfcLease\")\r\n\r\n self.log.debug(\"Extracted image upload URL (%s) from lease\" % (upload_url))\r\n\r\n lease_timeout = lease.info.leaseTimeout\r\n self.time_at_last_poke = time()\r\n\r\n image_file = open(imagefilename)\r\n\r\n # Upload the image itself\r\n image_size = os.path.getsize(imagefilename)\r\n curl = pycurl.Curl()\r\n curl.setopt(pycurl.URL, upload_url)\r\n curl.setopt(pycurl.SSL_VERIFYPEER, 0)\r\n curl.setopt(pycurl.POST, 1)\r\n curl.setopt(pycurl.POSTFIELDSIZE, image_size)\r\n curl.setopt(pycurl.READFUNCTION, image_file.read)\r\n curl.setopt(pycurl.HTTPHEADER, [\"User-Agent: Load Tool (PyCURL Load Tool)\", \"Content-Type: application/octet-stream\"])\r\n curl.setopt(pycurl.NOPROGRESS, 0)\r\n curl.setopt(pycurl.PROGRESSFUNCTION, self.curl_progress)\r\n curl.perform()\r\n curl.close()\r\n\r\n image_file.close()\r\n\r\n lease.HttpNfcLeaseComplete()\r\n\r\n vm = lease.info.entity\r\n\r\n vm.MarkAsTemplate()", "def create(self):\n flavor = env_vars[\"cassandra_%s_flavor\" % self.type]\n #create the VM\n self.vm = VM(self.name, flavor, self.image, create=True)", "def create(vm_):\n name = vm_[\"name\"]\n record = {}\n ret = {}\n\n # fire creating event\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"starting create\",\n \"salt/cloud/{}/creating\".format(name),\n args={\"name\": name, \"profile\": vm_[\"profile\"], \"provider\": vm_[\"driver\"]},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n log.debug(\"Adding %s to cloud cache.\", name)\n __utils__[\"cloud.cachedir_index_add\"](\n vm_[\"name\"], vm_[\"profile\"], \"xen\", vm_[\"driver\"]\n )\n\n # connect to xen\n session = _get_session()\n\n # determine resource pool\n resource_pool = _determine_resource_pool(session, vm_)\n\n # determine storage repo\n storage_repo = _determine_storage_repo(session, resource_pool, vm_)\n\n # build VM\n image = vm_.get(\"image\")\n clone = vm_.get(\"clone\")\n if clone is None:\n clone = True\n log.debug(\"Clone: %s \", clone)\n\n # fire event to read new vm properties (requesting)\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"requesting instance\",\n \"salt/cloud/{}/requesting\".format(name),\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n\n # create by cloning template\n if clone:\n _clone_vm(image, name, session)\n else:\n _copy_vm(image, name, session, storage_repo)\n\n # provision template to vm\n _provision_vm(name, session)\n vm = _get_vm(name, session)\n\n # start vm\n start(name, None, session)\n\n # get new VM\n vm = _get_vm(name, session)\n\n # wait for vm to report IP via guest tools\n _wait_for_ip(name, session)\n\n # set static IP if configured\n _set_static_ip(name, session, vm_)\n\n # if not deploying salt then exit\n deploy = vm_.get(\"deploy\", True)\n log.debug(\"delopy is set to %s\", deploy)\n if deploy:\n record = session.xenapi.VM.get_record(vm)\n if record is not None:\n _deploy_salt_minion(name, session, vm_)\n 
else:\n log.debug(\"The Salt minion will not be installed, deploy: %s\", vm_[\"deploy\"])\n record = session.xenapi.VM.get_record(vm)\n ret = show_instance(name)\n ret.update({\"extra\": record})\n\n __utils__[\"cloud.fire_event\"](\n \"event\",\n \"created instance\",\n \"salt/cloud/{}/created\".format(name),\n args={\"name\": name, \"profile\": vm_[\"profile\"], \"provider\": vm_[\"driver\"]},\n sock_dir=__opts__[\"sock_dir\"],\n transport=__opts__[\"transport\"],\n )\n return ret", "def _create_vm(self):\n self._create_instance_in_the_db()\n self.type_data = db.instance_type_get_by_name(None, 'm1.large')\n self.conn.spawn(self.context, self.instance, self.network_info)\n self._check_vm_record()", "def launch_instance(cloud):\n js = _get_jetstream_conn()\n\n sgs = ['CloudLaunchDefault']\n kp_name = \"cloudman_key_pair\"\n inst_size = 'm1.small'\n network_id = '86a1c3e8-b1fb-41f3-bcaf-8334567fe989'\n lc = js.compute.instances.create_launch_config()\n lc.add_network_interface(network_id)\n\n img_id = '2cf07e4a-62a8-41c2-9282-f3c53962f296' # Gxy Standalone 161021b01\n name = 'ea-galaxy-{0}'.format(strftime(\"%m-%d-%H-%M\", localtime()))\n\n i = js.compute.instances.create(\n name, img_id, inst_size, security_groups=sgs, launch_config=lc,\n key_pair=kp_name)\n return i", "def create_VM_instance(settings):\n connection = create_cloud_connection(settings)\n try:\n print \"Creating VM instance\"\n reservation = connection.run_instances(\n image_id=settings.VM_IMAGE,\n min_count=1,\n max_count=1,\n key_name=settings.PRIVATE_KEY_NAME,\n security_groups=settings.SECURITY_GROUP,\n instance_type=settings.VM_SIZE)\n #print (\"Created Reservation %s\" % reservation)\n new_instance = reservation.instances[0]\n print (\"Created Instance: %s\" % new_instance)\n _wait_for_instance_to_start_running(settings, new_instance)\n ip_address = get_instance_ip(new_instance,\n refresh=True, settings=settings)\n customize_prompt(settings, ip_address)\n print 'Created VM instance with IP: %s' % ip_address\n\n except EC2ResponseError, e:\n if \"Quota\" in e.body:\n print 'Quota Limit Reached'\n else:\n raise", "def start_vm(client, resource_group_name, vm_name):\n return client.start(resource_group_name, vm_name)", "def create_vm(args):\n if not args.disk and not args.pool:\n print(\"Either --disk or --pool option must be specified\", file=sys.stderr)\n return 1\n\n if args.disk and args.pool:\n print(\"--disk and --pool options are exclusive\", file=sys.stderr)\n return 1\n if args.pool and not args.disk_size:\n print(\"You must specify a disk size\", file=sys.stderr)\n return 1\n\n if args.net and args.virtual_network:\n print(\"--net and --virtual_network option are exclusive\", file=sys.stderr)\n return 1\n\n # insure unicity in networking options in BM case\n\n _all_net_names = set()\n if args.net:\n for n_name in args.net:\n if n_name not in _all_net_names:\n _all_net_names.add(n_name)\n else:\n print('Duplicate virtual network name [%s], ignore it', n_name)\n\n if '--network' in args.virt:\n sys.stderr.write(\"--network is not a supported option. 
Please retry without --network option.\\n\")\n return 1\n\n # sanity on extra arguments passed to virt-install(1)\n # some options do not create the guest but display information\n # this is wrongly interpreted as a succcess by underlying layers and we\n # may setup things by mistake\n _virt_install_extra = []\n for _a in args.virt:\n if _a not in ('--print-xml', '--version', '-h', '--help'):\n _virt_install_extra.append(_a)\n\n return oci_utils.kvm.virt.create(name=args.domain,\n root_disk=args.disk,\n pool=args.pool,\n disk_size=args.disk_size,\n network=list(_all_net_names),\n virtual_network=args.virtual_network,\n extra_args=_virt_install_extra)", "def createVM(self ,disk ,name):\n return", "def create_vm(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_CreateVm', self.handle))", "def create_virtual_machine(self, vm):\n if vm.template:\n backend_id = self.create_virtual_machine_from_template(vm)\n else:\n backend_id = self.create_virtual_machine_from_scratch(vm)\n\n try:\n backend_vm = self.client.get_vm(backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n vm.backend_id = backend_id\n vm.runtime_state = backend_vm['power_state']\n vm.save(update_fields=['backend_id', 'runtime_state'])\n\n for disk in backend_vm['disks']:\n disk = self._backend_disk_to_disk(disk['value'], disk['key'])\n disk.vm = vm\n disk.service_settings = vm.service_settings\n disk.project = vm.project\n disk.save()\n\n # If virtual machine is not deployed from template, it does not have any networks.\n # Therefore we should create network interfaces manually according to VM spec.\n if not vm.template:\n for network in vm.networks.all():\n try:\n self.client.create_nic(vm.backend_id, network.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n signals.vm_created.send(self.__class__, vm=vm)\n return vm", "def new_vm():\n\tcfg_path = input(\"\\n\\nInsert the ClickOS .cfg file absolute path:\\n\")\n\n\tbridge_name = get_bridge_name(cfg_path)\n\tif len(bridge_name) == 0:\n\t\tprint(\"Couldnt find the bridge name.\")\n\t\treturn 0\n\n\tcreate_bridge(bridge_name)\n\n\tboot_vm(cfg_path)\n\n\treturn 1", "def launch_vm(self):\r\n self._print(\"Starting VM\")\r\n options = [self.vboxheadless,'-startvm',self.vm_name]\r\n options.extend(self.vboxheadless_start_options)\r\n self.popen = subprocess.Popen(options)\r\n# result = process.wait()\r\n result = \"(other thread)\"\r\n self._print(\"Started %s\" % result)", "def create(vm_):\n try:\n # Check for required profile parameters before sending any API calls.\n if (\n vm_[\"profile\"]\n and config.is_profile_configured(\n __opts__,\n (_get_active_provider_name() or \"profitbricks\"),\n vm_[\"profile\"],\n )\n is False\n ):\n return False\n except AttributeError:\n pass\n\n if \"image_alias\" in vm_ and not version_compatible(\"4.0\"):\n raise SaltCloudNotFound(\n \"The 'image_alias' parameter requires the profitbricks \"\n \"SDK v4.0.0 or greater.\"\n )\n\n if \"image\" not in vm_ and \"image_alias\" not in vm_:\n log.error(\"The image or image_alias parameter is required.\")\n\n signal_event(vm_, \"creating\", \"starting create\")\n\n data = None\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n\n # Assemble list of network interfaces from the cloud profile config.\n nics = _get_nics(vm_)\n\n # Assemble list of volumes from the cloud profile config.\n volumes = [_get_system_volume(vm_)]\n if \"volumes\" in vm_:\n volumes.extend(_get_data_volumes(vm_))\n\n # Assembla the composite server object.\n server = 
_get_server(vm_, volumes, nics)\n\n signal_event(vm_, \"requesting\", \"requesting instance\")\n\n try:\n data = conn.create_server(datacenter_id=datacenter_id, server=server)\n log.info(\n \"Create server request ID: %s\",\n data[\"requestId\"],\n exc_info_on_loglevel=logging.DEBUG,\n )\n\n _wait_for_completion(conn, data, get_wait_timeout(vm_), \"create_server\")\n except PBError as exc:\n log.error(\n \"Error creating %s on ProfitBricks\\n\\n\"\n \"The following exception was thrown by the profitbricks library \"\n \"when trying to run the initial deployment: \\n%s\",\n vm_[\"name\"],\n exc,\n exc_info_on_loglevel=logging.DEBUG,\n )\n return False\n except Exception as exc: # pylint: disable=W0703\n log.error(\n \"Error creating %s \\n\\nError: \\n%s\",\n vm_[\"name\"],\n exc,\n exc_info_on_loglevel=logging.DEBUG,\n )\n return False\n\n vm_[\"server_id\"] = data[\"id\"]\n\n def __query_node_data(vm_, data):\n \"\"\"\n Query node data until node becomes available.\n \"\"\"\n running = False\n try:\n data = show_instance(vm_[\"name\"], \"action\")\n if not data:\n return False\n log.debug(\n \"Loaded node data for %s:\\nname: %s\\nstate: %s\",\n vm_[\"name\"],\n pprint.pformat(data[\"name\"]),\n data[\"state\"],\n )\n except Exception as err: # pylint: disable=broad-except\n log.error(\n \"Failed to get nodes list: %s\",\n err,\n # Show the trackback if the debug logging level is enabled\n exc_info_on_loglevel=logging.DEBUG,\n )\n # Trigger a failure in the wait for IP function\n return False\n\n running = data[\"state\"] == \"RUNNING\"\n if not running:\n # Still not running, trigger another iteration\n return\n\n if ssh_interface(vm_) == \"private_lan\" and data[\"private_ips\"]:\n vm_[\"ssh_host\"] = data[\"private_ips\"][0]\n\n if ssh_interface(vm_) != \"private_lan\" and data[\"public_ips\"]:\n vm_[\"ssh_host\"] = data[\"public_ips\"][0]\n\n return data\n\n try:\n data = salt.utils.cloud.wait_for_ip(\n __query_node_data,\n update_args=(vm_, data),\n timeout=config.get_cloud_config_value(\n \"wait_for_ip_timeout\", vm_, __opts__, default=10 * 60\n ),\n interval=config.get_cloud_config_value(\n \"wait_for_ip_interval\", vm_, __opts__, default=10\n ),\n )\n except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:\n try:\n # It might be already up, let's destroy it!\n destroy(vm_[\"name\"])\n except SaltCloudSystemExit:\n pass\n finally:\n raise SaltCloudSystemExit(str(exc.message))\n\n log.debug(\"VM is now running\")\n log.info(\"Created Cloud VM %s\", vm_)\n log.debug(\"%s VM creation details:\\n%s\", vm_, pprint.pformat(data))\n\n signal_event(vm_, \"created\", \"created instance\")\n\n if \"ssh_host\" in vm_:\n vm_[\"key_filename\"] = get_key_filename(vm_)\n ret = __utils__[\"cloud.bootstrap\"](vm_, __opts__)\n ret.update(data)\n return ret\n else:\n raise SaltCloudSystemExit(\"A valid IP address was not found.\")", "def main():\n\n logging.basicConfig(level=logging.INFO)\n\n # Load GCE settings.\n settings = json.loads(open(gce.SETTINGS_FILE, 'r').read())\n\n # Perform OAuth 2.0 authorization flow.\n flow = flow_from_clientsecrets(\n settings['client_secrets'], scope=settings['compute_scope'])\n storage = Storage(settings['oauth_storage'])\n credentials = storage.get()\n\n # Authorize an instance of httplib2.Http.\n if credentials is None or credentials.invalid:\n credentials = run(flow, storage)\n http = httplib2.Http()\n auth_http = credentials.authorize(http)\n\n gce_helper = gce.Gce(auth_http, settings['project'])\n\n # Start an image with a local start-up 
script.\n logging.info('Starting up an instance')\n instance_name = 'startup-script-demo'\n zone_name = settings['compute']['zone']\n try:\n gce_helper.start_instance(\n instance_name,\n\tzone=zone_name,\n service_email=settings['compute']['service_email'],\n scopes=settings['compute']['scopes'],\n startup_script='startup.sh',\n metadata=[\n {'key': 'url', 'value': settings['image_url']},\n {'key': 'text', 'value': settings['image_text']},\n {'key': 'cs-bucket', 'value': settings['storage']['bucket']}])\n except gce.ApiError, e:\n logging.error('Error starting instance.')\n logging.error(e)\n return\n except gce.ApiOperationError as e:\n logging.error('Error starting instance')\n logging.error(e)\n return\n\n # List all running instances.\n logging.info('Here are your running instances:')\n instances = gce_helper.list_instances()\n for instance in instances:\n logging.info(instance['name'])\n\n logging.info(\n 'Visit http://storage.googleapis.com/%s/output.png',\n settings['storage']['bucket'])\n logging.info('It might take a minute for the output.png file to show up.')\n raw_input('Hit Enter when done to shutdown instance')\n\n # Stop the instance.\n logging.info('Shutting down the instance')\n try:\n gce_helper.stop_instance(instance_name, zone=zone_name)\n except gce.ApiError, e:\n logging.error('Error stopping instance.')\n logging.error(e)\n return\n except gce.ApiOperationError, e:\n logging.error('Error stopping instance')\n logging.error(e)\n return\n\n logging.info('Remember to delete the output.png file in ' + settings[\n 'storage']['bucket'])", "def step_create(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console create \"\n '--extended-location name={extendedLocation} type=\"CustomLocation\" --location {location} '\n \"--enabled {enabled} --expiration {expiration} --tags {tags} \"\n \"--ssh-public-key {sshPublicKey} --resource-group {resourceGroup} \"\n \"--virtual-machine-name {virtualMachineName}\",\n checks=checks,\n )", "def create_vm(self, account, vminfo):\n node = self.driver(account).ex_create_node_from_template(\n name=vminfo.vm_name,\n template=vminfo.vsphere.template,\n )\n\n vminfo.vm_id = node.id\n\n return node.id", "def spawn(self,\n context,\n instance,\n image_meta,\n injected_files,\n admin_password,\n network_info=None,\n block_device_info=None):\n vm_params = self._prepare_vm_params(instance, network_info,\n admin_password)\n name = self._azure_instance_name(instance)\n LOG.info(\"Spawning vm %s with params %s\" % (name, vm_params))\n utils.create_or_update_instance(\n self.compute_client, drv_conf.resource_group, name, vm_params)\n tags = {\n 'location': drv_conf.region,\n 'tags': {\n 'openstack_id': instance.uuid,\n 'openstack_project_id': context.project_id,\n 'openstack_user_id': context.user_id\n }\n }\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, name, tags)\n az_instance = utils.get_instance(self.compute_client,\n drv_conf.resource_group, name)\n self._uuid_to_omni_instance[instance.uuid] = az_instance\n instance.metadata.update({\n OMNI_NAME: name,\n constants.OMNI_ID: az_instance.id\n })", "def cloud_init_interface(name, vm_=None, **kwargs):\n if vm_ is None:\n vm_ = {}\n vm_ = copy.deepcopy(vm_)\n vm_ = salt.utils.dictupdate.update(vm_, kwargs)\n\n profile_data = copy.deepcopy(vm_.get(\"lxc_profile\", vm_.get(\"profile\", {})))\n if not isinstance(profile_data, (dict, (str,))):\n profile_data = {}\n profile = get_container_profile(profile_data)\n\n def 
_cloud_get(k, default=None):\n return vm_.get(k, profile.get(k, default))\n\n if name is None:\n name = vm_[\"name\"]\n # if we are on ubuntu, default to ubuntu\n default_template = \"\"\n if __grains__.get(\"os\", \"\") in [\"Ubuntu\"]:\n default_template = \"ubuntu\"\n image = _cloud_get(\"image\")\n if not image:\n _cloud_get(\"template\", default_template)\n backing = _cloud_get(\"backing\", \"dir\")\n if image:\n profile[\"template\"] = image\n vgname = _cloud_get(\"vgname\", None)\n if vgname:\n profile[\"vgname\"] = vgname\n if backing:\n profile[\"backing\"] = backing\n snapshot = _cloud_get(\"snapshot\", False)\n autostart = bool(_cloud_get(\"autostart\", True))\n dnsservers = _cloud_get(\"dnsservers\", [])\n dns_via_dhcp = _cloud_get(\"dns_via_dhcp\", True)\n password = _cloud_get(\"password\", \"s3cr3t\")\n password_encrypted = _cloud_get(\"password_encrypted\", False)\n fstype = _cloud_get(\"fstype\", None)\n lvname = _cloud_get(\"lvname\", None)\n thinpool = _cloud_get(\"thinpool\", None)\n pub_key = _cloud_get(\"pub_key\", None)\n priv_key = _cloud_get(\"priv_key\", None)\n size = _cloud_get(\"size\", \"20G\")\n script = _cloud_get(\"script\", None)\n script_args = _cloud_get(\"script_args\", None)\n users = _cloud_get(\"users\", None)\n if users is None:\n users = []\n ssh_username = _cloud_get(\"ssh_username\", None)\n if ssh_username and (ssh_username not in users):\n users.append(ssh_username)\n network_profile = _cloud_get(\"network_profile\", None)\n nic_opts = kwargs.get(\"nic_opts\", None)\n netmask = _cloud_get(\"netmask\", \"24\")\n path = _cloud_get(\"path\", None)\n bridge = _cloud_get(\"bridge\", None)\n gateway = _cloud_get(\"gateway\", None)\n unconditional_install = _cloud_get(\"unconditional_install\", False)\n force_install = _cloud_get(\"force_install\", True)\n config = _get_salt_config(_cloud_get(\"config\", {}), **vm_)\n default_nic = _cloud_get(\"default_nic\", DEFAULT_NIC)\n # do the interface with lxc.init mainly via nic_opts\n # to avoid extra and confusing extra use cases.\n if not isinstance(nic_opts, dict):\n nic_opts = salt.utils.odict.OrderedDict()\n # have a reference to the default nic\n eth0 = nic_opts.setdefault(default_nic, salt.utils.odict.OrderedDict())\n # lxc config is based of ifc order, be sure to use odicts.\n if not isinstance(nic_opts, salt.utils.odict.OrderedDict):\n bnic_opts = salt.utils.odict.OrderedDict()\n bnic_opts.update(nic_opts)\n nic_opts = bnic_opts\n gw = None\n # legacy salt.cloud scheme for network interfaces settings support\n bridge = _cloud_get(\"bridge\", None)\n ip = _cloud_get(\"ip\", None)\n mac = _cloud_get(\"mac\", None)\n if ip:\n fullip = ip\n if netmask:\n fullip += f\"/{netmask}\"\n eth0[\"ipv4\"] = fullip\n if mac is not None:\n eth0[\"mac\"] = mac\n for ix, iopts in enumerate(_cloud_get(\"additional_ips\", [])):\n ifh = f\"eth{ix + 1}\"\n ethx = nic_opts.setdefault(ifh, {})\n if gw is None:\n gw = iopts.get(\"gateway\", ethx.get(\"gateway\", None))\n if gw:\n # only one and only one default gateway is allowed !\n eth0.pop(\"gateway\", None)\n gateway = None\n # even if the gateway if on default \"eth0\" nic\n # and we popped it will work\n # as we reinject or set it here.\n ethx[\"gateway\"] = gw\n elink = iopts.get(\"link\", ethx.get(\"link\", None))\n if elink:\n ethx[\"link\"] = elink\n # allow dhcp\n aip = iopts.get(\"ipv4\", iopts.get(\"ip\", None))\n if aip:\n ethx[\"ipv4\"] = aip\n nm = iopts.get(\"netmask\", \"\")\n if nm:\n ethx[\"ipv4\"] += f\"/{nm}\"\n for i in (\"mac\", \"hwaddr\"):\n if i 
in iopts:\n ethx[\"mac\"] = iopts[i]\n break\n if \"mac\" not in ethx:\n ethx[\"mac\"] = salt.utils.network.gen_mac()\n # last round checking for unique gateway and such\n gw = None\n for ethx in [a for a in nic_opts]:\n ndata = nic_opts[ethx]\n if gw:\n ndata.pop(\"gateway\", None)\n if \"gateway\" in ndata:\n gw = ndata[\"gateway\"]\n gateway = None\n # only use a default bridge / gateway if we configured them\n # via the legacy salt cloud configuration style.\n # On other cases, we should rely on settings provided by the new\n # salt lxc network profile style configuration which can\n # be also be overridden or a per interface basis via the nic_opts dict.\n if bridge:\n eth0[\"link\"] = bridge\n if gateway:\n eth0[\"gateway\"] = gateway\n #\n lxc_init_interface = {}\n lxc_init_interface[\"name\"] = name\n lxc_init_interface[\"config\"] = config\n lxc_init_interface[\"memory\"] = _cloud_get(\"memory\", 0) # nolimit\n lxc_init_interface[\"pub_key\"] = pub_key\n lxc_init_interface[\"priv_key\"] = priv_key\n lxc_init_interface[\"nic_opts\"] = nic_opts\n for clone_from in [\"clone_from\", \"clone\", \"from_container\"]:\n # clone_from should default to None if not available\n lxc_init_interface[\"clone_from\"] = _cloud_get(clone_from, None)\n if lxc_init_interface[\"clone_from\"] is not None:\n break\n lxc_init_interface[\"profile\"] = profile\n lxc_init_interface[\"snapshot\"] = snapshot\n lxc_init_interface[\"dnsservers\"] = dnsservers\n lxc_init_interface[\"fstype\"] = fstype\n lxc_init_interface[\"path\"] = path\n lxc_init_interface[\"vgname\"] = vgname\n lxc_init_interface[\"size\"] = size\n lxc_init_interface[\"lvname\"] = lvname\n lxc_init_interface[\"thinpool\"] = thinpool\n lxc_init_interface[\"force_install\"] = force_install\n lxc_init_interface[\"unconditional_install\"] = unconditional_install\n lxc_init_interface[\"bootstrap_url\"] = script\n lxc_init_interface[\"bootstrap_args\"] = script_args\n lxc_init_interface[\"bootstrap_shell\"] = _cloud_get(\"bootstrap_shell\", \"sh\")\n lxc_init_interface[\"bootstrap_delay\"] = _cloud_get(\"bootstrap_delay\", None)\n lxc_init_interface[\"autostart\"] = autostart\n lxc_init_interface[\"users\"] = users\n lxc_init_interface[\"password\"] = password\n lxc_init_interface[\"password_encrypted\"] = password_encrypted\n # be sure not to let objects goes inside the return\n # as this return will be msgpacked for use in the runner !\n lxc_init_interface[\"network_profile\"] = network_profile\n for i in [\"cpu\", \"cpuset\", \"cpushare\"]:\n if _cloud_get(i, None):\n try:\n lxc_init_interface[i] = vm_[i]\n except KeyError:\n lxc_init_interface[i] = profile[i]\n return lxc_init_interface", "def create_machine(request):\n\n params = params_from_request(request)\n cloud_id = request.matchdict['cloud']\n\n for key in ('name', 'size'):\n if key not in params:\n raise RequiredParameterMissingError(key)\n\n key_id = params.get('key')\n machine_name = params['name']\n location_id = params.get('location', None)\n image_id = params.get('image')\n if not image_id:\n raise RequiredParameterMissingError(\"image\")\n # this is used in libvirt\n disk_size = int(params.get('libvirt_disk_size', 4))\n disk_path = params.get('libvirt_disk_path', '')\n size_id = params['size']\n # deploy_script received as unicode, but ScriptDeployment wants str\n script = str(params.get('script', ''))\n # these are required only for Linode/GCE, passing them anyway\n image_extra = params.get('image_extra', None)\n disk = params.get('disk', None)\n image_name = 
params.get('image_name', None)\n size_name = params.get('size_name', None)\n location_name = params.get('location_name', None)\n ips = params.get('ips', None)\n monitoring = params.get('monitoring', False)\n networks = params.get('networks', [])\n docker_env = params.get('docker_env', [])\n docker_command = params.get('docker_command', None)\n script_id = params.get('script_id', '')\n script_params = params.get('script_params', '')\n post_script_id = params.get('post_script_id', '')\n post_script_params = params.get('post_script_params', '')\n async = params.get('async', False)\n quantity = params.get('quantity', 1)\n persist = params.get('persist', False)\n docker_port_bindings = params.get('docker_port_bindings', {})\n docker_exposed_ports = params.get('docker_exposed_ports', {})\n azure_port_bindings = params.get('azure_port_bindings', '')\n # hostname: if provided it will be attempted to assign a DNS name\n hostname = params.get('hostname', '')\n plugins = params.get('plugins')\n cloud_init = params.get('cloud_init', '')\n associate_floating_ip = params.get('associate_floating_ip', False)\n associate_floating_ip_subnet = params.get('attach_floating_ip_subnet',\n None)\n project_id = params.get('project', None)\n bare_metal = params.get('bare_metal', False)\n # bare_metal True creates a hardware server in SoftLayer,\n # whule bare_metal False creates a virtual cloud server\n # hourly True is the default setting for SoftLayer hardware\n # servers, while False means the server has montly pricing\n softlayer_backend_vlan_id = params.get('softlayer_backend_vlan_id', None)\n hourly = params.get('billing', True)\n job_id = params.get('job_id')\n job_id = params.get('job_id')\n # The `job` variable points to the event that started the job. If a job_id\n # is not provided, then it means that this is the beginning of a new story\n # that starts with a `create_machine` event. If a job_id is provided that\n # means that the current event will be part of already existing, unknown\n # story. 
TODO: Provide the `job` in the request's params or query it.\n if not job_id:\n job = 'create_machine'\n job_id = uuid.uuid4().hex\n else:\n job = None\n\n # these are needed for OnApp\n size_ram = params.get('size_ram', 256)\n size_cpu = params.get('size_cpu', 1)\n size_disk_primary = params.get('size_disk_primary', 5)\n size_disk_swap = params.get('size_disk_swap', 1)\n boot = params.get('boot', True)\n build = params.get('build', True)\n cpu_priority = params.get('cpu_priority', 1)\n cpu_sockets = params.get('cpu_sockets', 1)\n cpu_threads = params.get('cpu_threads', 1)\n port_speed = params.get('port_speed', 0)\n hypervisor_group_id = params.get('hypervisor_group_id')\n\n auth_context = auth_context_from_request(request)\n\n try:\n Cloud.objects.get(owner=auth_context.owner,\n id=cloud_id, deleted=None)\n except Cloud.DoesNotExist:\n raise NotFoundError('Cloud does not exist')\n\n # compose schedule as a dict from relative parameters\n if not params.get('schedule_type'):\n schedule = {}\n else:\n if params.get('schedule_type') not in ['crontab',\n 'interval', 'one_off']:\n raise BadRequestError('schedule type must be one of '\n 'these (crontab, interval, one_off)]'\n )\n if params.get('schedule_entry') == {}:\n raise RequiredParameterMissingError('schedule_entry')\n\n schedule = {\n 'name': params.get('name'),\n 'description': params.get('description', ''),\n 'action': params.get('action', ''),\n 'script_id': params.get('schedule_script_id', ''),\n 'schedule_type': params.get('schedule_type'),\n 'schedule_entry': params.get('schedule_entry'),\n 'expires': params.get('expires', ''),\n 'start_after': params.get('start_after', ''),\n 'max_run_count': params.get('max_run_count'),\n 'task_enabled': bool(params.get('task_enabled', True)),\n 'auth_context': auth_context.serialize(),\n }\n\n auth_context.check_perm(\"cloud\", \"read\", cloud_id)\n auth_context.check_perm(\"cloud\", \"create_resources\", cloud_id)\n tags = auth_context.check_perm(\"machine\", \"create\", None) or {}\n if script_id:\n auth_context.check_perm(\"script\", \"run\", script_id)\n if key_id:\n auth_context.check_perm(\"key\", \"read\", key_id)\n\n # Parse tags.\n try:\n mtags = params.get('tags') or {}\n if not isinstance(mtags, dict):\n if not isinstance(mtags, list):\n raise ValueError()\n if not all((isinstance(t, dict) and len(t) is 1 for t in mtags)):\n raise ValueError()\n mtags = {key: val for item in mtags for key, val in item.items()}\n tags.update(mtags)\n except ValueError:\n raise BadRequestError('Invalid tags format. 
Expecting either a '\n 'dictionary of tags or a list of single-item '\n 'dictionaries')\n\n args = (cloud_id, key_id, machine_name,\n location_id, image_id, size_id,\n image_extra, disk, image_name, size_name,\n location_name, ips, monitoring, networks,\n docker_env, docker_command)\n kwargs = {'script_id': script_id,\n 'script_params': script_params, 'script': script, 'job': job,\n 'job_id': job_id, 'docker_port_bindings': docker_port_bindings,\n 'docker_exposed_ports': docker_exposed_ports,\n 'azure_port_bindings': azure_port_bindings,\n 'hostname': hostname, 'plugins': plugins,\n 'post_script_id': post_script_id,\n 'post_script_params': post_script_params,\n 'disk_size': disk_size,\n 'disk_path': disk_path,\n 'cloud_init': cloud_init,\n 'associate_floating_ip': associate_floating_ip,\n 'associate_floating_ip_subnet': associate_floating_ip_subnet,\n 'project_id': project_id,\n 'bare_metal': bare_metal,\n 'tags': tags,\n 'hourly': hourly,\n 'schedule': schedule,\n 'softlayer_backend_vlan_id': softlayer_backend_vlan_id,\n 'size_ram': size_ram,\n 'size_cpu': size_cpu,\n 'size_disk_primary': size_disk_primary,\n 'size_disk_swap': size_disk_swap,\n 'boot': boot,\n 'build': build,\n 'cpu_priority': cpu_priority,\n 'cpu_sockets': cpu_sockets,\n 'cpu_threads': cpu_threads,\n 'port_speed': port_speed,\n 'hypervisor_group_id': hypervisor_group_id}\n if not async:\n ret = methods.create_machine(auth_context.owner, *args, **kwargs)\n else:\n args = (auth_context.owner.id, ) + args\n kwargs.update({'quantity': quantity, 'persist': persist})\n tasks.create_machine_async.apply_async(args, kwargs, countdown=2)\n ret = {'job_id': job_id}\n ret.update({'job': job})\n return ret", "def create(vmname):\n\n imgpath = os.path.join(base_disk_path, vmname + '.img')\n shutil.copyfile(base_vm_img, imgpath)\n config = _vm_conf_template.format(**locals())\n vm = _conn.defineXML(config)\n xml = ET.fromstring(vm.XMLDesc(0))\n mac = xml.find('devices').find('interface').find('mac').attrib['address']\n infokeeper.add_vm(vmname, mac)\n return 'VM %s created' % vmname", "def create_vm(name, flavor_id, image_id, IPv4, logger):\n networks = [{'uuid': env_vars['cassandra_network_id']}]\n if IPv4: networks.append({'uuid': 2216})\n vm_id = -1\n try:\n logger.info(\"creating flavor %d, image %s\" % (flavor_id, image_id))\n my_dict = cyclades_client.create_server(name, flavor_id, image_id, personality=personality('root'),\n networks=networks)\n vm_id = my_dict['id']\n\n except ClientError as e:\n logger.error(\"failed to create server with kamaki\")\n logger.error(e)\n # print('Error: %s' % e)\n # if e.status:\n # print('- error code: %s' % e.status)\n # if e.details:\n # for detail in e.details:\n # print('- %s' % detail)\n raise Exception(\"Failed creating server\")\n return vm_id", "def create(self, spec, force_cache):\n\n instance_id = self.get_instance_id(spec)\n instance_dir = os.path.join(self.directory, instance_id)\n # create the directory to hold all the bits\n logger.info(\"Creating directory %s\" % (instance_dir, ))\n os.mkdir(instance_dir)\n\n logger.info(\"Creating virtual machine\")\n self.vboxmanage(\"createvm\", name=instance_id, directory=self.directory, ostype=self.ostype[spec.image.distro])\n self.vboxmanage(\"configurevm\", name=instance_id, memsize=spec.hardware.memory)\n network = self.guess_network()\n network.configurevm(instance_id)\n\n logger.info(\"Creating disk image from %s\" % (spec.image, ))\n # create the disk image and attach it\n disk = os.path.join(instance_dir, instance_id + 
\"_disk1.vdi\")\n self.qemu_img(\"convert\", source=spec.image.fetch(self.image_dir, force_cache), destination=disk, format=\"vdi\")\n self.vboxmanage(\"create_sata\", name=instance_id)\n self.vboxmanage(\"attach_disk\", name=instance_id, disk=disk)\n\n # create the seed ISO\n logger.info(\"Creating cloudinit seed\")\n config_class = self.configs[spec.image.distro]\n cloud_config = config_class(spec)\n meta_data = MetaData(spec.name)\n seed = Seed(instance_dir, cloud_config=cloud_config, meta_data=meta_data)\n seed.write()\n\n logger.info(\"Attaching devices\")\n # connect the seed ISO and the tools ISO\n self.vboxmanage(\"create_ide\", name=instance_id)\n self.vboxmanage(\"attach_ide\", name=instance_id, port=\"0\", device=\"0\", filename=seed.pathname)\n self.vboxmanage(\"attach_ide\", name=instance_id, port=\"0\", device=\"1\", filename=\"/usr/share/virtualbox/VBoxGuestAdditions.iso\")\n logger.info(\"Machine created\")\n\n logger.info(\"Mounting host drive\")\n hostpath = os.path.expanduser(\"~\")\n self.vboxmanage(\"mount\", name=instance_id, hostpath=hostpath)\n return self.load(instance_id)", "def launch(self, context, instance_name, mem_target,\n new_instance_ref, network_info, migration_url=None,\n use_image_service=False, image_refs=[], params={}):\n newname, path = self.pre_launch(context, new_instance_ref, network_info,\n migration=(migration_url and True),\n use_image_service=use_image_service,\n image_refs=image_refs)\n\n vmsargs = vmsrun.Arguments()\n for key, value in params.get('guest', {}).iteritems():\n vmsargs.add_param(key, value)\n\n # Launch the new VM.\n LOG.debug(_(\"Calling vms.launch with name=%s, new_name=%s, target=%s, \"\n \"migration_url=%s, vmsargs=%s\"),\n instance_name, newname, mem_target, str(migration_url),\n str(vmsargs.jsonize()))\n\n result = tpool.execute(commands.launch,\n instance_name,\n newname,\n str(mem_target),\n path=path,\n mem_url=migration_url,\n migration=(migration_url and True),\n vmsargs=vmsargs)\n\n LOG.debug(_(\"Called vms.launch with name=%s, new_name=%s, target=%s, \"\n \"migration_url=%s, vmsargs=%s\"),\n instance_name, newname, mem_target, str(migration_url),\n str(vmsargs.jsonize()))\n\n # Take care of post-launch.\n self.post_launch(context,\n new_instance_ref,\n network_info,\n migration=(migration_url and True))\n return result", "def CreateInstance(self, instance_name, image_name,\n image_project=_PROJECT_ID,\n machine_type=_DEFAULT_MACHINE_TYPE,\n metadata=None,\n network=_DEFAULT_NETWORK,\n scopes=_DEFAULT_SCOPES,\n use_ssd=False,\n disk_size_gb=20.0,\n zone=_DEFAULT_ZONE):\n\n # Before attempting to create the instance, verify that it does not exist.\n try:\n self.GetInstance(instance_name, zone=zone)\n except gapi_errors.HttpError as e:\n if e.resp and e.resp.status != 404:\n # If we get an unexpected error, raise it.\n raise e\n # If the error was 404, then we just exit this try-except-else and\n # continue.\n else:\n # If no error is thrown, that means that the instance exists, therefore\n # we raise an error.\n raise ComputeEngineApiError('The instance \"%s\" already exists.'\n % instance_name)\n\n zone_url = '%s/zones/%s' % (self._PROJECT_URL, zone)\n machine_type_url = '%s/machineTypes/%s' % (zone_url, machine_type)\n network_url = '%s/global/networks/%s' % (self._PROJECT_URL, network)\n image_url = '%s/%s/global/images/%s' % (gcloud_constants.GCE_API_URL,\n image_project, image_name)\n\n disk_type = 'pd-ssd' if use_ssd else 'pd-standard'\n disk_type_url = '%s/diskTypes/%s' % (zone_url, disk_type)\n\n if metadata 
is None:\n metadata = {}\n\n instance_descriptor = {\n 'name': instance_name,\n 'machineType': machine_type_url,\n 'disks': [{\n 'autoDelete': 'true',\n 'boot': 'true',\n 'type': 'PERSISTENT',\n 'initializeParams': {\n 'diskName': instance_name,\n 'diskSizeGb': disk_size_gb,\n 'diskType': disk_type_url,\n 'sourceImage': image_url\n }\n }],\n 'networkInterfaces': [{\n 'accessConfigs': [{\n 'type': 'ONE_TO_ONE_NAT',\n 'name': 'External NAT'\n }],\n 'network': network_url,\n }],\n 'serviceAccounts': [{\n 'email': self._SERVICE_EMAIL,\n 'scopes': scopes\n }],\n 'metadata': metadata,\n }\n\n request = self._service.instances().insert(\n project=self._PROJECT_ID, body=instance_descriptor, zone=zone)\n\n try:\n return self._RunWithRetries(request.execute, self._CommonErrorMatcher)\n except gapi_errors.HttpError as e:\n if e.resp and e.resp.status == 409:\n return self.GetInstance(instance_name, zone=zone)\n else:\n raise e", "def start_virtual_machine(self, vm):\n try:\n self.client.start_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def create(self):\n print(\"+ Creating cluster: {}. This may take a few minutes ...\".format(self.name_hyphenated))\n if self.num_gpus == 0:\n out = util.syscall(\"gcloud container clusters create {} -m {} --disk-size {} --num-nodes {} {}\".\n format(self.name_hyphenated, self.machine_type, self.disk_size, self.num_nodes,\n \"--zone \" + self.location if self.location else \"\"), return_outputs=\"as_str\")\n else:\n out = util.syscall(\"gcloud container clusters create {} --enable-cloud-logging --enable-cloud-monitoring \"\n \"--accelerator type={},count={} {} -m {} --disk-size {} --enable-kubernetes-alpha \"\n \"--image-type UBUNTU --num-nodes {} --cluster-version 1.9.2-gke.1 --quiet\".\n format(self.name_hyphenated, self.gpu_type, self.gpus_per_node,\n \"--zone \"+self.location if self.location else \"\", self.machine_type, self.disk_size,\n self.num_nodes), return_outputs=\"as_str\")\n # check output of cluster generating code\n if re.search(r'error', out, re.IGNORECASE):\n raise util.TFCliError(out)\n else:\n print(\"+ Successfully created cluster.\")\n self.instances, self.primary_name = util.get_compute_instance_specs(self.name_hyphenated)\n self.started = True\n\n # install NVIDIA drivers on machines per local kubectl\n if self.num_gpus > 0:\n print(\"+ Installing NVIDIA GPU drivers and k8s device plugins ...\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/GoogleCloudPlatform/\"\n \"container-engine-accelerators/k8s-1.9/daemonset.yaml\")\n util.syscall(\"kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n util.syscall(\"kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/\"\n \"release-1.9/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml\")\n\n print(\"+ Done. 
Cluster: {} created.\".format(self.name_hyphenated))", "def up(self, arguments):\n gui = arguments['--gui']\n save = not arguments['--no-cache']\n requests_kwargs = utils.get_requests_kwargs(arguments)\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n utils.index_active_instance(instance_name)\n\n vmx = utils.init_box(self.box_name, self.box_version, requests_kwargs=requests_kwargs, save=save)\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n puts_err(colored.blue(\"Bringing machine up...\"))\n started = vmrun.start(gui=gui)\n if started is None:\n puts_err(colored.red(\"VM not started\"))\n else:\n time.sleep(3)\n puts_err(colored.blue(\"Getting IP address...\"))\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n puts_err(colored.blue(\"Sharing current folder...\"))\n vmrun.enableSharedFolders()\n vmrun.addSharedFolder('mech', os.getcwd(), quiet=True)\n if ip:\n if started:\n puts_err(colored.green(\"VM started on {}\".format(ip)))\n else:\n puts_err(colored.yellow(\"VM was already started on {}\".format(ip)))\n else:\n if started:\n puts_err(colored.green(\"VM started on an unknown IP address\"))\n else:\n puts_err(colored.yellow(\"VM was already started on an unknown IP address\"))" ]
[ "0.72538877", "0.72196287", "0.6990168", "0.69501716", "0.68095875", "0.64573157", "0.64194506", "0.6372524", "0.6343484", "0.6325651", "0.6299524", "0.6254636", "0.62357014", "0.6076735", "0.60734236", "0.60668397", "0.6060681", "0.5957012", "0.5925895", "0.5918209", "0.58862084", "0.5885182", "0.5878289", "0.5871909", "0.5856081", "0.5843454", "0.5837422", "0.5813378", "0.5783024", "0.5779247" ]
0.75047314
0
remove elements from values that are not part of form if elements in `to_remove` contains a '!' it will ignore everything EXCEPT the !, can not mix ! and normal, if it detects a ! it will ignore all besides elements with !
def remove_from_values(values, to_remove):
    to_keep = []
    for x in to_remove:
        if '!' in x:
            to_keep.append(x.replace("!", ""))
    if len(to_keep) == 0:
        for x in to_remove:
            del values[x]
    else:
        tmp_values = values.copy()
        for key in tmp_values.keys():
            if key not in to_keep:
                del values[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_values(self, name, remove):\n # Do we need to modify a mask's lib def.?\n if not self.is_array(name) and self._is_array_item(name):\n name = self._maskname_from_item(name)\n # Are any meta undefined codes provided? - Warn user!\n values = self._get_value_loc(name)\n codes = self.codes(name)\n ignore_codes = [r for r in remove if r not in codes]\n if ignore_codes:\n print('Warning: Cannot remove values...')\n print('*' * 60)\n msg = \"Codes {} not found in values object of '{}'!\"\n print(msg.format(ignore_codes, name))\n print('*' * 60)\n remove = [x for x in remove if x not in ignore_codes]\n # Would be remove all defined values? - Prevent user from doing this!\n new_values = [value for value in values\n if value['value'] not in remove]\n if not new_values:\n msg = \"Cannot remove all codes from the value object of '{}'!\"\n raise ValueError(msg.format(name))\n # Apply new ``values`` definition\n if self.is_array(name):\n self._meta['lib']['values'][name] = new_values\n else:\n self._meta['columns'][name]['values'] = new_values\n # Remove values in ``data``\n if self.is_array(name):\n items = self._get_itemmap(name, 'items')\n for i in items:\n self.uncode(i, {x: {i: x} for x in remove})\n self._verify_data_vs_meta_codes(i)\n else:\n self.uncode(name, {x: {name: x} for x in remove})\n self._verify_data_vs_meta_codes(name)\n # convert delimited set to single if only one cat is left\n self._prevent_one_cat_set(name)\n return None", "def remove_unuseful(remove_fields: np.ndarray, remove_values: np.ndarray):\n remove_fields = remove_fields[[0, 1, 2, 3, 4, 6]]\n remove_values = remove_values[:, [0, 1, 2, 3, 4, 6]]\n return remove_fields, remove_values", "def remove_ei(remove_fields: np.ndarray, remove_values: np.ndarray):\n remove_fields = remove_fields[2:10]\n remove_values = remove_values[:, 2:10]\n return remove_fields, remove_values", "def clean_values(values_to_clean: np.ndarray):\n char_rem = \"!@#$%^*()[]{};:.,/<>?|`~-=_+'\\\\\"\n for j in range(values_to_clean.shape[0]):\n for k in range(2, 4):\n for c in char_rem:\n values_to_clean[j, k] = re.sub(' +', ' ', values_to_clean[j, k].replace(c, \" \").strip())\n return values_to_clean", "def _remove_data(things, lst_remove=None):\n\n for data in things:\n data.pop(\"_sa_instance_state\", None)\n data.pop(\"user_id\", None)\n\n if lst_remove is not None:\n for str_remove in lst_remove:\n if str_remove in data:\n data.pop(str_remove, None)\n\n return things", "def remove(self,values):\n for box, value in values.items():\n if len(value) == 1:\n for peer in self.peers[box]:\n values = self.remove_digit(values, peer, value)\n return values", "def eliminate(values):\n\tsolved = [box for box in boxes if len(values[box]) == 1]\n\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\tfor empty in empties:\n\t\tvalues[empty] = '123456789'\n\n\tfor box in solved:\n\n\t\tfor peer in peers[box]:\n\t\t\tvalues = assign_value(values, peer, values[peer].replace(values[box], ''))\n\n\treturn values", "def clean(self, value):\n return [f.clean(v) for v,f in zip(value, self.fields)]", "def eliminate(values):\n for box, val in values.items():\n if len(val) == 1:\n for peer in peers[box]:\n values[peer] = values[peer].replace(val, '')", "def eliminate(values):\n # TODO: Copy your code from the classroom to complete this function\n for box,value in values.items():\n #print (box,value)\n if len(values[box]) == 1:\n for peer in peers[box]:\n if value in values[peer]:\n values[peer] = values[peer].replace(value,'')\n return values", "def 
_remove_all_matches(values, needle):\n values[:] = (i for i in values if i != needle)", "def eliminate(values):\n for b in boxes:\n if len(values[b]) == 1:\n for p in peers[b]:\n values = assign_value(values, p, values[p].replace(values[b], ''))\n return values", "def remove(requirements: Iterable[str], to_remove: Iterable[str]) -> List[str]:\n removable = {Requirement(r).name for r in to_remove}\n return [r for r in requirements if Requirement(r).name not in removable]", "def remove_matching(self, room, expr, user):\n room, user = str(room), str(user)\n as_pattern = re.compile(expr, re.I)\n\n to_remove = []\n\n with self._lock:\n regexes_for_room = self.notifications.get(room, {})\n for regex, users_for_regex in regexes_for_room.items():\n # check for exact match or pattern match\n if regex == expr or as_pattern.search(regex):\n if user in users_for_regex:\n to_remove.append(regex)\n\n # remove regexes after matching, to avoid mutating-while-iterating\n for regex in to_remove:\n self._remove(room, regex, user)\n\n if to_remove:\n self._save()\n\n return to_remove", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n # values[peer] = values[peer].replace(digit, '')\n new_value = values[peer].replace(digit, '')\n assign_value(values, peer, new_value)\n return values", "def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values", "def _clean(matches):\n # type: (List[str]) -> None\n while True:\n try:\n matches.remove(\"\")\n except ValueError:\n break\n\n while True:\n try:\n matches.remove(\",\")\n except ValueError:\n return", "def test_remove_all_values2(delete_tree):\n delete_tree.remove(\"ted\")\n delete_tree.remove(\"teabagged\")\n delete_tree.remove(\"tea\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"teabaggers\")", "def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n a2_data['I'][i]['R'].pop('nufi')\n for r in a2_data['I'][i]['R'].keys():\n if any(x in r for x in ['111', '112', '122', '212', '222', '211', '322',\n '321', '312', '311', '221', '121']):\n a2_data['I'][i]['R'].pop(r)\n return a2_data", "def eliminate(values):\n complete_boxes = [box for box in values.keys() if len(values[box])==1]\n for box in complete_boxes:\n for peer in peers[box]:\n values = assign_value(values, peer, values[peer].replace(values[box], \"\"))\n \n return values", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def eliminate(values):\n solved_values = [box for box in 
values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def remove_elements(l, e):\n return [x for x in l if x != e]", "def _clean_ignored(self, tokens):\n return list(filter(lambda t : t.token_value not in self._ignored, tokens))", "def eliminate(values):\r\n\r\n ''' Your solution here '''\r\n for key, value in values.items():\r\n if (len(value) == 1):\r\n for key_peer in peers[key]:\r\n values[key_peer] = values[key_peer].replace(value, '')\r\n return values", "def clean_remove_values(cls, cleaned_input, instance):\n remove_values = cleaned_input.get(\"remove_values\", [])\n for value in remove_values:\n if value.attribute != instance:\n msg = \"Value %s does not belong to this attribute.\" % value\n raise ValidationError(\n {\n \"remove_values\": ValidationError(\n msg, code=AttributeErrorCode.INVALID\n )\n }\n )\n return remove_values", "def list_cleanup(self, data):\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n if filter_value not in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n return data", "def remove_values(self, values: Collection[Hashable]) -> bool:\n\t\tany_values_removed = False\n\n\t\tfor value in values:\n\t\t\tif value in self._potential_values:\n\t\t\t\tself._potential_values.remove(value)\n\t\t\t\tany_values_removed = True\n\n\t\treturn any_values_removed", "def Clean(pmf):\n vals = [val for val in pmf.Values() if val < thresh]\n [pmf.Remove(val) for val in vals]", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))" ]
[ "0.6221369", "0.61069876", "0.6103691", "0.60917854", "0.5998779", "0.5948206", "0.59433925", "0.59233046", "0.59129953", "0.5878356", "0.5876084", "0.580941", "0.5638583", "0.5597303", "0.55797964", "0.55451703", "0.55345356", "0.54755443", "0.5473616", "0.5463152", "0.5457681", "0.5457681", "0.54440105", "0.5435991", "0.5434311", "0.5415044", "0.5405144", "0.53982264", "0.5397951", "0.53909487" ]
0.80732477
0
Function plot_obs(rxLoc,d) Generate a 2d interpolated plot from scatter points of data INPUT
def plot_obs_2D(rxLoc, d=None, varstr='TMI Obs', vmin=None, vmax=None, levels=None, fig=None, axs=None):

    from scipy.interpolate import griddata
    import pylab as plt

    # Plot result
    if fig is None:
        fig = plt.figure()

    if axs is None:
        axs = plt.subplot()

    plt.sca(axs)
    plt.scatter(rxLoc[:, 0], rxLoc[:, 1], c='k', s=10)

    if d is not None:

        if (vmin is None):
            vmin = d.min()

        if (vmax is None):
            vmax = d.max()

        # Create grid of points
        x = np.linspace(rxLoc[:, 0].min(), rxLoc[:, 0].max(), 100)
        y = np.linspace(rxLoc[:, 1].min(), rxLoc[:, 1].max(), 100)

        X, Y = np.meshgrid(x, y)

        # Interpolate
        d_grid = griddata(rxLoc[:, 0:2], d, (X, Y), method='linear')

        plt.imshow(d_grid, extent=[x.min(), x.max(), y.min(), y.max()], origin='lower', vmin=vmin, vmax=vmax, cmap="plasma")
        plt.colorbar(fraction=0.02)

        if levels is None:
            plt.contour(X, Y, d_grid, 10, vmin=vmin, vmax=vmax, cmap="plasma")
        else:
            plt.contour(X, Y, d_grid, levels=levels, colors='r', vmin=vmin, vmax=vmax, cmap="plasma")

    plt.title(varstr)
    plt.gca().set_aspect('equal', adjustable='box')

    return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_plot(x,y):", "def scatter_stat(obs, model, label, xlabel, ylabel, rasterized=False):\n\n pl.scatter(obs, model, s=1, color=c2, rasterized=rasterized)\n lim_and_line2(obs, model)\n pl.xlabel(xlabel)\n pl.ylabel(ylabel)", "def save_plot_interpolate(input_samples, samples, idx, identifier, num_epochs=None, distances=None, sigma=1):\n n_samples = samples.shape[0]\n sample_length = samples.shape[1]\n\n if not num_epochs is None:\n col = hsv_to_rgb((1, 1.0*(idx)/num_epochs, 0.8))\n else:\n col = 'grey'\n\n x_points = np.arange(sample_length)\n if distances is None:\n nrow = n_samples\n else:\n nrow = n_samples + 1\n ncol = 1\n fig, axarr = plt.subplots(nrow, ncol, figsize=(3, 9))\n if distances is None:\n startat = 0\n else:\n startat = 1\n axarr[0].plot(distances.dA, color='green', label='distance from A', linestyle='--', marker='o', markersize=4)\n axarr[0].plot(distances.dB, color='orange', label='distance from B', linestyle='dotted', marker='o', markersize=4)\n axarr[0].get_xaxis().set_visible(False)\n axarr[0].set_title('distance from endpoints')\n for m in range(startat, nrow):\n sample = samples[m-startat, :, 0]\n axarr[m].plot(x_points, sample, color=col)\n for m in range(startat, nrow):\n axarr[m].set_ylim(-1.1, 1.1)\n axarr[m].set_xlim(0, sample_length)\n axarr[m].spines[\"top\"].set_visible(False)\n axarr[m].spines[\"bottom\"].set_visible(False)\n axarr[m].spines[\"right\"].set_visible(False)\n axarr[m].spines[\"left\"].set_visible(False)\n axarr[m].tick_params(bottom='off', left='off')\n axarr[m].get_xaxis().set_visible(False)\n axarr[m].get_yaxis().set_visible(False)\n axarr[m].set_facecolor((0.96, 0.96, 0.96))\n if not input_samples is None:\n # now do the real samples\n axarr[startat].plot(x_points, input_samples[0], color='green', linestyle='--')\n axarr[-1].plot(x_points, input_samples[1], color='green', linestyle='--')\n\n axarr[-1].xaxis.set_ticks(range(0, sample_length, int(sample_length/4)))\n fig.suptitle(idx)\n fig.subplots_adjust(hspace = 0.2)\n fig.savefig(\"./experiments/plots/\" + identifier + \"_interpolate.png\")\n fig.savefig(\"./experiments/plots/\" + identifier + \"_interpolate.pdf\")\n plt.clf()\n plt.close()\n return", "def plot_2d_interactive_fig(xy, colours, spatial_data = None, temporal_data = None,\n inset_axes_side = {'x':0.1, 'y':0.1}, arrow_length = 0.1, figsize = (10,6), \n labels = None, legend = None, markers = None, \n figures = 'window', png_path = './', fig_filename = '2d_interactive_plot'):\n def remove_axes2_and_arrow(fig):\n \"\"\" Given a figure that has a second axes and an annotation arrow due to a \n point having been hovered on, remove this axes and annotation arrow. \n Inputs:\n fig | matplotlib figure \n Returns:\n History:\n 2020/09/08 | MEG | Written\n \"\"\"\n # 1: try and remove any axes except the primary one\n try:\n fig.axes[1].remove() \n except:\n pass\n \n # 2: try and remove any annotation arrows\n for art in axes1.get_children():\n if isinstance(art, matplotlib.patches.FancyArrow):\n try:\n art.remove() \n except:\n continue\n else:\n continue\n fig.canvas.draw_idle() # update the figure\n \n \n def axes_data_to_fig_percent(axes_lims, fig_lims, point):\n \"\"\" Given a data point, find where on the figure it plots (ie convert from axes coordinates to figure coordinates) \n Inputs:\n axes_xlims | tuple | usually just the return of something like: axes1.get_ylim()\n fig_lims | tuple | the limits of the axes in the figure. 
usuall (0.1, 0.9) for an axes made with something like this: axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes\n point |float | point in data coordinates\n Returns:\n fig_position | float | where the data point is in the figure. (0,0) would be the lower left corner. \n History:\n 2020/09/08 | MEG | Written\n \n \"\"\"\n gradient = (fig_lims[1] - fig_lims[0])/(axes_lims[1] - axes_lims[0])\n y_intercept = fig_lims[0] - (gradient * axes_lims[0])\n fig_position = (gradient * point) + y_intercept\n return fig_position\n \n def calculate_insetaxes_offset(lims, points, offset_length):\n \"\"\"\n The offsets between the inset axes and the point are different depending on which quadrant of the graph the point is in. \n Inputs:\n lims | list | length is equal to the number of dimensions. Filled with tuples of the axes limits. \n point | list | length is equal to the number of diemsions. Filled with points. \n offset_length | float | length of the arrow. \n Returns:\n offsets | list | length is equal to the number of dimensions. Length of offset for inset axes in each dimension. \n History:\n 2020/09/08 | MEG | Written\n \"\"\"\n import numpy as np\n offsets = []\n for dim_n in range(len(lims)): # loop through each dimension. \n dim_centre = np.mean(lims[dim_n])\n if points[dim_n] < dim_centre:\n offsets.append(-offset_length)\n else:\n offsets.append(offset_length)\n return offsets\n \n def hover(event):\n if event.inaxes == axes1: # determine if the mouse is in the axes\n cont, ind = sc.contains(event) # cont is a boolean of if hoving on point, ind is a dictionary about the point being hovered over. Note that two or more points can be in this. \n if cont: # if on point\n remove_axes2_and_arrow(fig) # remove the axes and arrow created when hovering on the point (incase cursor moves from one point to next without going off a point)\n point_n = ind['ind'][0] # get the index of which data point we're hovering on in a simpler form. \n \n # 1: Add the annotation arrow (from inset axes to data point)\n arrow_lengths = calculate_insetaxes_offset([axes1.get_xlim(), axes1.get_ylim()], \n [xy[0,point_n], xy[1,point_n]], arrow_length) # calculate the length of the arrow, which depends which quadrant we're in (as the arrow always go away from the plot)\n axes1.arrow(xy[0,point_n] + arrow_lengths[0], xy[1,point_n] + arrow_lengths[1], # add the arrow. Notation is all a bit backward as head is fixed at end, so it has to be drawn backwards. \n -arrow_lengths[0], -arrow_lengths[1], clip_on = False, zorder = 999) # clip_on makes sure it's visible, even if it goes off the edge of the axes. 
\n\n # 2: Add the inset axes \n fig_x = axes_data_to_fig_percent(axes1.get_xlim(), (0.1, 0.9), xy[0,point_n] + arrow_lengths[0]) # convert position on axes to position in figure, ready to add the inset axes\n fig_y = axes_data_to_fig_percent(axes1.get_ylim(), (0.1, 0.9), xy[1,point_n] + arrow_lengths[1]) # ditto for y dimension\n if arrow_lengths[0] > 0 and arrow_lengths[1] > 0: # top right quadrant\n inset_axes = fig.add_axes([fig_x, fig_y, # create the inset axes, simple case, anochored to lower left forner\n inset_axes_side['x'], inset_axes_side['y']], anchor = 'SW') \n elif arrow_lengths[0] < 0 and arrow_lengths[1] > 0: # top left quadrant\n inset_axes = fig.add_axes([fig_x - inset_axes_side['x'], fig_y, # create the inset axes, nudged in x direction, anchored to lower right corner\n inset_axes_side['x'], inset_axes_side['y']], anchor = 'SE') \n elif arrow_lengths[0] > 0 and arrow_lengths[1] < 0: # lower right quadrant\n inset_axes = fig.add_axes([fig_x, fig_y - inset_axes_side['y'], # create the inset axes, nudged in y direction\n inset_axes_side['x'], inset_axes_side['y']], anchor = 'NW') \n else: # lower left quadrant\n inset_axes = fig.add_axes([fig_x - inset_axes_side['x'], fig_y - inset_axes_side['y'], # create the inset axes, nudged in both x and y\n inset_axes_side['x'], inset_axes_side['y']], anchor = 'NE') \n \n # 3: Plot on the inset axes\n if temporal_data is not None:\n inset_axes.plot(temporal_data['xvals'], temporal_data['tcs_r2'][point_n,]) # draw the inset axes time course graph\n inset_axes.axhline(0)\n if spatial_data is not None:\n inset_axes.matshow(spatial_data['images_r3'][point_n,]) # or draw the inset axes image\n inset_axes.set_xticks([]) # and remove ticks (and so labels too) from x\n inset_axes.set_yticks([]) # and from y\n fig.canvas.draw_idle() # update the figure. \n else: # else not on a point\n remove_axes2_and_arrow(fig) # remove the axes and arrow created when hovering on the point \n else: # else not in the axes\n remove_axes2_and_arrow(fig) # remove the axes and arrow created when hovering on the point (incase cursor moves from one point to next without going off a point)\n \n import matplotlib.pyplot as plt\n import matplotlib\n import numpy as np\n\n # 1: Check some inputs:\n if temporal_data is None and spatial_data is None: # check inputs\n raise Exception(\"One of either spatial or temporal data must be supplied. Exiting. \")\n if temporal_data is not None and spatial_data is not None:\n raise Exception(\"Only either spatial or temporal data can be supplied, but not both. Exiting. \")\n\n # 2: Draw the figure\n fig = plt.figure(figsize = figsize) # create the figure, size set in function args. \n axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes\n if markers is None: # if a dictionary about different markers is not supplied... \n sc = axes1.scatter(xy[0,],xy[1,],c=colours, s=100) # draw the scatter plot, just draw them all with the default marker\n else: # but if we do have a dictionary of markers. 
\n n_markers = len(markers['styles']) # get the number of unique markers\n for n_marker in range(n_markers): # loop through each marker style\n point_args = np.ravel(np.argwhere(markers['labels'] == n_marker)) # get which points have that marker style\n try:\n sc = axes1.scatter(xy[0,point_args], xy[1,point_args], c=colours[point_args], s=100, marker = markers['styles'][n_marker]) # draw the scatter plot with different marker styles\n except:\n pass\n sc = axes1.scatter(xy[0,],xy[1,],c=colours, s=100, alpha = 0.0) # draw the scatter plot again with all the points (regardless of marker style), but with invisble markers. As the last to be drawn, these are the ones that are hovered over, and indexing works as all the points are draw this time. \n\n # 3: Try and add various labels from the labels dict\n try:\n fig.canvas.manager.set_window_title(labels['title'])\n fig.suptitle(labels['title'])\n except:\n pass\n try:\n axes1.set_xlabel(labels['xlabel'])\n except:\n pass\n try:\n axes1.set_ylabel(labels['ylabel'])\n except:\n pass\n \n # 4: Possibly add a legend, using the legend dict. \n if legend is not None:\n axes1.legend(handles = legend['elements'], labels = legend['labels'], \n bbox_to_anchor=(1., 0.5), loc = 'center right', bbox_transform=plt.gcf().transFigure) # Put a legend to the right of the current axis. bbox is specified in figure coordinates. \n \n fig.canvas.mpl_connect(\"motion_notify_event\", hover) # connect the figure and the function. \n \n if figures == 'window':\n pass\n elif figures == \"png\":\n fig.savefig(f\"{png_path}/{fig_filename}.png\")\n plt.close()\n elif figures == 'png+window':\n fig.savefig(f\"{png_path}/{fig_filename}.png\")\n else:\n pass", "def plot_obs_noise_dist(obs_fn, obs_noise, min_dist=0, max_dist=10):\n x = np.linspace(min_dist, max_dist, 100)\n y = [obs_fn(xi) for xi in x]\n plt.plot(x, y)\n plt.xlabel(\"Distance\")\n plt.ylabel(\"Probability\")\n plt.xlim((min_dist, max_dist))\n plt.ylim((0, 1))\n plt.axvline(x=obs_noise, color='red', alpha=0.7)\n plt.annotate(\"Observation noise stddev\", xy=(obs_noise, 0))", "def plot_obs(self):\n if self.obs_im is None and self.obs_ax is None:\n fig, self.obs_ax = plt.subplots()\n self.obs_ax.set_title('Observation')\n self.obs_ax.set_xticks(())\n self.obs_ax.set_yticks(())\n self.obs_im = self.obs_ax.imshow(self.obs, cmap='gray')\n else:\n self.obs_im.set_data(self.obs)", "def coords_plot(self):\n self.load_coords()\n x = []\n y = []\n px = [] \n for item in self.coords:\n if item[1] >52.10 and item[1] <52.4 and item[2]>20.8 and item [2] <21.4:\n x.append(item[1])\n y.append(item[2])\n px.append(item[3])\n plt.scatter(x,y,c=px,s=150,alpha=0.3)\n plt.show()", "def make_2d_scatter_plot(self, xdata, ydata, xlabel=None, xunits=None,\n ylabel=None, yunits=None, title=None,\n subplotnum=None, num_rows=None,\n plot_cor=True, set_range=True):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n if not set_range:\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n\n plt.scatter(xdata, ydata)\n\n # Adjust ranges unless told otherwise\n if set_range:\n if isinstance(xdata, list):\n hrange = max(xdata) - min(xdata)\n if hrange != 0.0:\n plt.xlim(min(xdata)-0.1*hrange,\n max(xdata)+0.1*hrange)\n elif isinstance(xdata, np.ndarray):\n hrange = xdata.max() - xdata.min()\n if hrange != 0.0:\n plt.xlim(xdata.min()-0.1*hrange,\n xdata.max()+0.1*hrange)\n if isinstance(ydata, list):\n vrange = max(ydata) - min(ydata)\n if vrange != 0.0:\n plt.ylim(min(ydata)-0.1*vrange,\n max(ydata)+0.3*vrange)\n 
elif isinstance(ydata, np.ndarray):\n vrange = ydata.max() - ydata.min()\n if vrange != 0.0:\n plt.ylim(ydata.min()-0.1*vrange,\n ydata.max()+0.3*vrange)\n else:\n plt.xlim(xlim)\n plt.ylim(ylim)\n if plot_cor:\n # Calculate correlation and annotate\n rho, pval = self.get_correlation_coefficient(\n xdata=xdata,\n ydata=ydata,\n xsystkey=xlabel,\n ysystkey=ylabel\n )\n if (len(set(xdata)) != 1) and (len(set(ydata)) != 1):\n if subplotnum is not None:\n if num_rows is None:\n raise ValueError(\n \"Need to know the number of rows in \"\n \"order to correctly place the correlation \"\n \"annotation on the subplot\"\n )\n row = int((subplotnum-1)/4)\n xtext = 0.25*0.25+((subplotnum-1)%4)*0.25\n ytext = 0.88-(1.0/num_rows)*0.9*row\n plt.figtext(\n xtext,\n ytext,\n 'Correlation = %.2f'%rho,\n fontsize='large'\n )\n else:\n plt.figtext(\n 0.15,\n 0.80,\n 'Correlation = %.2f'%rho,\n fontsize=16\n )\n\n # Set labels, if required\n if xlabel is not None:\n nice_xlabel = self.make_label(xlabel, xunits)\n plt.xlabel(nice_xlabel, fontsize=16)\n if ylabel is not None:\n nice_ylabel = self.make_label(ylabel, yunits)\n plt.ylabel(nice_ylabel, fontsize=16)\n if subplotnum is None and (title is not None):\n plt.title(title, fontsize=16)", "def plot_scatter(fdata, chart=None, *, sample_points=None, derivative=0,\n fig=None, axes=None,\n n_rows=None, n_cols=None, n_points=None, domain_range=None,\n sample_labels=None, label_colors=None, label_names=None,\n **kwargs):\n\n if sample_points is None:\n # This can only be done for FDataGrid\n sample_points = fdata.sample_points\n evaluated_points = fdata.data_matrix\n else:\n evaluated_points = fdata(sample_points, grid=True)\n\n fig, axes = _get_figure_and_axes(chart, fig, axes)\n fig, axes = _set_figure_layout_for_fdata(fdata, fig, axes, n_rows, n_cols)\n\n if domain_range is None:\n domain_range = fdata.domain_range\n else:\n domain_range = _list_of_arrays(domain_range)\n\n sample_colors, patches = _get_color_info(\n fdata, sample_labels, label_names, label_colors, kwargs)\n\n if fdata.dim_domain == 1:\n\n color_dict = {}\n\n for i in range(fdata.dim_codomain):\n for j in range(fdata.n_samples):\n\n if sample_colors is not None:\n color_dict[\"color\"] = sample_colors[j]\n\n axes[i].scatter(sample_points[0],\n evaluated_points[j, ..., i].T,\n **color_dict, **kwargs)\n\n else:\n\n X = fdata.sample_points[0]\n Y = fdata.sample_points[1]\n X, Y = np.meshgrid(X, Y)\n\n color_dict = {}\n\n for i in range(fdata.dim_codomain):\n for j in range(fdata.n_samples):\n\n if sample_colors is not None:\n color_dict[\"color\"] = sample_colors[j]\n\n axes[i].scatter(X, Y,\n evaluated_points[j, ..., i].T,\n **color_dict, **kwargs)\n\n _set_labels(fdata, fig, axes, patches)\n\n return fig", "def plot(model, center, extent, outname):\n # define model grid\n xg = np.linspace(-extent, extent, model.shape[0])\n yg = xg.copy()\n interp_func = RectBivariateSpline(xg, yg, model)\n\n x = np.array([-2, -1, 0, 1, 2]) + center[0]\n y = np.array([-2, -1, 0, 1, 2]) + center[1]\n psf = interp_func(x, y)\n\n x, y = np.meshgrid(x, y)\n f = pl.figure(figsize=(10, 5))\n\n pl.gray()\n ax1 = pl.subplot(121)\n ax1.imshow(model, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n ax1.plot(x, y, 's', mec='r', mfc='none', mew=2)\n\n pl.xlim(-2.5, 2.5)\n pl.ylim(-2.5, 2.5)\n ax2 = pl.subplot(122)\n ax2.imshow(psf, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n 
norm=LogNorm(vmin=model.min(), vmax=model.max()))\n\n ax2.set_xticks([-2, -1, 0, 1, 2])\n ax2.set_yticks([-2, -1, 0, 1, 2])\n ax2.set_xticklabels(['%0.3f' % v for v in x[0]])\n ax2.set_yticklabels(['%0.3f' % v for v in y[:, 0]])\n\n coordsA, coordsB = \"data\", \"data\"\n pixels = np.array([[0.0, 0.0], [2., 2.], [-1., -1.]])\n locs = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, -0.5]])\n rads = [0.15, 0.25, -0.25]\n for i, p in enumerate(pixels):\n xy1 = p + center\n xy2 = p + locs[i]\n con = ConnectionPatch(xyA=xy2, xyB=xy1, coordsA=coordsA,\n coordsB=coordsB, axesA=ax2, axesB=ax1,\n arrowstyle=\"<-, head_length=1.2, head_width=0.8\", \n shrinkB=5,\n connectionstyle='arc3, rad=%s' % rads[i],\n color='r', lw=2)\n ax2.add_artist(con)\n ax2.plot(p[0], p[1], 's', mfc='none', mec='r', mew=2, ms=50)\n\n #pl.xlim(-2.5, 2.5)\n #pl.ylim(-2.5, 2.5)\n f.savefig(outname)", "def visualizeObs():\n fcontourf(fObs, [-2, 2], [-1, 1], [0, 10])", "def plot_xy(nc,params,tms,lev=None):\n \n import matplotlib.pyplot as plt\n import ggWRFutils as gW\n from datetime import datetime\n import numpy as np\n wvar={}\n for p in params:\n if p != 'Times':\n if p=='WS10':\n wvar[p]=np.sqrt(nc.variables['U10'][:]**2+nc.variables['U10'][:]**2)\n elif p=='UV10': \n wvar['U10']=nc.variables['U10'][:,:,:] \n wvar['V10']=nc.variables['V10'][:,:,:] \n elif p=='UV':\n wvar['U']=nc.variables['U'][:,lev,:,:] \n wvar['V']=nc.variables['V'][:,lev,:,:] \n elif len(nc.variables[p].shape) > 3:\n wvar[p]=nc.variables[p][:,lev,:,:] \n else: \n wvar[p]=nc.variables[p][:] \n Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc)\n for p in params:\n if params[p]=='pcolor':\n plt.pcolor(lon,lat,wvar[p][tms,:,:],shading='flat')\n plt.colorbar()\n if params[p]=='contourf':\n plt.contourf(lon,lat,wvar[p][tms,:,:],50)\n plt.colorbar()\n if params[p]=='contour':\n plt.contourf(lon,lat,wvar[p][tms,:,:])\n plt.colorbar()\n if params[p]=='quiver':\n if p=='UV10':\n plt.quiver(lon[::10,::10],lat[::10,::10],wvar['U10'][tms,::10,::10],wvar['V10'][tms,::10,::10],units='width')\n elif p=='UV':\n plt.quiver(lon,lat,wvar['U'][tms,:,:],wvar['V'][tms,:,:])\n plt.hold(True)\n plt.xlim(lon.min(),lon.max())\n plt.ylim(lat.min(),lat.max())\n fig=plt.gcf()\n return fig", "def plot_lattice_points2D(Q, markersize=12, color='b', marker='o'):\n\n ax = plt.gca()\n axsize = ax.axis()\n\n ax.plot(Q[:, 0], Q[:, 1], markersize=markersize, color=color, marker=marker)\n ax.axis(axsize)", "def plotTrajectory(arg, color = sf.cyan, xyRate=True, radiusRate = 80.0\n , blAxes = True):\n if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):\n arg = list(arg)\n\n vs = sf.vs_()\n\n color = tuple(color) # color argment may be list/vector\n if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(\n arg,type(sf.sc.array([0,]))):\n from octnOp import ClOctonion\n if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):\n arg = [ (x.real, x.imag) for x in arg]\n elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):\n arg = [ x[1:4] for x in arg]\n\n if len(arg[0])==2:\n import visual.graph as vg\n global __obj2dDisplayGeneratedStt\n\n maxX = max([abs(elm[0]) for elm in arg])\n maxY = max([abs(elm[1]) for elm in arg])\n\n print \"maxX:\",maxX, \" maxY:\",maxY\n\n if (__obj2dDisplayGeneratedStt == None):\n if xyRate == True: # 11.01.16 to \n maxAt = max(maxX, maxY)\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600*maxX/maxAt,height=600*maxY/maxAt)\n else:\n __obj2dDisplayGeneratedStt = vg.gdisplay(\n width=600,height=600)\n 
#__bl2dDisplayGeneratedStt = True\n grphAt = vg.gcurve(color = color)\n for i in range(len(arg)):\n assert len(arg[i])==2, \"unexpeted length data:\"+str(arg[i])\n grphAt.plot(pos = arg[i])\n\n #return __obj2dDisplayGeneratedStt\n #import pdb; pdb.set_trace()\n #print \"debug:\",grphAt.gcurve.pos\n\n # plot start mark\n grphSqAt = vg.gcurve(color = color)\n pos0At = grphAt.gcurve.pos[0,:][:2]\n rateAt = 50\n for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]\n , [-maxY/rateAt, maxY/rateAt]):\n grphSqAt.plot(pos = pos0At+[x,y])\n \n grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])\n\n return grphAt # 09.02.04 to animate graph\n elif len(arg[0])==3:\n vs.scene.forward=(-1,+1,-1)\n vs.scene.up=(0,0,1)\n\n c = vs.curve( color = color )\n\n maxX, maxY, maxZ = 0,0,0\n for i in range(len(arg)):\n if maxX < abs(arg[i][0]):\n maxX = abs(arg[i][0])\n if maxY < abs(arg[i][1]):\n maxY = abs(arg[i][1])\n if maxZ < abs(arg[i][2]):\n maxZ = abs(arg[i][2])\n c.append( arg[i] )\n #print c.pos\n print \"maxX:\",maxX, \" maxY:\",maxY, \" maxZ:\",maxZ\n maxAt = max(maxX,maxY,maxZ)\n c.radius = maxAt/radiusRate\n\n vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)\n\n if blAxes == True:\n # draw axise\n vs.curve( pos=[(0,0,0), (maxAt,0,0)]\n , color=(1,0,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,maxAt,0)]\n , color=(0,1,0)\n , radius = maxAt/100 )\n vs.curve( pos=[(0,0,0), (0,0,maxAt)]\n , color=(0,1,1)\n , radius = maxAt/100 )\n #return vs.scene\n return c # 09.02.04 to animate graph\n else:\n assert False,\"unexpeted data:\"+str(arg)", "def scatterplot(loc: List[CrimeStatistics]) -> None: \n # return None #stub\n #template based on visualization\n \n x = enrollment_list(loc)\n y = crime_list(loc)\n \n \n pyplot.scatter(x,y)\n pyplot.xlabel(\"Enrollment\")\n pyplot.ylabel(\"Total crime per campus\")\n pyplot.title(\"correlation between enrollment and crimes committed\")\n \n \n \n pyplot.show()\n print(linregress(x,y))\n \n \n return None", "def plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns):\n x = np.linspace(-20, 20, 400)\n clrs1 = [(0.6, 0, 0), (0, 0.6, 0), (0, 0, 0.6)]\n clrs2 = [(1, 0.2, 0), (0, 1, 0), (0, 0.6, 1)]\n plt.hist(smp, 100, normed=1, color=(0.7, 0.7, 0.7))\n for k in range(nm):\n this_smp = smp[lm_ind[k]:lm_ind[k + 1]]\n plt.plot(x, lm[k] * spst.norm(mu[k], sg[k]).pdf(x), color=clrs2[k])\n plt.plot(this_smp, -0.05*np.ones(this_smp.shape), '.',\\\n color=clrs1[k], markersize=2)", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot_points(self,recon_data,zoom=\"dynamic\",varname='wspd',barbs=False,scatter=False,\\\n ax=None,return_ax=False,prop={},map_prop={}):\n \n #Set default properties\n default_prop={'obs_colors':'plasma','obs_levels':np.arange(30,151,10),'sortby':varname,'linewidth':1.5,'ms':7.5}\n default_map_prop={'res':'m','land_color':'#FBF5EA','ocean_color':'#EDFBFF',\\\n 'linewidth':0.5,'linecolor':'k','figsize':(14,9),'dpi':200}\n \n #Initialize plot\n prop = self.add_prop(prop,default_prop)\n map_prop = self.add_prop(map_prop,default_map_prop)\n self.plot_init(ax,map_prop)\n \n #set default properties\n input_prop = prop\n input_map_prop = map_prop\n \n #error check\n if isinstance(zoom,str) == False:\n raise TypeError('Error: zoom must be of type str')\n \n #--------------------------------------------------------------------------------------\n \n #Keep record of lat/lon coordinate extrema\n max_lat = None\n min_lat = None\n max_lon = None\n min_lon = None\n\n #Check 
for storm type, then get data for storm\n if isinstance(recon_data,pd.core.frame.DataFrame):\n pass\n else:\n raise RuntimeError(\"Error: recon_data must be dataframe\")\n\n #Retrieve storm data\n lats = recon_data['lat']\n lons = recon_data['lon']\n\n #Add to coordinate extrema\n if max_lat == None:\n max_lat = max(lats)\n else:\n if max(lats) > max_lat: max_lat = max(lats)\n if min_lat == None:\n min_lat = min(lats)\n else:\n if min(lats) < min_lat: min_lat = min(lats)\n if max_lon == None:\n max_lon = max(lons)\n else:\n if max(lons) > max_lon: max_lon = max(lons)\n if min_lon == None:\n min_lon = min(lons)\n else:\n if min(lons) < min_lon: min_lon = min(lons)\n\n #Plot recon data as specified\n \n if barbs:\n \n dataSort = recon_data.sort_values(by='wspd').reset_index(drop=True)\n norm = mlib.colors.Normalize(vmin=min(prop['obs_levels']), vmax=max(prop['obs_levels']))\n cmap = mlib.cm.get_cmap(prop['obs_colors'])\n colors = cmap(norm(dataSort['wspd'].values))\n colors = [tuple(i) for i in colors]\n qv = plt.barbs(dataSort['lon'],dataSort['lat'],\\\n *uv_from_wdir(dataSort['wspd'],dataSort['wdir']),color=colors,length=5,linewidth=0.5)\n \n if scatter:\n \n dataSort = recon_data.sort_values(by=prop['sortby'],ascending=(prop['sortby']!='p_sfc')).reset_index(drop=True)\n prop['obs_levels']=np.linspace(min(dataSort[varname]),max(dataSort[varname]),256)\n cmap = mlib.cm.get_cmap(prop['obs_colors'])\n \n sc = plt.scatter(dataSort['lon'],dataSort['lat'],c=dataSort[varname],cmap = cmap,\\\n vmin=min(prop['obs_levels']), vmax=max(prop['obs_levels']), s=prop['ms'])\n\n #--------------------------------------------------------------------------------------\n \n #Pre-generated zooms\n if zoom in ['north_atlantic','conus','east_conus']:\n bound_w,bound_e,bound_s,bound_n = self.set_projection(zoom)\n \n #Storm-centered plot domain\n elif zoom == \"dynamic\":\n \n bound_w,bound_e,bound_s,bound_n = self.dynamic_map_extent(min_lon,max_lon,min_lat,max_lat)\n self.ax.set_extent([bound_w,bound_e,bound_s,bound_n], crs=ccrs.PlateCarree())\n \n #Custom plot domain\n else:\n \n #Check to ensure 3 slashes are provided\n if zoom.count(\"/\") != 3:\n raise ValueError(\"Error: Custom map projection bounds must be provided as 'west/east/south/north'\")\n else:\n try:\n bound_w,bound_e,bound_s,bound_n = zoom.split(\"/\")\n bound_w = float(bound_w)\n bound_e = float(bound_e)\n bound_s = float(bound_s)\n bound_n = float(bound_n)\n self.ax.set_extent([bound_w,bound_e,bound_s,bound_n], crs=ccrs.PlateCarree())\n except:\n raise ValueError(\"Error: Custom map projection bounds must be provided as 'west/east/south/north'\")\n \n #Determine number of lat/lon lines to use for parallels & meridians\n self.plot_lat_lon_lines([bound_w,bound_e,bound_s,bound_n])\n \n #--------------------------------------------------------------------------------------\n \n #Add left title\n dot = u\"\\u2022\"\n if barbs:\n vartitle = f'{dot} flight level wind'\n if scatter:\n if varname == 'sfmr':\n vartitle = f'{dot} SFMR surface wind'\n if varname == 'wspd':\n vartitle = f'{dot} flight level wind'\n if varname == 'p_sfc':\n vartitle = f'{dot} surface pressure'\n self.ax.set_title('Recon '+vartitle,loc='left',fontsize=17,fontweight='bold')\n\n #Add right title\n #max_ppf = max(PPF)\n start_date = dt.strftime(min(recon_data['time']),'%H:%M UTC %d %b %Y')\n end_date = dt.strftime(max(recon_data['time']),'%H:%M UTC %d %b %Y')\n self.ax.set_title(f'Start ... {start_date}\\nEnd ... 
{end_date}',loc='right',fontsize=13)\n\n #--------------------------------------------------------------------------------------\n \n #Add legend\n\n #Add colorbar\n \n #Return axis if specified, otherwise display figure\n if ax != None or return_ax == True:\n return self.ax,'/'.join([str(b) for b in [bound_w,bound_e,bound_s,bound_n]])\n else:\n plt.show()\n plt.close()", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot_2d(vector_array, save_plot_dir):\n principal_df = pd.DataFrame(data = vector_array, columns = ['pc1', 'pc2'])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n xs = principal_df['pc1']\n ys = principal_df['pc2']\n ax.scatter(xs, ys, s=50, alpha=0.6, edgecolors='w')\n\n ax.set_xlabel('pc1')\n ax.set_ylabel('pc2')\n\n plt.savefig(save_plot_dir + '/2D_scatter.png')\n plt.close()", "def plot_2d(self, X, labels=None, s=20, marker='o',\n dimensions=(0, 1), ax=None, colors=None,\n fignum=None, cmap=matplotlib.cm.jet, # @UndefinedVariable\n ** kwargs):\n if ax is None:\n fig = pylab.figure(fignum)\n ax = fig.add_subplot(111)\n if labels is None:\n labels = numpy.zeros(X.shape[0])\n ulabels = []\n for lab in labels:\n if not lab in ulabels:\n ulabels.append(lab)\n nlabels = len(ulabels)\n if colors is None:\n colors = [cmap(float(i) / nlabels) for i in range(nlabels)]\n X_ = self.project(X, self.Q)[:,dimensions]\n kwargs.update(dict(s=s))\n plots = list()\n for i, l in enumerate(ulabels):\n kwargs.update(dict(color=colors[i], marker=marker[i % len(marker)]))\n plots.append(ax.scatter(*X_[labels == l, :].T, label=str(l), **kwargs))\n ax.set_xlabel(r\"PC$_1$\")\n ax.set_ylabel(r\"PC$_2$\")\n try:\n pylab.tight_layout()\n except:\n pass\n return plots", "def plot_xy(self, xpts, ypts):\n self.plot(np.asarray((xpts, ypts)).T)", "def plot_historical(ax, ddict, obs=None):\n\n ax = plt.gca() if ax is None else ax\n\n for target, _ in ddict.items():\n data = ddict[target]\n if data[\"Observatory\"] in obs or obs==\"all\":\n print(target)\n ax.scatter(data[\"z\"],\n np.log10(data[\"Eiso\"].value),\n marker = data[\"marker\"],\n color = data[\"col\"],\n label = target)\n return ax", "def plot_2D_so(som, **kwargs):\n info = som.toXY()\n\n # Get the independent axes\n x = info[0][0]\n y = info[0][1]\n\n # Get dimensions of data\n Nx = x.size\n Ny = y.size\n\n # z values are filtered since plotting has trouble with NaNs. The\n # I{nan_to_num} function zeros NaNs and sets (-)Inf to the largest\n # (negative) positive value.\n z = numpy.reshape(numpy.nan_to_num(info[0][2]), (Nx, Ny))\n\n # Matplotlib and NumPy don't agree on how our 2D data is actually\n # distributed. We use the notion that the fastest running index is the\n # y axis for a given data set. NumPy creates a 2D array that has\n # Nrows = Nx and Ncols = Ny which agrees with our designation. However,\n # Matplotlib requires that Ncols is actually the x direction for the plot.\n # This means the labels are created in reverse order and the original x\n # and y arrays are plotted in reverse. 
\n\n # Set plot attributes, take overrides if provided\n try:\n xlabel = kwargs[\"xlabel\"]\n del kwargs[\"xlabel\"]\n except KeyError:\n xlabel = som.getAxisLabel(1) + \" [\" + som.getAxisUnits(1) + \"]\"\n\n try:\n ylabel = kwargs[\"ylabel\"]\n del kwargs[\"ylabel\"]\n except KeyError:\n ylabel = som.getAxisLabel(0) + \" [\" + som.getAxisUnits(0) + \"]\"\n\n # Add labels back into keyword dictionary\n kwargs[\"xlabel\"] = xlabel\n kwargs[\"ylabel\"] = ylabel\n\n drplot.plot_2D_arr(y, x, z, **kwargs)", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def _plot_dict_scatter(d):\n xvals, yvals = _dict2lists(d)\n pylab.scatter(xvals, yvals)", "def plot_coords(coords: List[int], labels: List[str], sample_density=100):\n global graph_data\n size = len(coords)\n if not graph_data:\n graph_data = [[0] * 100] * size\n for i in range(size):\n graph_data[i][-1] = coords[i]\n\n live_plotter(np.linspace(result_count - sample_density, result_count, sample_density), graph_data,\n identifier='Sensor Values',\n labels=labels)\n for i in range(size):\n graph_data[i] = np.append(graph_data[i][1:], 0.0)", "def plotXY(xName,xDataRaw,yName, yDataRaw):\n scanFileHolder = getScanFileHolderXY(xName,xDataRaw,yName, yDataRaw) \n scanFileHolder.plot(xName, yName)\n return scanFileHolder", "def plot_2d_data(x, y=None, path=None, labels=None):\n fig = plt.figure()\n if any(y): cmap = plt.cm.Spectral\n else: cmap = None\n plt.scatter(x[:, 0], x[:, 1], c=y, s=40, cmap=cmap)\n if not labels:\n plt.xlabel('dimension 1')\n plt.ylabel('dimension 2')\n else:\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n if path:\n plt.savefig(path, bbox_inches='tight')\n return fig", "def plot_r(f=500, d=100e-3, dr=0.01, picture_file=None, picture_formats=['png', 'pdf', 'svg']):#x_axis='r', \n import matplotlib.pyplot\n i = 0\n rs = []\n sigmas = []\n ys = []\n print \"r_soll ->\\tsigma ->\\tr\"\n datas = []\n for r in numpy.arange(0, 1+dr, dr) :\n for t in [0] :\n print \"%f\\t\" %(r),\n sigma = getSigma(r)\n print \"%f\\t\" % (sigma),\n rs.append(r)\n sigmas.append(sigma)\n v = getSynapticActivity(f=f, r=r, fireing_rate=1, duration=d, delay=t)\n #print v\n #matplotlib.pyplot.scatter(v, numpy.zeros( len(v) ) + i )\n r = vector_strength(f, v)\n print \"%f\" % (r)\n ys.append(r)\n i = i+1\n datas.append([sigma,r])\n numpy.savetxt(\"../../../Data/%.1f_%f@%i.dat\" % (getSigma(dr),dr,int(f*d)), datas) \n\n matplotlib.pyplot.figure()\n matplotlib.pyplot.xlabel('sigma')\n matplotlib.pyplot.ylabel('measured vector strength')\n matplotlib.pyplot.xlim(0, getSigma(dr))\n matplotlib.pyplot.ylim(0, 1)\n matplotlib.pyplot.grid()\n matplotlib.pyplot.scatter(sigmas,ys, marker='x', color='black')#, basex=10, basey=10, ls=\"-\"\n if(picture_file != None):\n for picture_format in picture_formats:\n matplotlib.pyplot.savefig(picture_file+'sigma_'+str(getSigma(dr))+'_'+str(int(f*d))+'.'+picture_format,format=picture_format)\n else:\n matplotlib.pyplot.show()\n\n matplotlib.pyplot.figure()\n matplotlib.pyplot.xlabel('aimed vector strength')\n matplotlib.pyplot.ylabel('measured vector strength')\n #matplotlib.pyplot.legend([\"based on %i examples / dot\" % (f*d) ], loc='best');\n matplotlib.pyplot.xlim(0, 1)\n matplotlib.pyplot.ylim(0, 1)\n matplotlib.pyplot.grid()\n\n matplotlib.pyplot.scatter(rs,ys, marker='x', color='black')\n if(picture_file != None):\n for picture_format in picture_formats:\n 
matplotlib.pyplot.savefig(picture_file+'_'+str(dr)+'_'+str(int(f*d))+'.'+picture_format,format=picture_format)\n else:\n matplotlib.pyplot.show()\n\n matplotlib.pyplot.close('all')\n datas = numpy.ndarray((len(datas),2), buffer=numpy.array(datas),dtype=float)\n return datas" ]
[ "0.6011397", "0.5986594", "0.59757596", "0.59473705", "0.5904608", "0.58929086", "0.58712304", "0.58073235", "0.57341915", "0.57297295", "0.5701801", "0.5694246", "0.5684777", "0.56670064", "0.56571454", "0.56532675", "0.56177986", "0.5595032", "0.5587592", "0.5575011", "0.5574968", "0.5569163", "0.553768", "0.5536258", "0.5532065", "0.55155444", "0.5512973", "0.55052006", "0.5467348", "0.5467104" ]
0.81654465
0
Returns the cursor coordinates as a tuple (x, y).
def cursor_coordinates(self): text = self.getText() lines = text.split("\n") pos = self.getCursorPos() if pos == 0: return (0, 0) i = 0 cursor_row = -1 cursor_col = -1 for row, line in enumerate(lines): i += len(line) + 1 # we need to include "\n" if pos < i: cursor_row = row cursor_col = pos - i + len(line) + 1 break return (cursor_col, cursor_row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def mouse_coords(desktop=False):\n x, y = c_int(0), c_int(0)\n if desktop:\n mouse.SDL_GetGlobalMouseState(byref(x), byref(y))\n else:\n mouse.SDL_GetMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))", "def xy(self) -> Tuple[int, int]:\n return self._x, self._y", "def get_pos(self):\n return (self.x, self.y)", "def coords2D(self):\n return (self.x, self.y)", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def get(self):\n return (self.x,self.y);", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def getXY(self):\n return (self.X,self.Y)", "def position(self):\n return self.x, self.y", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y", "def pixelcoord(coordx: float, coordy: float) -> Tuple[int, int]:\n ox, oy = origin()\n x, y = int(round(ox+coordx)), int(round(oy-coordy))\n return (x, y)", "def position(self):\n return self._x, self._y", "def get_mouse_coordinate(self):\n pos = pygame.mouse.get_pos()\n mov = pygame.mouse.get_rel()\n row = pos[0] // (self.CELL_WIDTH + self.MARGIN)\n col = (pos[1] - self.PANEL_HEIGHT) // (self.CELL_WIDTH + self.MARGIN)\n if mov != (0, 0) and not self.env.not_in_grid(row, col):\n return (row, col)\n return self.markerPos", "def get_point(self):\n return self._x, self._y", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def cursorPosQt(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n return pos.x(), pos.y()", "def cursorPosQt(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n return pos.x(), pos.y()", "def pixel2coords(self, x, y):\n xoff, a, b, yoff, d, e = self.geotransform()\n\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return (xp, yp)", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def get_coords(self):\n xTK = int(jeu.coords(self.rectangle)[0]) # Coordonnées TKinter x1 et y1 du rectangle correspondant à la voiture\n yTK = int(jeu.coords(self.rectangle)[1])\n # On divise par la largeur d'une case et on renvoie les valeurs obtenues sous la forme d'un tuple\n X = xTK//100\n Y = yTK//100\n resultat = [X, Y]\n return resultat", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def get_location(self):\r\n return self.__x, self.__y" ]
[ "0.7824512", "0.7726604", "0.76174706", "0.76174706", "0.75677025", "0.7565198", "0.75248986", "0.7379414", "0.7330282", "0.72748744", "0.72602683", "0.72091806", "0.7191261", "0.7189939", "0.7050582", "0.70483506", "0.70447123", "0.7040171", "0.70338386", "0.70088947", "0.70079243", "0.6965507", "0.69520426", "0.69520426", "0.6951989", "0.6930813", "0.69162124", "0.69013476", "0.68874717", "0.6878694" ]
0.7819652
1
Focuses the "prev" cell. Moves the cursor to the proper position.
def focus_prev_cell(self, prev): x, y = self._cell_input.cursor_coordinates() y_new = prev._cell_input.rows() - 1 prev._cell_input.set_cursor_coordinates(x, y_new) prev.set_focus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def focus_prev(self):\n self.focus_item(forward=False)", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def focus_next_cell(self, next):\n x, y = self._cell_input.cursor_coordinates()\n y_new = 0\n next._cell_input.set_cursor_coordinates(x, y_new)\n next.set_focus()\n self.lost_focus(force=True)", "def setPrev(self, prev):\n\t\t\tself.prev = prev", "async def prev(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:\n if self.cursor < 1:\n return await interaction.response.defer()\n\n self.cursor -= 1\n await interaction.response.edit_message(content=self.current())", "def join_with_prev(self, prev):\n if prev._cell_input.getText() == \"\":\n new_text = self._cell_input.getText()\n else:\n new_text = prev._cell_input.getText()\n if self._cell_input.getText() != \"\":\n new_text += \"\\n\" + self._cell_input.getText()\n y_new = prev._cell_input.rows()\n if prev._cell_input.getText() == \"\":\n y_new -= 1\n prev._cell_input.setText(new_text)\n prev._cell_input.set_cursor_coordinates(0, y_new)\n prev.set_focus()", "def select_item_prev(self):\n\n loc_diff = self._get_distance_betweenitems(self.page_current.item_selected, self.page_current.item_selected - 1)\n if loc_diff + self.render_offset_item < self.terminal_height:\n self.page_current.item_selected -= 1\n self.render_offset_item = 0\n else:\n self.render_offset_item -= self.terminal_height\n\n self.render() # TODO Why the render function needs to be called for instant update unknown. Need to look into.", "def prev(self, prev):\n\n self._prev = prev", "def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')", "def movePrev(self):\n parentNode = self.parentNode\n index = parentNode.idevices.index(self)\n if index > 0:\n temp = parentNode.idevices[index - 1]\n parentNode.idevices[index - 1] = self\n parentNode.idevices[index] = temp", "def MoveToPreviousSlide(self, event):\n pass", "def move_previous():\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis(\n [True, True, True]\n ) # so all axis can be adressed\n errorcode = self.variables.table.move_to(\n [self.previous_xloc, self.previous_yloc, self.previous_zloc],\n True,\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if errorcode:\n # self.variables.message_to_main.put(errorcode)\n self.variables.table.set_axis([True, True, False]) # so z axis is off again\n self.variables.table.set_joystick(True)", "def previous(self):\n if self.cursor.pref:\n self.cursor = self.cursor.pref\n return self.cursor\n return None", "def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())", "def on_btPagePrev_clicked(self, widget, data=None):\n\n if self.page > 1:\n self.page -= 1\n self.part = 1\n self.refresh()", "def previous(self, _event):\n self.set_val(self.val - 1)", "def __quickSearchPrev(self):\n # first we have to check if quick search is active\n # and try to activate it if not\n if self.__quickSearchToolbarVisibility is None:\n self.__quickSearchToolbarVisibility = (\n self.__quickSearchToolbar.isVisible()\n )\n if not self.__quickSearchToolbar.isVisible():\n self.__quickSearchToolbar.show()\n if not self.quickFindtextCombo.lineEdit().hasFocus():\n aw = self.activeWindow()\n self.quickFindtextCombo.lastActive = aw\n if aw:\n self.quickFindtextCombo.lastCursorPos = aw.getCursorPosition()\n else:\n self.quickFindtextCombo.lastCursorPos = None\n tff = self.textForFind(False)\n if tff:\n self.quickFindtextCombo.lineEdit().setText(tff)\n 
self.quickFindtextCombo.lineEdit().setFocus()\n self.quickFindtextCombo.lineEdit().selectAll()\n self.__quickSearchSetEditColors(False)\n else:\n self.__quickSearchInEditor(True, True)", "def go_prev(self, inst):\n\n # Change active date\n self.active_date = [self.active_date[0], self.quarter_nums[0][1],\n self.quarter_nums[0][0]]\n\n # Name of prev screen\n n = self.quarter_nums[0][1] - 1\n prev_scr_name = \"%s-%s\" % (self.month_names_eng[n],\n self.quarter_nums[0][0])\n\n # If it's doen't exitst, create it\n if not self.sm.has_screen(prev_scr_name):\n self.create_month_scr(self.quarter[0])\n\n self.sm.current = prev_scr_name\n self.sm.transition.direction = \"left\"\n\n self.get_quarter()\n self.title = \"%s - %s\" % (self.month_names[self.active_date[1] - 1],\n self.active_date[2])\n\n self.title_label.text = self.title", "def __previousBookmark(self):\n self.activeWindow().previousBookmark()", "def _PrevExpression(self):\r\n self.RestoreExpression()\r\n self.expressionindex-=1\r\n return self.SetExpressionByIndex(self.expressionindex)", "def move_prev(self, step=1):\n if self._index is not None and self._index - step >= 0:\n self._index -= step\n # if index <= start index of current frame --> recalculate findex\n if self._index < self._findex * self._flen:\n self._findex -= int(math.ceil(step / float(self._flen)))\n return self[self._index]\n return None", "def set_prev(self, p) -> None:\n self.prev = p", "def display_previous_command( self, event ):\n if self.command_history_index > 0:\n if self.command_history_index == len( self.command_history ) - 1:\n self.command_history[ -1 ] = self.get()\n self.command_history_index -= 1\n self.delete( 0, tk.END )\n self.insert( 0, self.command_history[ self.command_history_index ] )", "def prev_artist(self):\n test = self.artists_list.currentIndex() - 1\n if test >= 0:\n self.artists_list.setCurrentIndex(test)\n self.update_navigation_buttons()", "def previous(self):\n self._select_interface(self._rc_previous, self._http_previous)", "def prev_artist(self):\n test = self.artist_list.currentIndex() - 1\n if test >= 0:\n self.artist_list.setCurrentIndex(test)\n self.update_navigation_buttons()", "def wrap_cursor_forward(event):\n b = event.cli.current_buffer\n relative_begin_index = b.document.get_start_of_line_position()\n b.cursor_left(count=abs(relative_begin_index))\n b.cursor_down(count=1)", "def onPrevious(self, event):\n\t\tself.previousPicture()", "def set_previous(self, new_previous):\n self.previous = new_previous", "def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)" ]
[ "0.7651945", "0.74857336", "0.6641398", "0.65821093", "0.64603764", "0.64561605", "0.6435459", "0.6334366", "0.6332495", "0.63032544", "0.6289549", "0.6205924", "0.6180435", "0.6161628", "0.6105949", "0.61046416", "0.6073891", "0.60474837", "0.5960907", "0.5882112", "0.5861614", "0.58610183", "0.5843969", "0.5841444", "0.5840595", "0.58287454", "0.57903147", "0.57626414", "0.57408476", "0.57376444" ]
0.85488623
0
Focuses the "next" cell. Moves the cursor to the proper position.
def focus_next_cell(self, next): x, y = self._cell_input.cursor_coordinates() y_new = 0 next._cell_input.set_cursor_coordinates(x, y_new) next.set_focus() self.lost_focus(force=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next(self, _):\n self.notebook.SetSelection(self.idx+1)", "def focus_next(self):\n self.focus_item()", "def next(self):\n self.jumpahead(1)", "def move_to_next_cell(self, create=False):\n if self._active_cell < self.num_cells()-1:\n current_cell = self._cell_list[self._active_cell]\n next_cell = self._cell_list[self._active_cell+1]\n current_cell.focus_next_cell(next_cell)\n elif create:\n self.add_cell()\n self.move_to_next_cell()\n elif self.num_cells() == 1:\n self._cell_list[0].set_focus()", "def __goto(self):\n from QScintilla.GotoDialog import GotoDialog\n \n aw = self.activeWindow()\n lines = aw.lines()\n curLine = aw.getCursorPosition()[0] + 1\n dlg = GotoDialog(lines, curLine, self.ui, None, True)\n if dlg.exec_() == QDialog.Accepted:\n aw.gotoLine(dlg.getLinenumber(), expand=True)", "def page_next(self):\n if self._pos >= self._npages - 1:\n # exit if we are already at the end\n self.page_quit()\n else:\n self._pos += 1\n self._display()", "def focus_prev_cell(self, prev):\n x, y = self._cell_input.cursor_coordinates()\n y_new = prev._cell_input.rows() - 1\n prev._cell_input.set_cursor_coordinates(x, y_new)\n prev.set_focus()", "def advance(self):\n self.currentIndex += 1\n self.updateCurrentCommand()", "def next(self):\n current = self.listbox.curselection()[0]\n if current < self.listbox.size() - 1:\n self.listbox.selection_clear(current)\n self.listbox.activate(current+1)\n self.listbox.select_set(current+1)\n self.play()", "def advance(self, distance):\n self.cursor += distance", "def advance(self):\n self.pos += 1\n if self.pos > len(self.syntax) - 1:\n self.current_char = None\n else:\n self.current_char = self.syntax[self.pos]", "def _advance(self):\n self._current += 1", "def __nextBookmark(self):\n self.activeWindow().nextBookmark()", "def next(self, next):\n\n self._next = next", "def next(self, next):\n\n self._next = next", "def select_item_next(self):\n\n # If current item fits terminal height choose next item,\n # if not, adjust render_offset_item without selecting new item(Edge case)\n loc_diff = self._get_distance_betweenitems(self.page_current.item_selected, self.page_current.item_selected + 1)\n if loc_diff - self.render_offset_item < self.terminal_height:\n self.page_current.item_selected += 1\n self.render_offset_item = 0\n else:\n self.render_offset_item += self.terminal_height\n\n self.render() # TODO Why the render function needs to be called for instant update unknown. 
Need to look into.", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def goto_position(editor, pos):\n cursor = editor.textCursor()\n editor.moveCursor(cursor.End)\n cursor.setPosition(pos)\n editor.setTextCursor(cursor)", "def next(self):\n self.pos += 1\n self.current_char = None if self.pos >= len(self.input) else self.input[self.pos]", "def get_next_position(self):", "def _next(self):\n # if self.order == 'rowsfirst':\n self.row_counter += 1\n if self.row_counter >= self.nrows:\n self.column_counter += 1\n self.row_counter = 0\n if self.column_counter > self.ncolumns:\n raise ValueError(\n \"Call to get next axis exceeds the number of columns requested initially: %d\"\n % self.columns\n )\n # else:\n # self.column_counter += 1\n # if self.column_counter >= self.ncolumns:\n # self.row_counter += 1\n # self.column_counter = 0\n # if self.row_counter >= self.nrows:\n # raise ValueError('Call to get next axis exceeds the number of rows requested initially: %d' % self.nrows)", "def next(self):\n if self.index.get() != len(self.frame_list) - 1:\n for i in range(len(self.frame_list)):\n self.frame_list[i].pack_forget()\n\n self.index.set(self.index.get() + 1)\n self.frame_list[self.index.get()].pack(fill=\"both\", expand=True)\n\n self.work_out_pages()", "def setNext(self, next):\n\t\t\tself.next = next", "def advance(self):\n self.pos += 1\n if self.pos > len(self.text) - 1:\n self.current_char = None # Indicates end of input\n else:\n self.current_char = self.text[self.pos]", "def _advance(self, c=1):\n self._index += c", "def go_to_next_state(self):\n pass", "def advance(self):\n self.pos += 1\n if self.pos < len(self.text):\n self.current_char = self.text[self.pos]\n else:\n self.current_char = None", "def set_next(self, new_next):\n self.next = new_next", "async def next(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:\n if self.cursor >= len(self.images) - 1:\n return await interaction.response.defer()\n\n self.cursor += 1\n await interaction.response.edit_message(content=self.current())", "def page_next(self):\n if self._npos >= self._npages - 1:\n # exit if we are already at the end\n self.page_quit()\n else:\n self._npos += 1\n if self.exit_on_lastpage and self._npos >= (self._npages - 1):\n self.display(show_footer=False)\n self.page_quit(quiet=True)\n else:\n self.display()" ]
[ "0.7646229", "0.7625387", "0.70269966", "0.68980944", "0.64222103", "0.6386664", "0.6382788", "0.6369909", "0.6353012", "0.6218585", "0.62098944", "0.6159042", "0.6122109", "0.6112056", "0.6112056", "0.61118174", "0.611045", "0.6102978", "0.6070798", "0.6044282", "0.60154325", "0.59536684", "0.5934677", "0.59278643", "0.5926526", "0.5924044", "0.5909914", "0.5889776", "0.58818", "0.58797425" ]
0.8417928
0
Joins this cell with the previous cell. It doesn't delete the current cell (this is the job of the Worksheet to properly delete ourselves).
def join_with_prev(self, prev): if prev._cell_input.getText() == "": new_text = self._cell_input.getText() else: new_text = prev._cell_input.getText() if self._cell_input.getText() != "": new_text += "\n" + self._cell_input.getText() y_new = prev._cell_input.rows() if prev._cell_input.getText() == "": y_new -= 1 prev._cell_input.setText(new_text) prev._cell_input.set_cursor_coordinates(0, y_new) prev.set_focus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def _delete(self):\n self.prev.next = self.next\n self.next.prev = self.prev", "def focus_prev_cell(self, prev):\n x, y = self._cell_input.cursor_coordinates()\n y_new = prev._cell_input.rows() - 1\n prev._cell_input.set_cursor_coordinates(x, y_new)\n prev.set_focus()", "def delete(self):\n if self.prev:\n self.prev.next = self.next\n if self.next:\n self.next.prev = self.prev", "def undo(self):\n self.setIndex(self._index-1)", "def movePrev(self):\n parentNode = self.parentNode\n index = parentNode.idevices.index(self)\n if index > 0:\n temp = parentNode.idevices[index - 1]\n parentNode.idevices[index - 1] = self\n parentNode.idevices[index] = temp", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def toggle_cell_at_point(self,x,**kw):\n c=self.delete_cell_at_point(x)\n if c is None:\n c=self.add_cell_at_point(x,**kw)\n return c", "def delete_forward():\r\n point().delete_right_char()", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def prev(self, prev):\n\n self._prev = prev", "def erase_cell(state: State) -> State:\n assert state.index < state.array_len\n return state._replace(\n array=state.array[: state.index] + [None] + state.array[state.index + 1 :]\n )", "def mark_mine(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)\n self.count=self.count-1", "def erase(self):\r\n self.in_arrow = None\r\n self.out_arrow = None", "def previous_line():\r\n set_point(point().previous_line())", "def previous_board(self):\n pass", "def __previousBookmark(self):\n self.activeWindow().previousBookmark()", "def undolast(self):\n import Part\n if len(self.node) > 1:\n self.node.pop()\n # last = self.node[-1]\n if self.obj.Shape.Edges:\n edges = self.obj.Shape.Edges\n if len(edges) > 1:\n newshape = Part.makePolygon(self.node)\n self.obj.Shape = newshape\n else:\n self.obj.ViewObject.hide()\n # DNC: report on removal\n # _msg(translate(\"draft\", \"Removing last point\"))\n _msg(translate(\"draft\", \"Pick next point\"))", "def undolast(self):\n import Part\n if len(self.node) > 1:\n self.node.pop()\n # last = self.node[-1]\n if self.obj.Shape.Edges:\n edges = self.obj.Shape.Edges\n if len(edges) > 1:\n newshape = Part.makePolygon(self.node)\n self.obj.Shape = newshape\n else:\n self.obj.ViewObject.hide()\n # DNC: report on removal\n # _msg(translate(\"draft\", \"Removing last point\"))\n _msg(translate(\"draft\", \"Pick next point\"))", "def previous(self):\n\n pass", "def mark_mine(self, cell):\n if cell in self.cells:\n self.count -= 1\n self.cells.remove(cell)", "def moveDown(self):\n currentRow = self.getCurrentRow()\n if currentRow < (self.jobRow.rowCount() - 1):\n rowData = self.removeRow()\n self.insertRow(currentRow + 1, rowData)\n self.layers.insert(currentRow + 1, rowData)\n self.updateDependLabels()", "def deleteCell(self, cell):\n index = self.cells.index(cell)\n self.cells[index].delete()\n self.cells.remove(cell)", "def previous(self, _event):\n self.set_val(self.val - 1)", "def update_current_word(self):\n self.current_word = self.current_row.pop(0) + \" \"", "def undo(self, outer_instance):\n pass", "def pop(self):\n cell = next(self.cells)\n if self.min_row == self.max_row:\n self.min_col += 1\n else:\n self.min_row += 1\n return cell", "def c_undo(self):\r\n try:\r\n 
self.canvas.delete(self.canvas.find_all()[-1])\r\n self.update()\r\n return True\r\n except: return False", "def prev(self):\n return self.__prev", "def up(cell):\n return [cell[0] - 1, cell[1]]" ]
[ "0.5954091", "0.5862837", "0.582299", "0.56134", "0.55469036", "0.54742885", "0.5445057", "0.5317975", "0.5310467", "0.52803475", "0.5277451", "0.52689356", "0.5197231", "0.5184458", "0.5116611", "0.5108614", "0.510692", "0.5083509", "0.5083509", "0.50660044", "0.5053383", "0.5042416", "0.50386196", "0.49588123", "0.49542123", "0.4929395", "0.49249822", "0.49118444", "0.48992747", "0.48962522" ]
0.6454845
0
Moves to the next cell. create .... if True, creates the next cell if we are at the end of the worksheet
def move_to_next_cell(self, create=False): if self._active_cell < self.num_cells()-1: current_cell = self._cell_list[self._active_cell] next_cell = self._cell_list[self._active_cell+1] current_cell.focus_next_cell(next_cell) elif create: self.add_cell() self.move_to_next_cell() elif self.num_cells() == 1: self._cell_list[0].set_focus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_next_cell(self, cell, auto, i, j, next):\r\n move_list = {1: [i - 1, j - 1], 2: [i - 1, j], 3: [i - 1, j + 1], 4: [i, j + 1], 5: [i + 1, j + 1],\r\n 6: [i + 1, j], 7: [i + 1, j - 1], 8: [i, j - 1]}\r\n\r\n def prob_sick():\r\n ''' check the probability of a creature to be sick\r\n according to number of sick neighbours'''\r\n n = self.sick_neighbors(move_list)\r\n p = np.random.uniform(0, 1)\r\n if n * P > p:\r\n return SICK\r\n else:\r\n return cell\r\n\r\n next_value = self.valid_cell(move_list[next])\r\n '''******************************************************************************\r\n Check the next cell, if free- move the creature in\r\n else- check probability to be sick and change if needed\r\n ******************************************************************************'''\r\n if auto[next_value[0], next_value[1]] == 0:\r\n auto[next_value[0], next_value[1]] = prob_sick()\r\n auto[i, j] = 0\r\n else:\r\n auto[i, j] = prob_sick()\r\n\r\n return auto", "def __next__(self):\n if self._curr_point.x == self._width:\n self._curr_point.x = 0\n raise StopIteration\n\n cell = self.get_cell(self._curr_point)\n\n # Without shallow copy we will have prev_point reference\n # to the curr_point, so in practice - it will be the same object,\n # but we need them to exist separately. Shallow copy is enough for\n # this purpose, no need in copy.deepcopy().\n self._prev_point = copy.copy(self._curr_point)\n\n if self._curr_point.y < self._height - 1:\n self._curr_point.y += 1\n else:\n self._curr_point.x += 1\n self._curr_point.y = 0\n\n return cell", "def focus_next_cell(self, next):\n x, y = self._cell_input.cursor_coordinates()\n y_new = 0\n next._cell_input.set_cursor_coordinates(x, y_new)\n next.set_focus()\n self.lost_focus(force=True)", "def get_next_cell(self):\r\n cell = self.cells_to_process.pop(0)\r\n return cell", "def live(self, cell: Position):\n self._next_state.add(cell)", "def next(self):\n # IMPLEMENT ME\n next_board = []\n for i in range(self.height):\n row = []\n for j in range(self.width):\n row.append(\".\")\n next_board.append(row)\n\n for i in range(self.height):\n for j in range(self.width):\n neighbours = self.count_neighbors(i, j)\n if self.board[i][j] == 'x':\n if neighbours == 2 or neighbours == 3:\n next_board[i][j] = 'x'\n elif neighbours == 3:\n next_board[i][j] = 'x'\n\n self.board = next_board", "def next(self):\n while not self.is_stable():\n self.step()", "def get_next_cell(self,cell,dirNum,fact):\r\n dirTup = self.directions[dirNum]\r\n return (cell[0]+fact*dirTup[0],cell[1]+fact*dirTup[1])", "def cell_create(game_set, screen, covids, cells):\n cell_create_flag = True\n cell = Cell(game_set, screen)\n for old_cell in cells.sprites():\n if old_cell.rect.y < game_set.cell_number_adjust:\n cell_create_flag = False\n break\n if (not pygame.sprite.spritecollide(cell, cells, 0) and\n not pygame.sprite.spritecollide(cell, covids, 0) and\n cell_create_flag):\n cells.add(cell)", "def advance_generation(self):\n self.generation += 1\n next_cells = [[self.cell_state['dead']] * self.cols for x in range(self.lines)]\n for i in range(self.lines):\n for j in range(self.cols):\n neighbors = self.get_neighbors(i, j)\n if self[i][j] == self.cell_state['alive']:\n if neighbors == 2 or neighbors == 3:\n next_cells[i][j] = self.cell_state['alive']\n elif self[i][j] == self.cell_state['dead']:\n if neighbors == 3:\n next_cells[i][j] = self.cell_state['alive']\n super().__init__(next_cells)", "def next(self):\n self.jumpahead(1)", "def start_next_row(self) -> None:\n\n 
if self._is_ringing_rounds:\n self._row = self._rounds\n else:\n self._row = self.row_generator.next_row(self.stroke)\n if len(self._row) < len(self._rounds):\n # Add cover bells if needed\n self._row.extend(self._rounds[len(self._row):])\n\n for (index, bell) in enumerate(self._row):\n self.expect_bell(index, bell)", "def add_cell(self, cell):\r\n if cell not in self.cells:\r\n self.cells.add(cell)\r\n if cell.block == \"A\":\r\n self.blockA += 1\r\n self.blockA_free += 1\r\n self.blockA_cells.append(cell)\r\n else:\r\n assert cell.block == \"B\"\r\n self.blockB += 1\r\n self.blockB_free += 1\r\n self.blockB_cells.append(cell)", "def nextRow(self) -> bool:\n if self.hasNextRow():\n self.__currentRow += 1\n return True\n\n return False", "def __next__(self):\n if self.n < len(self.cellData):\n self.n += 1\n return self.cellData[self.n - 1]\n else:\n raise StopIteration", "def test_solo_cell():\n cell = c6.Cell(loc=[1, 1])\n for i in range(10):\n cell.step()", "def next_move(self):\n self.branchize()\n try:\n _, new_branch = self.score_scheme.pop(0)\n except IndexError:\n return False\n\n move_list = [new_branch[\"move\"]]\n parent = new_branch[\"parent\"]\n\n while True:\n try:\n move_list.append(parent[\"move\"]) \n parent = parent[\"parent\"]\n\n except KeyError:\n break\n\n self.__originate__()\n for i in move_list[::-1]:\n self.change(i)\n\n self.current_branch = new_branch\n self.output += str(self)\n return True", "def next(self):\n if(self.estado == 1):\n self.fin = False\n self.progeso[2] = \"\\t| 0\"\n elif (self.estado == 2):\n self.progeso[3] = \"\\t| /\"\n elif (self.estado == 3):\n self.progeso[3] = \"\\t| / \\\\\"\n elif (self.estado == 4):\n self.progeso[4] = \"\\t| |\"\n elif (self.estado == 5):\n self.progeso[5] = \"\\t| / \"\n elif (self.estado == 6):\n self.progeso[5] = \"\\t| / \\\\\" \n self.estado = 0\n self.fin = True\n \n self.estado+=1", "def next_state_of_cell(self, x_cell, y_cell):\n neighbours = self.get_number_neighbours_of_cell(x_cell, y_cell)\n if(self.board_state[x_cell][y_cell] == 1):\n # Any live cell with more than three live neighbours dies, \n # as if by overpopulation.\n if(neighbours > 3):\n return 0\n # Any live cell with fewer than two live neighbours dies,\n # as if by underpopulation.\n elif(neighbours < 2):\n return 0\n # Any live cell with two or three live neighbours lives\n # on to the next generation.\n else:\n return 1\n if(self.board_state[x_cell][y_cell] == 0):\n # Any dead cell with exactly three live neighbours becomes a live cell, \n # as if by reproduction.\n if(neighbours == 3):\n return 1\n else:\n return 0", "def _nextPosition(self, position, speed):\n\n next_position = position + self.integration_step * speed\n\n # Check if you reach a terminal state\n if abs(next_position) > 1:\n self.stuck = True\n return next_position", "def next_t(cell_list, current_burning, b_grid, current_fuel, f_grid, h_grid, \n i_threshold, w_direction, burnt_cells):\n for cell in cell_list: \n \n # for a cell that's not yet burning\n if b_grid[cell[0]][cell[1]] is False:\n burn = check_ignition(current_burning, current_fuel, h_grid, \n i_threshold, w_direction, cell[0], cell[1])\n if burn:\n burnt_cells.append(cell)\n b_grid[cell[0]][cell[1]] = True\n \n # for a cell that's already burning\n else: \n if f_grid[cell[0]][cell[1]] > 1:\n f_grid[cell[0]][cell[1]] -= 1\n else:\n f_grid[cell[0]][cell[1]] -= 1\n b_grid[cell[0]][cell[1]] = False", "def _next(self):\n # if self.order == 'rowsfirst':\n self.row_counter += 1\n if self.row_counter >= self.nrows:\n 
self.column_counter += 1\n self.row_counter = 0\n if self.column_counter > self.ncolumns:\n raise ValueError(\n \"Call to get next axis exceeds the number of columns requested initially: %d\"\n % self.columns\n )\n # else:\n # self.column_counter += 1\n # if self.column_counter >= self.ncolumns:\n # self.row_counter += 1\n # self.column_counter = 0\n # if self.row_counter >= self.nrows:\n # raise ValueError('Call to get next axis exceeds the number of rows requested initially: %d' % self.nrows)", "def test_cell(index):\r\n global cell_cnt\r\n cell_cnt += 1\r\n loc = lst_free_cells[index]\r\n cell_text = (application.ui.__getattribute__(f'cell{loc.column+1}{loc.row+1}')).text()\r\n # print(f'starting to test {cell_text} at {loc}')\r\n if cell_text == \"\": # no value in the cell yet\r\n val = 1\r\n elif 0 < int(cell_text) < 9: # value in the cell, get the next higher number to test\r\n delete_value(loc)\r\n val = int(cell_text) + 1\r\n else: # cell contains 9, so go back, if at first cell -> not solvable\r\n if index == 0:\r\n print(\"Go solution\")\r\n return 5\r\n else:\r\n print(f'No legal move at '\r\n f'{loc}')\r\n delete_value(loc)\r\n return -1\r\n if legal_next(val, loc):\r\n change_value(val, loc)\r\n if index == len(lst_free_cells) - 1:\r\n # done solving\r\n return 5\r\n else:\r\n print(f'Placed {val} at {loc}')\r\n return 1\r\n else:\r\n change_value(val, loc)\r\n return 0", "def next_generation(self):\n new_board = self.array.copy()\n for cell in self.cells:\n cell.update(new_board)\n \n if np.array_equal(self.prev_array, new_board):\n self.game.stable = True\n else:\n self.prev_array = self.array\n self.array = new_board", "def start_cell(self, style_name, span=1):\n raise NotImplementedError", "def get_next_cell(self):\r\n if len(self.queue) == 0:\r\n return\r\n val = self.peek_queue()\r\n self.queue.remove(val)\r\n return val[0]", "def get_next_available_cell(self, starting_cell, board):\n row = starting_cell.row\n column = starting_cell.column + self.total_column_req + 1\n if column >= board.columns:\n row = starting_cell.row + self.total_rows_req + 1\n column = 0\n\n if row >= board.rows:\n print('Cannot place ship outside board.')\n return None\n return board.grid[row][column]", "def add_new_cell(self, x, y, color):\n # if the origin changes then we are going to need to update all of the cells in the grid with new relative\n # positions.\n self.num_colored_cells += 1\n if color != self.color:\n self.color = -1\n x_origin_change = 0\n y_origin_change = 0\n bounding_box_change = False\n if x < self.top_left_x:\n x_origin_change = self.top_left_x - x\n self.top_left_x = x\n self.bounding_box_x_len += x_origin_change\n bounding_box_change = True\n elif x > self.top_left_x + self.bounding_box_x_len:\n self.bounding_box_x_len = x - self.top_left_x\n bounding_box_change = True\n if y < self.top_left_y:\n y_origin_change = self.top_left_y - y\n self.top_left_y = y\n self.bounding_box_y_len += y_origin_change\n bounding_box_change = True\n elif y > self.top_left_y + self.bounding_box_y_len:\n self.bounding_box_y_len = y - self.top_left_y\n bounding_box_change = True\n\n if bounding_box_change:\n new_cells = np.zeros((self.bounding_box_x_len + 1, self.bounding_box_y_len + 1), dtype=np.int32)\n new_cells[x_origin_change:len(self.cells) + x_origin_change,\n y_origin_change:len(self.cells[0]) + y_origin_change] = self.cells\n self.cells = new_cells\n self.cells[x - self.top_left_x][y - self.top_left_y] = color", "def end_cell(self):\n raise NotImplementedError", "def 
calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)" ]
[ "0.64226735", "0.6207801", "0.6127847", "0.6035226", "0.59616894", "0.59289956", "0.57356495", "0.5691281", "0.5688103", "0.5622672", "0.559166", "0.55331457", "0.55153364", "0.54764044", "0.54577273", "0.5436615", "0.54298186", "0.54120505", "0.53717905", "0.53711927", "0.5367694", "0.536224", "0.53044647", "0.52675784", "0.52486306", "0.5245704", "0.52224344", "0.52070767", "0.51980513", "0.51934564" ]
0.74477285
0
Updates the cell id > active index mapping.
def update_id2idx(self):
    self._id2idx = {}
    for n, cell in enumerate(self._cell_list):
        self._id2idx[cell.id()] = n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_in_db(db_values: list, sheet_cells: List[Cell], indices: Iterable[int]) -> List[int]:\n # print('Entering _update_in_db')\n mod_idx = []\n for i in indices:\n if db_values[i] != sheet_cells[i].value and (db_values[i] or sheet_cells[i].value):\n db_values[i] = sheet_cells[i].value\n mod_idx.append(i)\n # print('Leaving _update_in_db')\n return mod_idx", "def map_column_to_index(self, col):\n if col in self.column_maps:\n return\n\n # First construct the map from original ids to new ones.\n ids = pd.concat((self.train[col], self.test[col])).unique()\n n = len(ids)\n idmap = dict(itertools.izip(ids, xrange(n)))\n\n # Next use the map to convert the ids in-place.\n self.train.loc[:, col] = self.train[col].apply(lambda _id: idmap[_id])\n self.test.loc[:, col] = self.test[col].apply(lambda _id: idmap[_id])\n\n # Now swap key for value in the idmap to provide a way to convert back.\n reverse_map = {val: key for key, val in idmap.iteritems()}\n self.column_maps[col] = reverse_map", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def map_column_to_index(self, col):\n if col in self.column_maps:\n return\n\n # First construct the map from original ids to new ones.\n ids = self.dataset[col].unique()\n n = len(ids)\n idmap = dict(itertools.izip(ids, xrange(n)))\n\n # Next use the map to conver the ids in-place.\n self.dataset[col] = self.dataset[col].apply(lambda _id: idmap[_id])\n\n # Now swap key for value in the idmap to provide a way to convert back.\n reverse_map = {val: key for key, val in idmap.iteritems()}\n self.column_maps[col] = reverse_map", "def edit_index(state):\n node = state\n for key in (\"layers\", \"mode\"):\n node = node.get(key, {})\n return node.get(\"index\", 0)", "def edit_current_cell(self):\n cells = self.csv_data_table.selectionModel().selectedIndexes()\n if len(cells) == 1:\n for cell in sorted(cells):\n r = cell.row()\n c = cell.column()\n self.csv_data_table.editItem(self.csv_data_table.item(r, c))", "def activate(self):\n super(ActiveIndexedComponent, self).activate()\n if self.is_indexed():\n for component_data in itervalues(self):\n component_data.activate()", "def updateRow(self, index: int) -> None:\n ...", "def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind", "def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )", "def rebuild_index_old(self):\n logging.debug(\"updating detailed information for {}\".format(self))\n\n with get_db_connection() as db:\n c = db.cursor()\n c.execute(\"\"\"DELETE FROM observable_mapping WHERE alert_id = %s\"\"\", ( self.id, ))\n c.execute(\"\"\"DELETE FROM tag_mapping WHERE alert_id = %s\"\"\", ( self.id, ))\n db.commit()\n\n self.build_index()", "def _idx_changed(self, idx):\n self.refresh_memory()", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx", "def set_current(self, cell):\n\n if self.aux is not None:\n self.charMap[self.aux[0]][self.aux[1]].is_current = False\n self.aux = cell\n self.charMap[cell[0]][cell[1]].is_current = True", "def _update_battle_position(self, new_cells=[], previous_cells=[]):\n if previous_cells:\n for previous_cell in previous_cells:\n self._battle_area.set_cell(previous_cell.get_name(), False)\n if new_cells:\n for new_cell in new_cells:\n 
self._battle_area.set_cell(new_cell.get_name(), self)", "def set_index(self, list):\n for key in list:\n self.find_label_by_id(key).index = True", "def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def index_update_at(row, col):\n updates = []\n if row == 0:\n updates.append(NAME_SCHEME[\"index update\"].format(prefix=f\"t{col}\"))\n\n if col == 0:\n updates.append(NAME_SCHEME[\"index update\"].format(prefix=f\"l{row}\"))\n\n return updates", "def _update_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()", "def set_index(self, df):\n\n # generate a map of continuous index values to items\n self.index2item = dict(enumerate(df[self.col_item].unique()))\n\n # invert the mapping from above\n self.item2index = {v: k for k, v in self.index2item.items()}\n\n # create mapping of users to continuous indices\n self.user2index = {x[1]: x[0] for x in enumerate(df[self.col_user].unique())}\n\n # set values for the total count of users and items\n self.n_users = len(self.user2index)\n self.n_items = len(self.index2item)", "def _update_farness_map(self,ind):", "def change_map_up(self):\n if self.current_map_idx > 0:\n self.change_map(self.current_map_idx + 1)", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def set_indices(self, part_instance_counts):\n type_indices = {}\n for entry in self._entries:\n try:\n entry.set_indices(\n model_type_index=type_indices.setdefault(entry.ENTRY_SUBTYPE, 0),\n instance_count=part_instance_counts.get(entry.name, 0),\n )\n except KeyError as e:\n raise SoulstructError(\n f\"Invalid map component name for {entry.ENTRY_SUBTYPE.name} model {entry.name}: {e}\"\n )\n else:\n type_indices[entry.ENTRY_SUBTYPE] += 1", "def id_to_index(self, id):\n raise NotImplementedError", "def update_by_index(df, col, indexs, data):\n for indx in indexs:\n df.loc[indx, col] = data", "def update_course_index(self, course_key, updated_index_entry):\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n bulk_write_record.index = updated_index_entry\n else:\n self.db_connection.update_course_index(updated_index_entry, course_context=course_key)", "def map_ord_to_index(origin_char_list, save_path):\n ord_2_index_dict = {str(i) + '_index': str(ord(c)) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n index_2_ord_dict = {str(ord(c)) + '_ord': str(i) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n total_ord_map_index_dict = dict(ord_2_index_dict)\n total_ord_map_index_dict.update(index_2_ord_dict)\n CharDictBuilder._write_json(save_path, total_ord_map_index_dict)", "def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]" ]
[ "0.58209056", "0.5769968", "0.5764713", "0.5733865", "0.5632195", "0.5628743", "0.5600656", "0.5409752", "0.53816265", "0.5359478", "0.5354873", "0.5332774", "0.5331675", "0.52668226", "0.5264521", "0.52564687", "0.52549136", "0.5241691", "0.5241589", "0.5239948", "0.5236718", "0.5211851", "0.51898587", "0.5184889", "0.5174102", "0.51696295", "0.51557285", "0.5150407", "0.51420426", "0.5127821" ]
0.7694175
0
Inserts an element "new_elem" before the element "elem".
def insertChildBefore(new_elem, elem):
    parent = DOM.getParent(elem)
    id = DOM.getChildIndex(parent, elem)
    DOM.insertChild(parent, new_elem, id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wrap_with(old_el, new_el):\n new_el.insert_before(old_el)\n new_el.append(old_el)\n return new_el", "def prepend_element(self, element):\n\n pass", "def insert_element_before_similar(self, parent, new_child):\n new_tag = self.tag_base_name(new_child.tag)\n for i, child in enumerate(parent.getchildren()):\n if not self.tag_base_name_is(child, new_tag):\n parent.insert(i, new_child)\n break\n else:\n parent.append(new_child)", "def insert_element(some_list, index, new_el):\n return some_list.insert(index, new_el)", "def DocumentElementInsertBefore(self):\n raise NotImplementedError()", "def add_element(self, elem):\n self.add_element_with_id(elem, self.next_id)", "def insert(self, element):\n self.line.append(element)", "def push(self, new_element):\n self.ll.insert_first(new_element)", "def DocumentElementInsertAfter(self):\n raise NotImplementedError()", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def insert_new_element(self, element: LabelMetadata) -> None:\n\n if isinstance(element, dict):\n element = LabelMetadata.from_dict(element)\n if not isinstance(element, LabelMetadata):\n raise TypeError('element must be an LabelMetadata instance, got type {}'.format(type(element)))\n\n if self._elements is None:\n self._elements = [element, ]\n elif len(self._elements) == 0:\n self._elements.append(element)\n else:\n for i, entry in enumerate(self._elements):\n if element.timestamp > entry.timestamp:\n self._elements.insert(i, element)\n break", "def insert(self, new_element, position):\n current = self.head\n count = 1\n \n if position == 1:\n new_element.next = current\n self.head = new_element\n # elif not(isinstance(self.get_pos(pos), str)): # use: type(self.get_pos(pos)) == str\n else:\n while count < position-1:\n current = current.next\n count += 1\n new_element.next = current.next\n current.next = new_element", "def insert(self, new_element, position):\n count=1\n current = self.head\n if position == 1:\n new_element.next = self.head\n self.head = new_element\n while current:\n if count+1 == position:\n new_element.next =current.next\n current.next = new_element\n return\n else:\n count += 1\n current = current.next\n # break\n pass", "def addPrevSibling(self, elem):\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlAddPrevSibling(self._o, elem__o)\n if ret is None:raise treeError('xmlAddPrevSibling() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def insert(self, new_element, position):\n current = self.head\n index = 1\n \n if position == 1:\n current.next = new_element\n \n if position > 1:\n while index!= position - 1:\n current = current.next\n index += 1\n new_element.next = current.next\n current.next = new_element", "def add(self, elem):\n self.add_last(elem)", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return 
self._make_position(new_node)", "def insert(self, element: Node):\r\n if self._top == None:\r\n self._top = Node(None, element)\r\n return None\r\n new_element = self._add_element(element)\r\n self._correct_tree(new_element)", "def prepend(self, element):\n temp = Node(element, None, self.head)\n self.size += 1\n if self.size <= 1:\n self.tail = self.head\n self.head = temp", "def insert_at(sequence, index, element):\n # ... and the rest is up to you\n\n new_sequence = sequence[:index] + element + sequence[index:]\n return new_sequence", "def insert_after(node, new_node):\n new_node.next = node.next\n node.next = new_node", "def insert(self, new_element, position):\n current = self.head\n count = 1\n if position > 1:\n while ((current)and (count < position)):\n if(count == position-1):\n\n new_element.next=current.next\n current.next = new_element\n break\n #print(\"count\",count)\n current = current.next\n count = count + 1\n elif position == 1:\n new_element.next = self.head\n self.head = new_element\n\n pass", "def insert(self, new):\n return self.replace(None, new)", "def add_before(self, p, e):\n original = self._validate(p)\n return self._insert_between(e, original._prev, original)", "def addSibling(self, elem):\n if elem is None: elem__o = None\n else: elem__o = elem._o\n ret = libxml2mod.xmlAddSibling(self._o, elem__o)\n if ret is None:raise treeError('xmlAddSibling() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp", "def insert_element(new_cloth, index=0):\n global clothes\n clothes.insert(index, new_cloth)\n print (clothes)", "def insertBefore( self, node ):\n if isinstance( self, HtmlDomNode ) and isinstance( node, HtmlDomNode ):\n node.parentNode.before( node, self )", "def add(self, elem):", "def add(self, elem):" ]
[ "0.71355605", "0.69677913", "0.6811626", "0.6761492", "0.67238426", "0.64580613", "0.63880986", "0.63516754", "0.63296306", "0.6283591", "0.6275506", "0.6227607", "0.62055457", "0.6180249", "0.6157978", "0.61048913", "0.6090873", "0.6085723", "0.6082616", "0.59911156", "0.5946614", "0.58659893", "0.5864627", "0.58635473", "0.5827933", "0.57726914", "0.57506305", "0.5748751", "0.5745198", "0.5745198" ]
0.86346006
0
Prevents the current event's default behavior.
def event_preventDefault():
    event = DOM.eventGetCurrentEvent()
    if event.preventDefault:
        event.preventDefault()
    else:
        event.returnValue = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prevent_default_event(self, e):\n if e.target is self.node:\n if not (e.altKey is True and e.ctrlKey is True and e.shiftKey is True):\n e.preventDefault()", "def preventContextMenu(self, setting=True):\n if setting:\n if self.onContextMenu: return\n element = self.getElement()\n self.onContextMenu = lambda event: event.preventDefault()\n element.addEventListener('contextmenu', self.onContextMenu)\n else:\n if not self.onContextMenu: return\n element = self.getElement()\n element.removeEventListener('contextmenu', self.onContextMenu)\n self.onContextMenu = None", "def on_disable(self) -> None:\n self._cancel_automation()", "def disable_tk(self):\n self.clear_inputhook()", "def disable(self):\n super().disable()", "def doNonKeyEvent(self, event, obj):\n c = self.c\n eventType = event.type()\n if eventType == Type.WindowActivate:\n g.app.gui.onActivateEvent(event, c, obj, self.tag)\n elif eventType == Type.WindowDeactivate:\n g.app.gui.onDeactivateEvent(event, c, obj, self.tag)\n elif eventType == Type.FocusIn:\n if self.tag == 'body':\n c.frame.body.onFocusIn(obj)\n if c.frame and c.frame.top and obj is c.frame.top.lineEdit:\n if c.k.getStateKind() == 'getArg':\n c.frame.top.lineEdit.restore_selection()\n elif eventType == Type.FocusOut and self.tag == 'body':\n c.frame.body.onFocusOut(obj)\n # Return True unless we have a key event.\n return eventType not in (Type.ShortcutOverride, Type.KeyPress, Type.KeyRelease)", "def ignore(self, event: Event = None) -> None:\n if self.handler:\n self.openSpellTab()\n self.handler.ignore()\n else:\n self.openSpellTab()", "def disable_wx(self):\n self.clear_inputhook()", "def disable_gtk(self):\n self.clear_inputhook()", "def disable(self) -> None:", "def event_trigger(self, event):\n return False", "def event_trigger(self, event):\n return False", "def disable(self):\n pass", "def mouseDoubleClickEvent(self, event):\n event.ignore()", "def disable(self):\n self.SetInteractive(0)", "def disable(self):", "def deny():\n raise InterruptEvent", "def set_as_handled(self):\n self.not_handled = False", "def disable_receiver(self):\n self.set_receiver(False)", "def ignore(self, event):\n return not self.active", "def disable(self):\n raise NotImplementedError", "def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()", "def __disableControls(self):\n self.ignoreAll()", "def disable_qt4(self):\n self.clear_inputhook()", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def skip_control_z(event):\n pass", "def disable_emission(self):\n self.ask(\"LASER=OFF\")\n self.ask(\"LASER=ON\") # unlocks emission button, does NOT start emission!", "def disable_tee(self):\n self._tee = False", "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()", "def __disable__(self) -> None:\n pass" ]
[ "0.77278936", "0.67206496", "0.6367158", "0.6188951", "0.6186308", "0.6146374", "0.61462164", "0.6137589", "0.61199397", "0.6106635", "0.608739", "0.608739", "0.60655683", "0.6055399", "0.6053924", "0.5991946", "0.5972741", "0.595977", "0.59427965", "0.593023", "0.5912482", "0.5898756", "0.58444786", "0.5838527", "0.58353764", "0.581378", "0.57836646", "0.57440317", "0.5739075", "0.57250726" ]
0.7761421
0