Dataset columns (name, feature type, lengths):
  query            stringlengths    9 to 9.05k
  document         stringlengths    10 to 222k
  metadata         dict
  negatives        sequencelengths  30 to 30
  negative_scores  sequencelengths  30 to 30
  document_score   stringlengths    4 to 10
  document_rank    stringclasses    2 values
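Each row pairs a natural-language query with one positive code document and 30 mined negative documents, which matches the triplet objective declared in the per-row metadata field below. The sketch that follows shows one way a dataset with this layout could be loaded and inspected; it is a minimal illustration assuming a Hugging Face-style datasets repository, and the dataset path used here is a placeholder, not the real name.

# Minimal sketch: load and iterate (query, document, negatives) rows.
# Assumptions: Hugging Face "datasets" layout; "your-org/your-dataset" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train")

for row in ds.select(range(2)):
    query = row["query"]          # natural-language description of the code
    positive = row["document"]    # the matching code snippet
    negatives = row["negatives"]  # 30 non-matching code snippets
    print(query[:60], "| negatives:", len(negatives))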
Convert a string value into the given type; the type can be one of datetime.datetime, datetime.date, and datetime.time. Uses the current locale and timezone settings to convert the value.
def _parse_datetime(value, type=datetime.datetime):
    assert type in (datetime.datetime, datetime.date, datetime.time)
    if not isinstance(value, basestring):
        return value
    #TODO: use current locale and timezone info to parse to value
    formats = {
        datetime.datetime: '%Y-%m-%d %H:%M:%S',
        datetime.date: '%Y-%m-%d',
        datetime.time: '%H:%M:%S'
    }
    format = formats[type]
    try:
        value = datetime.datetime.strptime(value, format)
    except ValueError, e:
        raise ValidationError(e)
    try:
        return getattr(value, type.__name__)()
    except:
        return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _type_convert(self, value):\n if value is None:\n return value\n\n try:\n return datetime.datetime.strptime(value, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n pass\n\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n if _parser(value.strip().replace(\"_\", \"\")):\n return decimal.Decimal(value)\n except decimal.InvalidOperation:\n pass\n\n return value", "def _from_python(value):\r\n if isinstance(value, datetime):\r\n value = value.strftime('%Y-%m-%dT%H:%M:%S.000Z')\r\n elif isinstance(value, date):\r\n value = value.strftime('%Y-%m-%dT00:00:00.000Z')\r\n elif isinstance(value, bool):\r\n if value:\r\n value = 'true'\r\n else:\r\n value = 'false'\r\n else:\r\n value = unicode_safe(value)\r\n return value", "def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value", "def turn2type(value,t):\n k = None\n if (t==\"str\"):\n\tk = value\n if (t==\"int\"):\n\tk = int(value)\n if (t==\"float\"):\n\tk = float(value)\n if (t==\"date\"):\n\tk = time.mktime(datetime.datetime.strptime(value, \"%m/%d/%Y\").timetuple())\n return k", "def extract_datetime(\n datetime_str: Text, type_: Union[datetime.datetime, datetime.date]\n):\n if type_ == datetime.datetime:\n return parse_datetime(datetime_str)\n elif type_ == datetime.date:\n return parse_date(datetime_str)\n elif type_ == datetime.time:\n return parse_time(datetime_str)", "def convert(v):\n\n if type(v) is str and rexp.match(v):\n return as_date(v)\n return v", "def __convert_value(\n key: str,\n value: any,\n data_type: type\n) -> any:\n\n if value is None:\n return None\n\n if isinstance(value, data_type):\n return value\n\n # convert any integers if a float is expected. This can happen during\n # JSON encoding and decoding.\n if data_type == float and isinstance(value, int):\n return float(value)\n\n # datetime objects are supplied as a JSON (JavaScript) string.\n if data_type == datetime and isinstance(value, str):\n return parse_time(value)\n\n # enumerations are supplied as strings\n if issubclass(data_type, NebEnum) and isinstance(value, str):\n return getattr(data_type, \"parse\")(value)\n\n # dicts are interpreted as objects, so we instantiate a new object from\n # the provided dictionary. 
This may fail if the supplied data_type does\n # not have a constructor that accepts a dict.\n if isinstance(value, dict):\n return data_type(value)\n\n # if we got to this place an invalid data type was supplied and we raise\n # a TypeError.\n error = f\"{key} of invalid type {data_type}, got {value.__class__}\"\n raise TypeError(error)", "def str_to_type(name_type):\n if name_type == 'float' or name_type == 'Float':\n return float\n if name_type == 'bool':\n return bool\n if name_type == 'int':\n return lambda x: int(float(x))\n if name_type == 'list':\n return ast.literal_eval\n if name_type == 'date':\n return lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ')\n if name_type == 'str':\n return str\n\n \n return None", "def normalise_field_value(value):\n if isinstance(value, datetime):\n return make_timezone_naive(value)\n elif isinstance(value, Decimal):\n return decimal_to_string(value)\n return value", "def convert_to_time(value):\n if isinstance(value, datetime.time):\n return value\n elif isinstance(value, str):\n return datetime.time.fromisoformat(value)\n else:\n return datetime.time(value)", "def _convert_date(date_string, s_format='%Y-%m-%d'):\r\n if isinstance(date_string, str):\r\n return datetime.strptime(date_string, s_format)\r\n elif isinstance(date_string, datetime):\r\n return date_string\r\n else:\r\n raise TypeError(date_string, 'is not a string or datetime object')", "def _convert_value_type_phantom(value: str) -> Any:\n float_regexes = [r'\\d*\\.\\d*[Ee][-+]\\d*', r'-*\\d*\\.\\d*']\n timedelta_regexes = [r'\\d\\d\\d:\\d\\d']\n int_regexes = [r'-*\\d+']\n\n if value == 'T':\n return True\n if value == 'F':\n return False\n\n for regex in float_regexes:\n if re.fullmatch(regex, value):\n return float(value)\n\n for regex in timedelta_regexes:\n if re.fullmatch(regex, value):\n hours, minutes = value.split(':')\n return datetime.timedelta(hours=int(hours), minutes=int(minutes))\n\n for regex in int_regexes:\n if re.fullmatch(regex, value):\n return int(value)\n\n return value", "def _from_python(self, value):\n if hasattr(value, \"strftime\"):\n if hasattr(value, \"hour\"):\n offset = value.utcoffset()\n if offset:\n value = value - offset\n value = value.replace(tzinfo=None).isoformat() + \"Z\"\n else:\n value = \"%sT00:00:00Z\" % value.isoformat()\n elif isinstance(value, bool):\n if value:\n value = \"true\"\n else:\n value = \"false\"\n else:\n if IS_PY3:\n # Python 3.X\n if isinstance(value, bytes):\n value = str(value, errors=\"replace\") # NOQA: F821\n else:\n # Python 2.X\n if isinstance(value, str):\n value = unicode(value, errors=\"replace\") # NOQA: F821\n\n value = \"{0}\".format(value)\n\n return clean_xml_string(value)", "def strp(value, form):\n # pylint: disable=broad-except\n from time import strptime\n def_value = datetime.utcfromtimestamp(0)\n try:\n return datetime.strptime(value, form)\n except TypeError:\n try:\n return datetime(*(strptime(value, form)[0:6]))\n except ValueError:\n return def_value\n except Exception:\n return def_value", "def init_value(self, value, strict: bool = True):\n if isinstance(value, str):\n value = datetime.datetime.fromisoformat(value)\n elif isinstance(value, float):\n value = datetime.datetime.fromtimestamp(value)\n return super().init_value(value, strict)", "def check_type(val):\n\n try:\n a = float(val)\n return type(a)\n except ValueError:\n pass\n\n try:\n a = int(val)\n return type(a)\n except ValueError:\n pass\n\n try:\n a = dt.datetime.strptime(val, '%Y-%m-%dT%H:%M:%SZ')\n return type(a)\n 
except ValueError:\n pass\n\n return type(val)", "def python_cast(self, v):\n\n if self.type_is_time():\n import dateutil.parser\n dt = dateutil.parser.parse(v)\n\n if self.datatype == Column.DATATYPE_TIME:\n dt = dt.time()\n if not isinstance(dt, self.python_type):\n raise TypeError(\n '{} was parsed to {}, expected {}'.format(\n v,\n type(dt),\n self.python_type))\n\n return dt\n else:\n # This isn't calling the python_type method -- it's getting a python type, then instantialting it,\n # such as \"int(v)\"\n return self.python_type(v)", "def convert_to_string(value):\n if isinstance(value, str):\n return value\n # Boolean test must come before integer check!\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, int):\n return str(value)\n elif isinstance(value, float):\n return str(value)\n elif isinstance(value, UTCDateTime):\n return str(value).replace(\"Z\", \"\")\n else:\n raise TypeError(\"Unexpected type %s\" % repr(value))", "def _convert(string, type, message):\n try:\n return type(string)\n except ValueError as e:\n print(e)\n raise CharmmPSFError('Could not convert %s' % message)", "def _change_time_format(time_string):\n datetime_object = parser.isoparse(time_string)\n return datetime_object", "def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")", "def datetime_from_string(time):\n try:\n if type(time) == datetime.datetime:\n return time\n else:\n try:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S.%f')\n except ValueError:\n return time\n except TypeError:\n return time", "def convert_time(cls, time_str):\n if cls.date_ignore_pattern:\n time_str = re.sub(cls.date_ignore_pattern, '', time_str)\n return datetime.strptime(time_str, cls.date_format)", "def cvt_time(dt_str):\n # Note, these timestamps don't include time zones\n return datetime.strptime(dt_str, '%Y-%m-%dT%H:%M:%S.%fZ')", "def _parse_date(date_string, date_type):\n # If date_string is None return None\n if date_string is None:\n return None\n\n # Parse rfc3339 dates from string\n elif date_type == \"rfc3339\":\n if date_string[-3] == \":\":\n date_string = date_string[:-3] + date_string[-2:]\n return datetime.datetime.strptime(date_string, \"%Y-%m-%dT%H:%M:%S%z\")\n\n # Parse date only strings\n elif date_type == \"date-only\":\n if re.match(r\"^(\\d){4}-00-00$\", date_string):\n return datetime.datetime.strptime(date_string, \"%Y-00-00\").date()\n else:\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d\").date()\n \n elif date_type == \"date-time\":\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")", "def decode_datetime(self, string):\n if isinstance(string, str):\n if 'T' in string:\n return datetime.strptime(string, \"%Y%m%dT%H%M%S\")\n else:\n return datetime.strptime(string, \"%Y%m%d\")\n else:\n return string", "def myconverter(o: object):\n if isinstance(o, datetime.datetime):\n return o.__str__()", "def _convert_value_to_correct_datatype(value: str):\n if value == 'true':\n return True\n if value == 'false':\n return False\n try:\n result = ast.literal_eval(value)\n return result\n except Exception: # if it is not possible to 
evaluate the value then consider it as a string\n return value", "def _handle_sql_types(value):\n if type(value) is datetime:\n return value.isoformat()\n return str(value)", "def unpack_time(s, type='I'):\n\ttry:\n\t\t(l,), s = unpack(\"!\"+type, s)\n\texcept TypeError, e:\n\t\traise TypeError(\"Problem unpacking time: %s\" % e)\n\n\tif l < 0:\n\t\treturn None\n\treturn datetime.fromtimestamp(l), s" ]
[ "0.7504813", "0.6699153", "0.6594165", "0.6546857", "0.64849734", "0.6314421", "0.6287953", "0.6278388", "0.6217169", "0.60698295", "0.6042053", "0.60261947", "0.60256886", "0.5981932", "0.59376043", "0.59363985", "0.59323305", "0.59245753", "0.5917688", "0.59000576", "0.5861351", "0.5859223", "0.58584946", "0.585572", "0.5848388", "0.58218986", "0.5763404", "0.5750719", "0.57453376", "0.5732711" ]
0.76290923
0
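In this first row, the document_score (0.76290923) is higher than the largest negative_score (0.7504813), which is consistent with a document_rank of 0, i.e. the positive document ranking first among all candidates. That reading of the fields is an assumption rather than something documented here; the short sketch below only shows how such a rank could be recomputed from the scores under that assumption.

# Hedged sketch: recompute the positive document's rank among the negatives,
# assuming all scores are comparable similarity values (higher = more similar).
def document_rank(document_score, negative_scores):
    # Rank 0 means the positive document scores above every negative.
    return sum(1 for s in negative_scores if float(s) > float(document_score))

# Example with values taken from the first row:
print(document_rank("0.76290923", ["0.7504813", "0.6699153", "0.6594165"]))  # -> 0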
EEG Signal Simulation. Simulate an artificial EEG signal. This is a crude implementation based on the MNE-Python raw simulation example. Help is needed to improve this function.
def eeg_simulate(duration=1, length=None, sampling_rate=1000, noise=0.1, random_state=None):
    # Try loading mne
    try:
        import mne
        import mne.datasets
        import mne.simulation
    except ImportError as e:
        raise ImportError(
            "The 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        ) from e

    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)

    # Generate number of samples automatically if length is unspecified
    if length is None:
        length = duration * sampling_rate
    if duration is None:
        duration = length / sampling_rate

    # Get paths to data
    path = mne.datasets.sample.data_path() / "MEG" / "sample"
    raw_file = path / "sample_audvis_raw.fif"
    fwd_file = path / "sample_audvis-meg-eeg-oct-6-fwd.fif"

    # Load real data as the template
    raw = mne.io.read_raw_fif(raw_file, preload=True, verbose=False)
    raw = raw.set_eeg_reference(projection=True, verbose=False)

    n_dipoles = 4  # number of dipoles to create

    def data_fun(times, n_dipoles=4):
        """Generate time-staggered sinusoids at harmonics of 10Hz"""
        n = 0  # harmonic number
        n_samp = len(times)
        window = np.zeros(n_samp)
        start, stop = [int(ii * float(n_samp) / (2 * n_dipoles)) for ii in (2 * n, 2 * n + 1)]
        window[start:stop] = 1.0
        n += 1
        data = 25e-9 * np.sin(2.0 * np.pi * 10.0 * n * times)
        data *= window
        return data

    times = raw.times[: int(raw.info["sfreq"] * 2)]
    fwd = mne.read_forward_solution(fwd_file, verbose=False)
    stc = mne.simulation.simulate_sparse_stc(
        fwd["src"], n_dipoles=n_dipoles, times=times, data_fun=data_fun, random_state=rng,
    )

    # Repeat the source activation multiple times.
    raw_sim = mne.simulation.simulate_raw(raw.info, [stc] * int(np.ceil(duration / 2)), forward=fwd, verbose=False)
    cov = mne.make_ad_hoc_cov(raw_sim.info, std=noise / 1000000)
    raw_sim = mne.simulation.add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], verbose=False, random_state=rng)

    # Resample
    raw_sim = raw_sim.resample(sampling_rate, verbose=False)

    # Add artifacts
    # mne.simulation.add_ecg(raw_sim, verbose=False)
    # mne.simulation.add_eog(raw_sim, verbose=False)

    eeg = raw_sim.pick_types(eeg=True, verbose=False).get_data()
    return eeg[0, 0 : int(duration * sampling_rate)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_noise(emg):\n MAX_AMPLITUDE = 32767\n\n # Sampling\n # 1 second of data requires 600 frames. And 600 fps is 600 Hz, sampling rate of EMG.\n Ts = 1/EMG_F_SAMPLE\n\n # Time vector\n t = np.arange(0, len(emg)/EMG_F_SAMPLE, Ts) # each unit of t is a second\n\n # Noise\n randAmplitudeScale = np.random.random()*0.1\n randOffset = np.random.random() * 2*np.pi\n \n fNoise = 50; # Frequency [Hz]\n aNoise = randAmplitudeScale*MAX_AMPLITUDE # Amplitude\n noise = aNoise * np.sin(2 * np.pi * t * fNoise + randOffset)\n\n # Add noise to signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] += noise\n return emg", "def simulation_energy_over_time(E, T_ns, T_ns_threshold):\n no_start = (T_ns > T_ns_threshold)\n # (x, y, labels_dict, file_title, plot_title)\n generate_2D_plot(T_ns[no_start], E[no_start],\n {'x': r'time [$ns$]',\n 'y': r'E [$kcal/mol/A^2$]'},\n \"energy_graph\",\n \"Energy(time) graph\")", "def test_Eg(self):\n self.setUp()\n tmp = np.arange(1, 49).reshape(3, 2, 4, 2)\n g1 = np.broadcast_to(tmp[..., None], tmp.shape + (2,)).swapaxes(1, -1)\n f = .02 * np.arange(1, 25).reshape(3, 2, 4)\n n_samples, n_MC, K = self.n_samples, self.n_MC, 2\n Lambda_1 = self.E_func.Lambda_g(np.ones(shape=(n_samples, K, n_MC)), f)\n pi_xi = 1 / (1 + np.exp(np.array([-3, -4, -6])))\n Eg1 = self.E_func.Eg(g1, Lambda_1, pi_xi, f)\n Eg1_ = np.array([4.396, 12.396])\n np.testing.assert_almost_equal(Eg1[0, 0], Eg1_, 3)", "def _start_eplus_simulation(self):\n if not self.model:\n self.exit('No model specified.')\n if not self.weather:\n self.exit('No weather specified.')\n model_path = self.model\n if model_path[0] == '~':\n model_path = os.path.expanduser(model_path)\n if model_path[0] != '/':\n model_path = os.path.join(self.cwd, model_path)\n weather_path = self.weather\n if weather_path[0] == '~':\n weather_path = os.path.expanduser(weather_path)\n if weather_path[0] != '/':\n weather_path = os.path.join(self.cwd, weather_path)\n model_dir = os.path.dirname(model_path)\n bcvtb_dir = self.bcvtb_home\n if bcvtb_dir[0] == '~':\n bcvtb_dir = os.path.expanduser(bcvtb_dir)\n if bcvtb_dir[0] != '/':\n bcvtb_dir = os.path.join(self.cwd, bcvtb_dir)\n _log.debug('Working in %r', model_dir)\n\n self._write_port_file(os.path.join(model_dir, 'socket.cfg'))\n self._write_variable_file(os.path.join(model_dir, 'variables.cfg'))\n\n if self.version >= 8.4:\n cmd_str = \"cd %s; export BCVTB_HOME=%s; energyplus -w %s -r %s\" % (\n model_dir, bcvtb_dir, weather_path, model_path)\n else:\n cmd_str = \"export BCVTB_HOME=%s; runenergyplus %s %s\" % (bcvtb_dir, model_path, weather_path)\n _log.debug('Running: %s', cmd_str)\n f = open(model_path, 'r')\n lines = f.readlines()\n f.close()\n endmonth = 0\n if self.currentday + self.length > self.maxday:\n endday = self.currentday + self.length - self.maxday\n endmonth = self.currentmonth + 1\n else:\n endday = self.currentday + self.length\n endmonth = self.currentmonth\n for i in range(len(lines)):\n if lines[i].lower().find('runperiod,') != -1:\n if not self.real_time_flag:\n lines[i + 2] = ' ' + str(self.startmonth) + ', !- Begin Month' + '\\n'\n lines[i + 3] = ' ' + str(self.startday) + ', !- Begin Day of Month' + '\\n'\n lines[i + 4] = ' ' + str(self.endmonth) + ', !- End Month' + '\\n'\n lines[i + 5] = ' ' + str(self.endday) + ', !- End Day of Month' + '\\n'\n else:\n lines[i + 2] = ' ' + str(self.currentmonth) + ', !- Begin Month' + '\\n'\n lines[i + 3] = ' ' + str(\n self.currentday) + ', !- Begin Day of Month' + '\\n'\n lines[i 
+ 4] = ' ' + str(endmonth) + ', !- End Month' + '\\n'\n lines[i + 5] = ' ' + str(endday) + ', !- End Day of Month' + '\\n'\n for i in range(len(lines)):\n if lines[i].lower().find('timestep,') != -1 and lines[i].lower().find('update frequency') == -1:\n if lines[i].lower().find(';') != -1:\n lines[i] = ' Timestep,' + str(self.timestep) + ';' + '\\n'\n else:\n lines[i + 1] = ' ' + str(self.timestep) + ';' + '\\n'\n if self.customizedOutT > 0:\n lines.append('ExternalInterface:Actuator,') + '\\n'\n lines.append(' outT, !- Name') + '\\n'\n lines.append(' Environment, !- Actuated Component Unique Name') + '\\n'\n lines.append(' Weather Data, !- Actuated Component Type') + '\\n'\n lines.append(' Outdoor Dry Bulb; !- Actuated Component Control Type') + '\\n'\n f = open(model_path, 'w')\n\n for i in range(len(lines)):\n f.writelines(lines[i])\n f.close()\n self.simulation = subprocess.Popen(cmd_str, shell=True)", "def createSignalModelExponential(data):\n print \"Creating model\"\n switchpoint = DiscreteUniform('switchpoint', lower=0, upper=len(data))\n \n noise_sigma = HalfNormal('noise_sigma', tau=sigToTau(.01))\n exp_sigma = HalfNormal('exp_sigma', tau=sigToTau(.05))\n \n #Modeling these parameters this way is why wf needs to be normalized\n exp_rate = Uniform('exp_rate', lower=0, upper=.1)\n exp_scale = Uniform('exp_scale', lower=0, upper=.1)\n \n timestamp = np.arange(0, len(data), dtype=np.float)\n \n @deterministic(plot=False, name=\"test\")\n def uncertainty_model(s=switchpoint, n=noise_sigma, e=exp_sigma):\n ''' Concatenate Poisson means '''\n out = np.empty(len(data))\n out[:s] = n\n out[s:] = e\n return out\n \n @deterministic\n def tau(eps=uncertainty_model):\n return np.power(eps, -2)\n \n## @deterministic(plot=False, name=\"test2\")\n## def adjusted_scale(s=switchpoint, s1=exp_scale):\n## out = np.empty(len(data))\n## out[:s] = s1\n## out[s:] = s1\n## return out\n#\n# scale_param = adjusted_scale(switchpoint, exp_scale)\n\n @deterministic(plot=False)\n def baseline_model(s=switchpoint, r=exp_rate, scale=exp_scale):\n out = np.zeros(len(data))\n out[s:] = scale * ( np.exp(r * (timestamp[s:] - s)) - 1.)\n \n# plt.figure(fig.number)\n# plt.clf()\n# plt.plot(out ,color=\"blue\" )\n# plt.plot(data ,color=\"red\" )\n# value = raw_input(' --> Press q to quit, any other key to continue\\n')\n\n return out\n\n baseline_observed = Normal(\"baseline_observed\", mu=baseline_model, tau=tau, value=data, observed= True )\n return locals()", "def runEisensteinHu(self, sig8):\n #Output EHu file\n f = file('ehu.in','w')\n\n #f.write((str(self.cp.omega_baryon + self.cp.omega_cdm))+', '+str(self.cp.omega_lambda)+', '+\\\n # str(self.cp.omega_neutrino)+', '+str(self.cp.omega_baryon)+'\\n')\n\n h = self.cp.hubble/100.\n om0 = (self.cp.ombh2 + self.cp.omch2)/h**2\n f.write(str(om0)+', '+str(1.-om0)+', '+ str(self.cp.omega_neutrino)+', '+str(self.cp.ombh2/h**2)+'\\n')\n f.write(str(h)+', '+str(self.cp.temp_cmb)+', '+str(self.cp.massless_neutrinos)+'\\n')\n f.write(str(self.cp.transfer_redshift[0])+'\\n')\n f.write(str(self.cp.transfer_kmax)+', '+str(self.cp.transfer_k_per_logint)+'\\n')\n f.write('1\\n')\n tilt = self.cp.scalar_spectral_index[0]\n f.write(str(tilt)+'\\n')\n f.write('0\\n')\n\n f.close()\n\n # run EHu code\n os.system('../ehu/power < ehu.in > ehu.crap')\n\n # read into c.k, c.pk\n eh = N.loadtxt('trans.dat')\n self.k = eh[:,0]*1.\n #print self.k\n self.logk = M.log(self.k)\n self.trans = eh[:,1]*1.\n if tilt == 1.:\n delH = 1.94e-5*(self.cp.omega_cdm + self.cp.omega_baryon)**(-0.785)\n 
delta = delH**2*(3000.0*self.k/(self.cp.hubble/100.))**4*self.trans**2\n else:\n delH = 1.94e-5*self.cp.omega_cdm**(-0.785 - 0.05*M.log(tilt))\\\n * M.exp(-0.95*(tilt - 1.) - 0.169*(tilt - 1)**2)\n delta = delH**2*(3000.0*self.k/(self.cp.hubble/100.))**(3 + tilt)*self.trans**2\n\n # Just an approximate normalization; really need sig8.\n \n self.pk = (2.*M.pi**2 * delta/self.k**3)*(self.cp.hubble/100.)**3\n if self.cp.transfer_redshift[0] > 0.:\n ps = PowerSpectrum(self.cp)\n sig8use = sig8*ps.d1(self.cp.transfer_redshift[0])/ps.d1(0.)\n else:\n sig8use = sig8\n normalizePk(self,sig8use) # sets c.logpk, too\n\n return", "def performEOGRegression(eeg ,eog):\n\n size_tuple = np.shape(eog) # resizing the EOG array so that its pseudoinverse can be calculated\n dimension = len(size_tuple)\n if dimension == 1:\n eog.resize((1, size_tuple[0]))\n eeg_t = np.transpose(eeg)\n eog_t = np.transpose(eog)\n pseudoinv = np.linalg.pinv(np.dot(np.transpose(eog_t), eog_t)) # performing pseudoinverse\n inv = np.dot(pseudoinv,np.transpose(eog_t))\n subtract_eog = np.dot(eog_t,np.dot(inv,eeg_t))\n clean_eeg = np.transpose(np.subtract(eeg_t, subtract_eog)) # subtracting the EOG noise from the EEG signal\n return clean_eeg;", "def _emiss_ee(self,Eph):\n if self.weight_ee == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_ee(gam,Eph),\n self._gam, axis=0)\n return emiss", "def main():\n\tparser = argparse.ArgumentParser(description=\"Estimate the efferents modulation induced by EES and afferent input together\")\n\tparser.add_argument(\"eesFrequency\", help=\"ees frequency\", type=float, choices=[gt.Range(0,1000)])\n\tparser.add_argument(\"eesAmplitude\", help=\"ees amplitude (0-600] or %%Ia_II_Mn\")\n\tparser.add_argument(\"species\", help=\"simulated species\", choices=[\"rat\",\"human\"])\n\tparser.add_argument(\"inputFile\", help=\"neural network structure file (e.g. 
fsSFrFfMnArtModHuman.txt)\")\n\tparser.add_argument(\"name\", help=\"name to add at the output files\")\n\tparser.add_argument(\"--mnReal\", help=\"Real Mn flag, IntFire Mn otherwise\",action=\"store_true\")\n\tparser.add_argument(\"--simTime\", help=\"simulation time\", type=int, default=1000)\n\tparser.add_argument(\"--burstingEes\", help=\"flag to use burst stimulation\", action=\"store_true\")\n\tparser.add_argument(\"--nPulsesPerBurst\", help=\"number of pulses per burst\", type=int, default=5)\n\tparser.add_argument(\"--burstsFrequency\", help=\"stimulation frequency within bursts\",type=float, default=600, choices=[gt.Range(0,1000)])\n\tparser.add_argument(\"--seed\", help=\"positive seed used to initialize random number generators (default = time.time())\", type=int, choices=[gt.Range(0,999999)])\n\targs = parser.parse_args()\n\n\tif args.seed is not None: sh.save_seed(args.seed)\n\telse: sh.save_seed(int(time.time()))\n\n\t# Import simulation specific modules\n\tfrom simulations import ForSimSpinalModulation\n\tfrom NeuralNetwork import NeuralNetwork\n\tfrom EES import EES\n\tfrom BurstingEES import BurstingEES\n\tfrom NetworkStimulation import NetworkStimulation\n\n\t# Initialze variables...\n\tif args.eesAmplitude[0]==\"%\": eesAmplitude = [float(x) for x in args.eesAmplitude[1:].split(\"_\")]\n\telse: eesAmplitude = float(args.eesAmplitude)\n\tname = args.name+\"_amp_\"+args.eesAmplitude+\"_freq_\"+str(args.eesFrequency)\n\tpc = h.ParallelContext()\n\tnn=NeuralNetwork(pc,args.inputFile)\n\tif not args.burstingEes: ees = EES(pc,nn,eesAmplitude,args.eesFrequency,pulsesNumber=100000,species=args.species)\n\telse: ees = BurstingEES(pc,nn,eesAmplitude,args.eesFrequency,args.burstsFrequency,args.nPulsesPerBurst,species=args.species)\n\tees.get_amplitude(True)\n\tprint \"The stimulation frequency is: \",args.eesFrequency,\" Hz\"\n\tafferentsInput = None\n\n\tcellsToRecord = {}\n\tcellsToRecord['Iaf'] = nn.cells['SOL']['Iaf']\n\tcellsToRecord['MnS']=nn.cells['SOL']['MnS']\n\t# cellsToRecord['MnFf']=nn.cells['SOL']['MnFf']\n\t# cellsToRecord['MnFr']=nn.cells['SOL']['MnFr']\n\t# modelTypes = {\"MnS\":\"artificial\",\"MnFr\":\"artificial\",\"MnFf\":\"artificial\",\"Iaf\":\"artificial\"}\n\tmodelTypes = {\"MnS\":\"artificial\",\"Iaf\":\"artificial\"}\n\tsimulation = ForSimSpinalModulation(pc,nn,cellsToRecord,modelTypes, afferentsInput, None, None, args.simTime)\n\tsimulation.set_results_folder(\"../../results/AffEffModSweap/\")\n\tsimulation.run()\n\tsimulation.raster_plot(name,False)\n\tcomm.Barrier()\n\n\tsimulation.save_results(name)", "def signal_generation(Es):\n size = 3 * 10**5\n low = 1\n high = 9\n\n rint = np.random.randint(low, high, size)\n signal = np.zeros((size, 2))\n # Mapping, regardless of the grey coding\n signal[:, 0] = map(lambda m: (Es)**0.5 * cos(2 * pi * m / 8), rint)\n signal[:, 1] = map(lambda m: (Es)**0.5 * sin(2 * pi * m / 8), rint)\n return rint, signal", "def Execute_ePCSim(ePCSim_conn, command):\r\n ePCSim_conn.prompt = \"EGATE>\"\r\n ePCSim_conn.SendCmd(command)", "def generate_eos_inputs(generate_structure, generate_code):\n\n def _generate_eos_inputs():\n return {\n 'structure': generate_structure(symbols=('Si',)),\n 'sub_process_class': 'common_workflows.relax.quantum_espresso',\n 'generator_inputs': {\n 'protocol': 'fast',\n 'engines': {\n 'relax': {\n 'code': generate_code('quantumespresso.pw').store(),\n 'options': {\n 'resources': {\n 'num_machines': 1\n }\n }\n }\n },\n 'electronic_type': 'metal',\n 'relax_type': 'positions'\n }\n }\n\n return 
_generate_eos_inputs", "def integrateGRayFlux(self, emin, emax, t_sec, g11, m_neV, bfield = 'jansson12', esteps = 100, eflux = False):\n EMeV_array = np.logspace(np.log10(emin), np.log10(emax), esteps)\n\n dna_dedt = self.AvgALPflux(EMeV_array, t_sec, g11) # alps / MeV / s \n pag = self.Pag(EMeV_array, g11, m_neV, bfield = bfield) # conversion prob\n dng_dedt = dna_dedt * pag # gamma rays / MeV / s \n flux = dng_dedt * self.fluxconstant # gamma rays / MeV / s / cm^2\n\n if eflux:\n return simps(flux * EMeV_array * EMeV_array, np.log(EMeV_array))\n else:\n return simps(flux * EMeV_array, np.log(EMeV_array))", "def add_elec_bunch( sim, gamma0, n_e, p_zmin, p_zmax, p_rmin, p_rmax,\n p_nr=2, p_nz=2, p_nt=4, dens_func=None, boost=None,\n direction='forward', filter_currents=True ) :\n\n # Convert parameters to boosted frame\n if boost is not None:\n beta0 = np.sqrt( 1. - 1./gamma0**2 )\n p_zmin, p_zmax = boost.copropag_length(\n [ p_zmin, p_zmax ], beta_object=beta0 )\n n_e, = boost.copropag_density( [n_e], beta_object=beta0 )\n gamma0, = boost.gamma( [gamma0] )\n\n # Modify the input parameters p_zmin, p_zmax, r_zmin, r_zmax, so that\n # they fall exactly on the grid, and infer the number of particles\n p_zmin, p_zmax, Npz = adapt_to_grid( sim.fld.interp[0].z,\n p_zmin, p_zmax, p_nz )\n p_rmin, p_rmax, Npr = adapt_to_grid( sim.fld.interp[0].r,\n p_rmin, p_rmax, p_nr )\n\n # Create the electrons\n relat_elec = Particles( q=-e, m=m_e, n=n_e,\n Npz=Npz, zmin=p_zmin, zmax=p_zmax,\n Npr=Npr, rmin=p_rmin, rmax=p_rmax,\n Nptheta=p_nt, dt=sim.dt,\n continuous_injection=False,\n dens_func=dens_func, use_cuda=sim.use_cuda,\n grid_shape=sim.fld.interp[0].Ez.shape )\n\n # Give them the right velocity\n relat_elec.inv_gamma[:] = 1./gamma0\n relat_elec.uz[:] = np.sqrt( gamma0**2 -1.)\n\n # Electron beam moving in the background direction\n if direction == 'backward':\n relat_elec.uz[:] *= -1.\n\n # Add them to the particles of the simulation\n sim.ptcl.append( relat_elec )\n\n # Get the corresponding space-charge fields\n get_space_charge_fields( sim.fld, [relat_elec], gamma0,\n filter_currents, direction=direction)", "def parse_eplus_msg(self, msg):\n msg = msg.decode(\"utf-8\") \n msg = msg.rstrip()\n _log.info(f\"Received message from EnergyPlus: {msg}\")\n arry = msg.split()\n arry = [float(item) for item in arry]\n _log.info(f\"Received message from EnergyPlus: {arry}\")\n slot = 6\n self.sim_flag = arry[1]\n\n if self.sim_flag != 0.0:\n # Exit based on error status\n _log.debug(\"FLAG: {} - {}\".format(self.sim_flag, type(self.sim_flag)))\n self._check_sim_flag()\n elif arry[2] < self.eplus_outputs and len(arry) < self.eplus_outputs + 6:\n self.exit('Got message with ' + arry[2] + ' inputs. 
Expecting ' + str(self.eplus_outputs) + '.')\n else:\n if float(arry[5]):\n self.time = float(arry[5])\n for input in self.inputs:\n name_value = input.get('name', None)\n dynamic_default_value = input.get('dynamic_default', None)\n if name_value is not None and dynamic_default_value is not None:\n slot = 6\n for output in self.outputs:\n _log.debug(\"Output: {}\".format(output))\n default_value = output.get('default', None)\n if default_value is not None:\n if default_value.lower().find(name_value.lower()) != -1:\n input['default'] = float(arry[slot])\n slot += 1\n slot = 6\n for output in self.outputs:\n name_value = output.get('name', None)\n type_value = output.get('type', None)\n field_value = output.get('field', None)\n if name_value is not None and type_value is not None:\n try:\n output['value'] = float(arry[slot])\n except:\n _log.debug(slot)\n self.exit('Unable to convert received value to double.')\n if \"currentmonthv\" in type_value.lower():\n self.month = float(arry[slot])\n _log.debug(f\"month {self.month}\")\n elif \"currentdayofmonthv\" in type_value.lower():\n self.day = float(arry[slot])\n _log.debug(f\"day {self.day}\")\n elif \"currenthourv\" in type_value.lower():\n self.hour = float(arry[slot])\n _log.debug(f\"hour {self.hour}\")\n elif \"currentminutev\" in type_value.lower():\n self.minute = float(arry[slot])\n _log.debug(f\"minute: {self.minute}\")\n elif field_value is not None and 'operation' in field_value.lower():\n self.operation = float(arry[slot])\n _log.debug(f\"operation (1:on, 0: off) {self.operation}\")\n slot += 1", "def simulate(): \n \n # Create tmpdir to hold all steerfiles and log files \n SimObj = Simulation(steerfiles=steerfiles, name=os.path.splitext(os.path.basename(rawfile_alu))[0] + '-sim' )\n\n # Set Beam energy\n SimObj.set_beam_momentum(beamenergy)\n\n # Create steerfiles for processing\n simpath = create_sim_path_air(SimObj)\n\n # Get gearfile\n localgearfile = SimObj.get_filename('gear.xml')\n\n # Misalign gear file\n randomize_telescope(gearfile=localgearfile, mean_list=mean_list, sigma_list=sigma_list, sensorexception_list=sensorexception_list, modeexception_list=modeexception_list)\n\n localtruthdb_filename=SimObj.create_dbfilename(truthdb_filename)\n\n # Convert gear file to alignmentDB root file, which will be stored in the sim folder\n Create_AlignmentDBFile_From_Gear(gearfile=SimObj.get_filename('gear.xml'), truthdbfilename=localtruthdb_filename)\n\n # Copy gearfile\n SimObj.copy_file('gear.xml','gear_air.xml')\n\n # Get air gearfile\n gearfile_air = SimObj.get_filename('gear_air.xml')\n\n # Change DUT in copied gearfile\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='radLength', value=304000.0)\n\n\n # Create caltag for the truthdb\n localcaltag = os.path.splitext(os.path.basename(rawfile_air))[0] + '-test'\n simcaltag=localcaltag+ '-truthdb'\n\n # Run simulation to create rawfile with simulated digits \n SimObj.simulate(path=simpath,caltag=simcaltag)", "def delayE(self):\n sinE = np.sin(self.E())\n return self.GAMMA", "def spikingModel(wEE, wEI, wIE, wII, stim_e, stim_i,\n time=1000, dt=0.1, Vth=1.0, Vre=0.0,\n tau_e=15.0, tau_i=10.0, ref_e=5.0, ref_i=5.0, \n syntau2_e=3.0, syntau2_i=2.0, syntau1=1.0):\n\n T = np.arange(0,time,dt)\n nE = wEE.shape[0]\n nI = wII.shape[0]\n\n Ve = np.zeros((nE,len(T)))\n Vi = np.zeros((nI,len(T)))\n # Set initial conditions\n Ve = np.random.uniform(0,1,size=(nE,))\n Vi = 
np.random.uniform(0,1,size=(nI,))\n # Instantiate synaptic currents empty matrix\n Ie = np.zeros((nE,len(T)))\n Ii = np.zeros((nI,len(T)))\n # Instantiate spiking matrix\n spkE = np.zeros((nE,time))\n spkI = np.zeros((nI,time))\n # Instantiate synaptic input matrix (temporally downsampled)\n synE = np.zeros((nE,time))\n synI = np.zeros((nI,time))\n\n bin_spkE = np.zeros((nE,))\n bin_spkI = np.zeros((nI,))\n # Synaptic rise gating variable\n xrse_ee = np.zeros((nE,))\n xdec_ee = np.zeros((nE,))\n xrse_ei= np.zeros((nI,))\n xdec_ei = np.zeros((nI,))\n xrse_ie = np.zeros((nE,))\n xdec_ie = np.zeros((nE,))\n xrse_ii= np.zeros((nI,))\n xdec_ii = np.zeros((nI,))\n\n\n # Set random biases from a uniform distribution\n # Excitatory neurons\n mu_e = np.random.uniform(1.1,1.2,size=(nE,))\n #mu_e = np.random.uniform(1.05,1.15,size=(nE,)) # Imbalanced state\n # Inhibitory neurons\n mu_i = np.random.uniform(1.0,1.05,size=(nI,))\n\n maxrate = 500 # max rate is 100hz\n maxtimes = int(np.round(maxrate*time/1000))\n timesE = np.zeros((nE,maxrate))\n timesI = np.zeros((nI,maxrate))\n ne_s = np.zeros((nE,),dtype=int)\n ni_s = np.zeros((nI,),dtype=int)\n\n refractory_e = np.zeros((nE,))\n refractory_i = np.zeros((nI,))\n for t in range(len(T)-1):\n ## Using RK2 method\n\n ## K1s\n Ve = Ve + dt*((mu_e + stim_e - Ve)/tau_e + Ie[:,t])\n Vi = Vi + dt*((mu_i + stim_i - Vi)/tau_i + Ii[:,t])\n\n # Synaptic gating\n # Excitatory synapses\n xrse_ee = xrse_ee - dt*xrse_ee/syntau1 + np.matmul(bin_spkE,wEE)\n xdec_ee = xdec_ee - dt*xdec_ee/syntau2_e + np.matmul(bin_spkE,wEE)\n xrse_ei = xrse_ei - dt*xrse_ei/syntau1 + np.matmul(bin_spkE,wEI)\n xdec_ei = xdec_ei - dt*xdec_ei/syntau2_e + np.matmul(bin_spkE,wEI)\n # Inhibitory dt*synapses\n xrse_ie = xrse_ie - dt*xrse_ie/syntau1 + np.matmul(bin_spkI,wIE)\n xdec_ie = xdec_ie - dt*xdec_ie/syntau2_i + np.matmul(bin_spkI,wIE)\n xrse_ii = xrse_ii - dt*xrse_ii/syntau1 + np.matmul(bin_spkI,wII)\n xdec_ii = xdec_ii - dt*xdec_ii/syntau2_i + np.matmul(bin_spkI,wII)\n\n # Calculate synaptic outputs given rise and decay times\n Ie[:,t+1] = (xdec_ee - xrse_ee)/(syntau2_e - syntau1) + (xdec_ie - xrse_ie)/(syntau2_i - syntau1)\n Ii[:,t+1] = (xdec_ii - xrse_ii)/(syntau2_i - syntau1) + (xdec_ei - xrse_ei)/(syntau2_e - syntau1)\n\n ## Spiking\n # Find which neurons exceed threshold (and are not in a refractory period)\n bin_spkE = np.multiply(Ve>Vth, refractory_e==0.0)\n bin_spkI = np.multiply(Vi>Vth, refractory_i==0.0)\n\n # Save spike time (and downsample to 1ms)\n tms = int(np.floor(T[t]))\n spkE[bin_spkE,tms] = 1 # spikes are okay - refractory period is 5ms, anyway\n spkI[bin_spkI,tms] = 1\n synE[:,tms] = synE[:,tms] + Ie[:,t]\n synI[:,tms] = synI[:,tms] + Ii[:,t]\n\n # Reset voltages\n Ve[bin_spkE] = Vre\n Vi[bin_spkI] = Vre\n\n # spike times\n timesE[bin_spkE,ne_s[bin_spkE]] = T[t+1]\n timesI[bin_spkI,ni_s[bin_spkI]] = T[t+1]\n ne_s[bin_spkE] = ne_s[bin_spkE] + 1\n ni_s[bin_spkI] = ni_s[bin_spkI] + 1\n\n\n # Set refractory period\n # Add a refractory time step to neurons who just spiked, and to those are still in a refractory period\n refractory_e = refractory_e + (bin_spkE * dt) + (refractory_e!=0) * dt \n refractory_i = refractory_i + (bin_spkI * dt) + (refractory_i!=0) * dt\n # Once refractory period is complete, allow to spike\n can_spike_again_e = np.round(refractory_e,1) == ref_e\n can_spike_again_i = np.round(refractory_i,1) == ref_i\n\n refractory_e[can_spike_again_e] = 0.0\n refractory_i[can_spike_again_i] = 0.0\n\n # Set neurons who are in their refractory to the baseline 
membrane potential\n in_refractory_e = refractory_e != 0.0\n in_refractory_i = refractory_i != 0.0\n\n Ve[in_refractory_e] = Vre\n Vi[in_refractory_i] = Vre\n \n return spkE, spkI, synE, synI, timesE, timesI, ne_s, ni_s", "def _emiss_ep(self,Eph):\n if self.weight_ep == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n eps = (Eph / mec2).decompose().value\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_1(gam,eps),\n self._gam, axis=0).to(u.cm**2 / Eph.unit)\n return emiss", "def E(self, t):\n\n\t\tE = self.E0\n\n\t\t# Gaussian pulse shape\n\t\tE *= np.exp(-2.*np.log(2.)*((t-self.t0)/self.pulse_duration)**2.)\n\n\t\t# Instantaneous phase\n\t\tif self.phase:\n\t\t\tE *= np.cos(self.omega*(t-self.t0))\n\n\t\t# Transmition\n\t\tif self.remove_reflected_part and self.domain.D == 0:\n\t\t\tmaterial = self.domain.materials[0]\n\t\t\tE *= ((1.-material.Reflectivity)/material._Drude_index.real)**0.5\n\n\t\treturn E", "def exercise_e():\n print \"- Solving for question e\"\n N = 500\n plt.figure(figsize=[5, 5])\n for _omega_r in [0.01, 0.5, 1, 5]:\n def potential3(x): # If this confuses you (which it should), read the multiline comment above\n o = _omega_r\n return o * o * x * x + 1.0 / x\n A, rho = construct_matrix(dim=N, varMax=10, potential=potential3)\n A_eval, A_evec = je.jacobi_solve(A)\n A_eval, A_evec = sort_eigenpair(A_eval, A_evec)\n plt.plot(rho, A_evec[:, 0], label=\"$\\\\omega_r=$%.2f\" % _omega_r)\n\n figsetup(title=\"Dimensionless wavefunction for first eigenstates\", xlab=\"$\\\\rho$\", ylab=\"$u(\\\\rho)$\",\n fname=\"question2e\")\n print \"- Done Solving for question e\"\n return", "def ETPA(omegap, E, edip, Te, g_idx=[0], e_idx=[], f_idx=[]):\n N = len(E)\n tdm = edip\n # gamma = np.zeros(nstates)\n # for j in range(1, N):\n # gamma[j] = sum(tdm[:j, j]**2) * 0.0005\n # gamma[1:] = 0.0001\n\n # print('lifetimes of polariton states = {} eV'.format(gamma * au2ev))\n\n omega1 = omegap * 0.5\n omega2 = omegap - omega1\n # flist = [3, 4] # final states list\n i = g_idx[0]\n\n A = np.zeros(N, dtype=complex)\n\n signal = 0.0\n\n for f in f_idx:\n for m in e_idx:\n A[f] += tdm[f, m] * tdm[m, i] * \\\n ((exp(1j * (omega1 - (en[m] - en[i]) + 1j * gamma[m]) * T) - 1.) 
/ (omega1 - (en[m] - en[i]) + 1j * gamma[m]) \\\n + (exp(1j * (omega2 - (en[m] - en[i])) * T) - 1.)/(omega2 - (en[m] - en[i]) + 1j * gamma[m]))\n\n signal += np.abs(A[f])**2 * lorentzian(omegap - en[f] + en[i], gamma[f])\n\n return signal", "def Eee(h_in, h_out, neutron_spectrum):\n h_out.Scale(0.098)\n neutron_spectrum.Add(h_in, 1)\n neutron_spectrum.Add(h_out, -1)\n neutron_spectrum.Fit('gaus')\n #neutron_spectrum.Draw()\n #input()\n results_fit = neutron_spectrum.GetFunction('gaus')\n mean_energy = results_fit.GetParameter(1)\n spread_energy = results_fit.GetParError(1)\n return mean_energy, spread_energy", "def env_EMG(emg, fs):\n EMGenv = np.copy(emg)\n \n #Remove line noise\n cof_50 = np.array([49, 51])\n Wn_50 = 2*cof_50/fs\n Wn_50[Wn_50 >= 1] = 0.99\n [B50, A50] = signal.butter(3, Wn_50, 'bandstop') #third order bandstop Butterworth filter\n EMGenv = signal.filtfilt(B50, A50, EMGenv, axis = 0)\n \n #BandPass filtering\n cof_1 = np.array([80, 500])\n Wn_1 = 2*cof_1/fs\n Wn_1[Wn_1 >= 1] = 0.99\n [B1, A1] = signal.butter(3, Wn_1, 'bandpass') #third order bandpass Butterworth filter\n EMGenv = signal.filtfilt(B1, A1, EMGenv, axis = 0)\n \n #Rectify\n EMGenv = abs(EMGenv)\n \n #LowPass filtering\n cof_2 = np.array([10])\n Wn_2 = 2*cof_2/fs\n Wn_2[Wn_2 >= 1] = 0.99\n [B2, A2] = signal.butter(3, Wn_2, 'lowpass') #third order lowpass Butterworth filter\n EMGenv = signal.filtfilt(B2, A2, EMGenv, axis = 0)\n \n return EMGenv", "def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation", "def simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A):\n\tfilenames = glob.glob(grbdir + \"/MM_out/*\")\n\tbadpixfile = glob.glob(grbdir + \"/*badpix.fits\")[0]\n\tfilenames.sort()\n\tpix_cnts = np.zeros((16384,len(filenames)))\n\terr_pix_cnts = np.zeros((16384,len(filenames)))\n\ten = np.arange(5, 261., .5)\n\tsel = (en>=100) & (en <= 150)\n\ten_range = np.zeros(len(filenames))\n\tfor f in range(len(filenames)):\n\t\ten_range[f] = filenames[f][20:26]\n\terr_100_500 = (100.0 <= en_range.astype(np.float)) & (en_range.astype(np.float) <= 500.0)\n\terr_500_1000 = (500.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 1000.0)\n\terr_1000_2000 = (1000.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 2000.0)\n\texist_1000_2000 = np.where(err_1000_2000 == True)\n\tE = np.array([])\n\t\n\tprint \"Indices where energy is in between 1000 and 2000 :\",exist_1000_2000[0]\n\t\n\tfor i,f in enumerate(filenames):\n\t\t\tdata = fits.getdata(f + \"/SingleEventFile.fits\")\n\t\t\tE = np.append(E, float(f[20:26]))\n\t\t\terror = np.sqrt(data) \n\t\t\tdata[:,~sel] = 0.\n\t\t\terror[:,~sel] = 0.\n\t\t\tpix_cnts[:,i] = data.sum(1)*model(E[i], alpha, beta, E0, A,typ)/55.5\n\t\t\terr_pix_cnts[:,i] = np.sqrt(((error*model(E[i], alpha, beta, E0, A,typ)/55.5)**2).sum(1))\t\t\n\t\t\t\n\tpix_cnts_total = np.zeros((16384,))\n\terr_100_500_total = np.sqrt((err_pix_cnts[:,err_100_500]**2).sum(1))*(E[err_100_500][1]-E[err_100_500][0])\n\terr_500_1000_total = np.sqrt((err_pix_cnts[:,err_500_1000]**2).sum(1))*(E[err_500_1000][1]-E[err_500_1000][0])\n\n\tif (len(exist_1000_2000[0]) != 0):\n\t\terr_1000_2000_total = np.sqrt((err_pix_cnts[:,err_1000_2000]**2).sum(1))*(E[err_1000_2000][1]-E[err_1000_2000][0])\n\telse :\n\t\terr_1000_2000_total = 0\n\t\n\terr_pix_cnts_total = np.sqrt(err_100_500_total**2 + err_500_1000_total**2 + err_1000_2000_total**2) # dE is 5 from 100-500, 10 from 500-1000, 20 from 1000-2000\n\n\tfor i in range(16384):\n\t\t\tpix_cnts_total[i] = 
simps(pix_cnts[i,:], E)\t\t\t\n\n\tquad0pix = pix_cnts_total[:4096]\n\tquad1pix = pix_cnts_total[4096:2*4096]\n\tquad2pix = pix_cnts_total[2*4096:3*4096]\n\tquad3pix = pix_cnts_total[3*4096:]\n\t\t\n\terr_quad0pix = err_pix_cnts_total[:4096]\n\terr_quad1pix = err_pix_cnts_total[4096:2*4096]\n\terr_quad2pix = err_pix_cnts_total[2*4096:3*4096]\n\terr_quad3pix = err_pix_cnts_total[3*4096:]\n\t\n\tquad0 = np.reshape(quad0pix, (64,64), 'F')\n\tquad1 = np.reshape(quad1pix, (64,64), 'F')\n\tquad2 = np.reshape(quad2pix, (64,64), 'F')\n\tquad3 = np.reshape(quad3pix, (64,64), 'F')\n\t\t\n\terr_quad0 = np.reshape(err_quad0pix, (64,64), 'F')\n\terr_quad1 = np.reshape(err_quad1pix, (64,64), 'F')\n\terr_quad2 = np.reshape(err_quad2pix, (64,64), 'F')\n\terr_quad3 = np.reshape(err_quad3pix, (64,64), 'F')\n\t\n\tsim_DPH = np.zeros((128,128), float)\n\tsim_err_DPH = np.zeros((128,128), float)\n\t\n\tsim_DPH[:64,:64] = np.flip(quad0, 0)\n\tsim_DPH[:64,64:] = np.flip(quad1, 0)\n\tsim_DPH[64:,64:] = np.flip(quad2, 0)\n\tsim_DPH[64:,:64] = np.flip(quad3, 0)\n\t\n\tsim_err_DPH[:64,:64] = np.flip(err_quad0, 0)\n\tsim_err_DPH[:64,64:] = np.flip(err_quad1, 0)\n\tsim_err_DPH[64:,64:] = np.flip(err_quad2, 0)\n\tsim_err_DPH[64:,:64] = np.flip(err_quad3, 0)\n\n\tbadpix = fits.open(badpixfile)\n\tdphmask = np.ones((128,128))\n\t\n\tbadq0 = badpix[1].data # Quadrant 0\n\tbadpixmask = (badq0['PIX_FLAG']!=0)\n\tdphmask[(63 - badq0['PixY'][badpixmask]) ,badq0['PixX'][badpixmask]] = 0\n\n\tbadq1 = badpix[2].data # Quadrant 1\n\tbadpixmask = (badq1['PIX_FLAG']!=0)\n\tdphmask[(63 - badq1['PixY'][badpixmask]), (badq1['PixX'][badpixmask]+64)] = 0\n\n\tbadq2 = badpix[3].data # Quadrant 2\n\tbadpixmask = (badq2['PIX_FLAG']!=0)\n\tdphmask[(127 - badq2['PixY'][badpixmask]), (badq2['PixX'][badpixmask]+64)] = 0\n\n\tbadq3 = badpix[4].data # Quadrant 3\n\tbadpixmask = (badq3['PIX_FLAG']!=0)\n\tdphmask[(127 - badq3['PixY'][badpixmask]), badq3['PixX'][badpixmask]] = 0\n\t\t\t\n\toneD_sim = (sim_DPH*dphmask).flatten()\n\n\treturn oneD_sim*t_src,sim_DPH*t_src,dphmask,sim_err_DPH*t_src", "def main():\n fem_mesh.check_version()\n\n opts = read_cli()\n\n # setup the new output file with a very long, but unique, filename\n loadfilename = (\"gauss_exc_sigma_%.3f_%.3f_%.3f_center_%.3f_%.3f_%.3f_amp_%.3f_amp_cut_%.3f_%s.dyn\" %\n (opts.sigma[0], opts.sigma[1], opts.sigma[2],\n opts.center[0], opts.center[1], opts.center[2],\n opts.amp, opts.amp_cut, opts.sym))\n LOADFILE = open(loadfilename, 'w')\n LOADFILE.write(\"$ Generated using %s:\\n\" % sys.argv[0])\n LOADFILE.write(\"$ %s\\n\" % opts)\n\n LOADFILE.write(\"*LOAD_NODE_POINT\\n\")\n\n # loop through all of the nodes and see which ones fall w/i the Gaussian\n # excitation field\n sym_node_count = 0\n node_count = 0\n NODEFILE = open(opts.nodefile,'r')\n for i in NODEFILE:\n # make sure not to process comment and command syntax lines\n if i[0] != \"$\" and i[0] != \"*\":\n i = i.rstrip('\\n')\n # dyna scripts should be kicking out comma-delimited data; if not,\n # then the user needs to deal with it\n fields = i.split(',')\n fields = [float(j) for j in fields]\n # check for unexpected inputs and exit if needed (have user figure\n # out what's wrong)\n if len(fields) != 4:\n print(\"ERROR: Unexpected number of node columns\")\n print(fields)\n sys.exit(1)\n # compute the Gaussian amplitude at the node\n exp1 = math.pow((fields[1]-opts.center[0])/opts.sigma[0], 2)\n exp2 = math.pow((fields[2]-opts.center[1])/opts.sigma[1], 2)\n exp3 = math.pow((fields[3]-opts.center[2])/opts.sigma[2], 2)\n 
nodeGaussAmp = opts.amp * math.exp(-(exp1 + exp2 + exp3))\n\n # write the point load only if the amplitude is above the cutoff\n # dyna input needs to be limited in precision\n if nodeGaussAmp > opts.amp*opts.amp_cut:\n\n node_count += 1\n # check for quarter symmetry force reduction (if needed)\n if opts.sym == 'qsym':\n if (math.fabs(fields[1]) < opts.search_tol and\n math.fabs(fields[2]) < opts.search_tol):\n nodeGaussAmp = nodeGaussAmp/4\n sym_node_count += 1\n elif (math.fabs(fields[1]) < opts.search_tol or\n math.fabs(fields[2]) < opts.search_tol):\n nodeGaussAmp = nodeGaussAmp/2\n sym_node_count += 1\n # check for half symmetry force reduction (if needed)\n elif opts.sym == 'hsym':\n if math.fabs(fields[1]) < opts.search_tol:\n nodeGaussAmp = nodeGaussAmp/2\n sym_node_count += 1\n elif opts.sym != 'none':\n sys.exit('ERROR: Invalid symmetry option specified.')\n\n LOADFILE.write(\"%i,3,1,-%.4f\\n\" % (int(fields[0]),\n nodeGaussAmp))\n\n # wrap everything up\n NODEFILE.close()\n LOADFILE.write(\"*END\\n\")\n LOADFILE.write(\"$ %i loads generated\\n\" % node_count)\n LOADFILE.write(\"$ %i exist on a symmetry plane / edge\\n\" % sym_node_count)\n LOADFILE.close()", "def _run_sim(self):\n\n self.ensemble = Ensemble(self.game_display,\n (self.disp_width, self.disp_height),\n n_atoms=self.n_atoms, exc0=self.exc0,\n chi=self.chi, f=self.f, mass=self.mass,\n g0=self.g0, g1=self.g1, rad=self.rad)\n self.window_open = True\n self.t = range(self.plot_window)\n self.T_ex = np.ones(self.plot_window)*np.nan\n self.T_ex[-1] = self.ensemble.T_ex\n self.T_kin = np.ones(self.plot_window)*np.nan\n self.T_kin[-1] = self.ensemble.T_kin\n\n self.plot_T_ex = self.ax.plot(self.t, self.T_ex, 'r',\n label='Excitation Temperature')\n self.plot_T_kin = self.ax.plot(self.t, self.T_kin, 'b',\n label='Kinetic Temperature')\n self.ax.legend(loc='upper left')\n self.ax.set_ylim(0, 2*self.ensemble.T_kin)\n self.ax.set_xlim(0, self.plot_window)\n self.ax.set_xlabel('Time (frames)')\n self.ax.set_ylabel('Temperature (arb. 
units)')\n self.ax.tick_params(labeltop=False, labelright=True, right=True)\n\n self.clock = pygame.time.Clock()\n while self.window_open:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.window_open = False\n\n self.clock.tick(self.fps)\n self.ensemble.update(self.clock.get_time())\n self._update_plot()\n pygame.display.update()", "def record(self):\n\n # TODO: Make the Metadata transmission automatic\n n_channels = 32\n sampling_rate = 500\n channel_types = 'eeg'\n\n # Info class required by mne\n info = mne.create_info(ch_names=n_channels, sfreq=sampling_rate, ch_types=channel_types)\n\n # TODO: Dynamically reduce array size\n\n while self.flag_event.is_set():\n sample, timestamp = self.inlet.pull_sample()\n self.timeObj.append(timestamp)\n self.sampleObj.append(sample)\n self.data = np.array(self.sampleObj).reshape((n_channels, -1)) * 1e-6\n if (self.data.shape[1]+1) % sampling_rate == 0:\n custom_raw = mne.io.RawArray(self.data, info)\n custom_raw.save(\"./Data/sample_raw.fif\", overwrite=True)\n\n # TODO: Finish real time data plotting\n # print(self.data.shape)\n # if (self.data.shape[1]+1) % sampling_rate == 0:\n # # custom_raw = mne.io.RawArray(self.data, info)\n # # custom_raw.plot()\n # # plt.plot(self.timeObj, data.T * 1e-6)\n # # plt.pause(0.05)\n # # plt.show()\n # ani = animation.FuncAnimation(fig, self.animate, interval=10)\n # plt.pause(0.05)\n # plt.show()", "def Start_ePCSim(ePCSim_conn, egate_port_number='20111', edaemon_port_number='10111'):\r\n ePCSim_conn.SendCmd(\"pkill egate\")\r\n ePCSim_conn.SendCmd(\"pkill edaemon\")\r\n ePCSim_conn.SendCmd(\"cd /home/epc_sim\")\r\n ePCSim_conn.SendCmd(\"./egate --port \" + egate_port_number)\r\n ePCSim_conn.SendCmd(\"./edaemon --port \" + edaemon_port_number)\r\n ePCSim_conn.prompt = \"EGATE>\"\r\n ePCSim_conn.SendCmdWithKeyWord(\"telnet localhost 20111\", \"EGATE>\")\r\n ePCSim_conn.SendCmdWithKeyWord(\"cfg file=epc.cfg\", \"CHANGE STATE: CONFIGURED\")\r\n ePCSim_conn.SendCmdWithKeyWord(\"start\", \"CHANGE STATE: ACTIVE\")" ]
[ "0.61956763", "0.610275", "0.5993526", "0.597416", "0.59077984", "0.59017754", "0.5880396", "0.58016926", "0.5760875", "0.5706816", "0.5699131", "0.5676859", "0.567066", "0.56360096", "0.56196326", "0.5602858", "0.55992264", "0.55836403", "0.5576451", "0.5570734", "0.55598336", "0.55442196", "0.5541251", "0.5539116", "0.55350554", "0.55320936", "0.55309486", "0.55248", "0.55138236", "0.5507884" ]
0.698078
0
Query the tracklets table for the total number of tracklets associated with this SSM object, together with their status. The status tells whether the tracklet was not found ('U'). We are only interested in clean tracklets whose detections are brighter than LIMITING_MAG and that have been observed before maxMJD.
def objectTracklets(ssmObject, maxMJD, limitingMag=LIMITING_MAG):
    # Get a cursor from the DB connection.
    cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)

    # Select al the tracklets associated with ssmObject.
    # TODO: Is using ext_epoch right?
    sql = 'select tracklet_id, status from tracklets where '
    sql += 'ssm_id = %d and classification = "C" ' % (ssmObject)
    sql += 'and ext_epoch <= %f' % (maxMJD)
    nRes = cursor.execute(sql)
    if(not nRes):
        return
    tracklets = cursor.fetchall()

    # Now, for each tracklet make sure that all the corresponding detections
    # are visible.
    visibleTracklets = []
    sql = 'select d.det_id from detections d, tracklet_attrib ta where '
    sql += 'ta.tracklet_id = %d and d.det_id = ta.det_id and '
    sql += 'd.mag > %f' %(limitingMag)
    for (tracklet_id, status) in tracklets:
        nRes = cursor.execute(sql %(tracklet_id))
        if(not nRes):
            visibleTracklets.append((tracklet_id, status))
        # <-- end if
    # <-- end for
    return(visibleTracklets)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tubclean_count(self):\n if self.is_info_v2:\n result = DeviceStatus.int_or_none(self._data.get(\"tclCount\"))\n else:\n result = self._data.get(\"TclCount\")\n if result is None:\n result = \"N/A\"\n return self._update_feature(WashDeviceFeatures.TUBCLEAN_COUNT, result, False)", "def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count = convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count", "def status_counts(self):\n return self._status_counts", "def processObject(ssmObject, maxMJD):\n percentFound = -1.\n \n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Fnd all the visible tracklets associated with the object. Each tracklet is\n # an array of the form [tracklet_id, status]. Status = 'U' means not found.\n tracklets = objectTracklets(ssmObject, maxMJD)\n if(not tracklets):\n return(100.)\n numTracklets = len(tracklets)\n numFound = 0\n for (tracklet_id, status) in tracklets:\n if(status != 'U'):\n numFound += 1\n percentFound = 100. * float(numFound) / float(numTracklets)\n return(percentFound)", "def counts_by_test_result_status(self, status):\n return len([\n [key, event] for (key, event) in self.result_events.items()\n if event.get(\"status\", \"\") == status])", "def get_full_juju_status():\n\n status = model.get_status(lifecycle_utils.get_juju_model())\n return status", "def num_tracked_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_tracked_samples(u)", "def update_status(self):\n num_nbrs = len(self.neighbors)\n if not 2 <= num_nbrs <= 3:\n self.status = 0\n elif num_nbrs == 3:\n self.status = 1", "def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status", "def cant_tracks(self):\n return len(self.tracks)", "def count(self):\n return len(self.wallpapers)", "def check_status():\n logger.debug(\"Starting the check_status() routine.\")\n\n url = \"https://www.toggl.com/api/v8/time_entries/current\"\n token = os.environ[\"TOGGL_API_TOKEN\"]\n auth_token = base64.b64encode(f\"{token}:api_token\".encode()).decode()\n resp = requests.get(url, headers={\"Authorization\": \"Basic \" + auth_token})\n\n cols = \"id\", \"duration\", \"description\"\n status = {k: v for k, v in (resp.json()[\"data\"] or {}).items() if k in cols}\n logger.debug(f\"{'Something' if 'id' in status else 'Something'} is being tracked.\")\n\n return status", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def get_track_count(self):\n self.app.curs.execute('select count(*) c from track')\n if self.app.curs.rowcount == 1:\n row = self.app.curs.fetchone()\n return row['c']\n else: # pragma: no cover\n return 0", "def 
num_available_watchpoints(self):\n return self._dll.JLINKARM_GetNumWPUnits()", "def load_217_count_results(self, model):\r\n\r\n fmt = '{0:0.' + str(Configuration.PLACES) + 'G}'\r\n\r\n self.txtLambdaB.set_text(str(fmt.format(model.base_hr)))\r\n\r\n return False", "def test_final_result():\n img = cv2.imread(os.path.join(TESTS_ASSETS_VISION_DIR, \"mussels_sample.PNG\"))\n _orig, _circles_removed, _blurred_and_smoothed, _convex_hull, _mussels_found, _mussels_count = count_mussels(img)\n final_result = _mussels_count\n assert final_result == 8", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "def count(self):\n \n return len(self.img_lst)", "def __len__(self):\n\n try:\n return len(self.counts)\n except SpectrumError:\n return len(self.cps)", "def count_umls(self) -> int:\n return self._count_model(Umls)", "def track_count(self):\n if len(self._trackq) > 0:\n return len(self._trackq) - 1\n else:\n return 0", "def status_summary(self):\n base_query_set = super(PeeringSessionManager, self).get_queryset()\n summary = base_query_set.annotate(\n label=models.Case(\n models.When(provisioning_state=2, then=models.Case(\n models.When(admin_state=2, then=models.Case(\n models.When(operational_state=6,\n then=models.Value('Up')),\n default=models.Value('Down')\n )),\n default=models.Value('Admin Down')\n )),\n models.When(provisioning_state=1,\n then=models.Value('Provisioning')),\n default=models.Value('None'),\n output_field=models.CharField()\n )).values('label').annotate(value=models.Count('label'))\n return summary", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def counttasksbystatus(self, **kwargs):\n if 'minutes' not in kwargs:\n raise InvalidParameter(\"The parameter minutes is mandatory for the tasksbystatus api\")\n rows = self.api.query(None, None, self.Task.CountLastTasksByStatus, minutes=kwargs[\"minutes\"])\n\n return rows", "def get_location_count_from_studies(cls, studies):\n\n activations = db.session.query(cls.location_id, func.count(cls.pmid\n )).filter(cls.pmid.in_(studies), cls.location_id < 81925\n ).group_by(cls.pmid).all()\n\n return activations", "def active_count(self):\n cnt = 0\n for item in self[:]:\n if 
item.is_alive():\n cnt += 1\n else:\n self.remove(item)\n return cnt", "def num_wires(self):\n return len(list(_flatten(self.wires)))", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def getNumCleanedTiles(self):\n counter = 0\n for tile in self.tiles:\n if self.tiles[tile] == 'clean':\n counter += 1\n return counter" ]
[ "0.54586697", "0.54425967", "0.51669234", "0.515886", "0.5096581", "0.5061716", "0.49336", "0.48054257", "0.47217533", "0.4708356", "0.47045496", "0.46993577", "0.469208", "0.46712843", "0.466063", "0.46598294", "0.465898", "0.46517268", "0.46492374", "0.46242303", "0.462415", "0.4613437", "0.46064818", "0.45964175", "0.45963413", "0.458422", "0.4539063", "0.45332077", "0.45237482", "0.45224112" ]
0.6052711
0
Find the fraction of the total number of clean tracklets theoretically associated with the given SSM object that MOPS actually associated with it. ssmObject is the ssm_id
def processObject(ssmObject, maxMJD):
    percentFound = -1.

    # Get a cursor from the DB connection.
    cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)

    # Fnd all the visible tracklets associated with the object. Each tracklet is
    # an array of the form [tracklet_id, status]. Status = 'U' means not found.
    tracklets = objectTracklets(ssmObject, maxMJD)
    if(not tracklets):
        return(100.)
    numTracklets = len(tracklets)
    numFound = 0
    for (tracklet_id, status) in tracklets:
        if(status != 'U'):
            numFound += 1
    percentFound = 100. * float(numFound) / float(numTracklets)
    return(percentFound)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def objectTracklets(ssmObject, maxMJD, limitingMag=LIMITING_MAG):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Select al the tracklets associated with ssmObject.\n # TODO: Is using ext_epoch right?\n sql = 'select tracklet_id, status from tracklets where '\n sql += 'ssm_id = %d and classification = \"C\" ' % (ssmObject)\n sql += 'and ext_epoch <= %f' % (maxMJD)\n nRes = cursor.execute(sql)\n if(not nRes):\n return\n tracklets = cursor.fetchall()\n \n # Now, for each tracklet make sure that all the corresponding detections \n # are visible.\n visibleTracklets = []\n sql = 'select d.det_id from detections d, tracklet_attrib ta where '\n sql += 'ta.tracklet_id = %d and d.det_id = ta.det_id and '\n sql += 'd.mag > %f' %(limitingMag)\n for (tracklet_id, status) in tracklets:\n nRes = cursor.execute(sql %(tracklet_id))\n if(not nRes):\n visibleTracklets.append((tracklet_id, status))\n # <-- end if\n # <-- end for\n return(visibleTracklets)", "def calc_dim(s):\n s = s.detach().numpy()\n dim = 0\n # calculate how much 90% would be\n s_square = [i ** 2 for i in s]\n sum_square = sum(s_square)\n goal = .9 * sum_square\n # find 90%\n count = 0\n while count < goal:\n count += s_square[dim]\n dim += 1\n return dim # return this many dimensions", "def getSn(classObj):\r\n temp = []\r\n noOfColl = len(classObj.dataSet[0])\r\n mean = classObj.meanOf()\r\n\t#print(mean)\r\n\t#print(classObj.dataSet[:,2])\r\n\r\n for i in range(noOfColl):\r\n\r\n noOfElems = classObj.noOfElem(i)\r\n\t\t\t\r\n squareSum = classObj.diffSquaredSum(classObj.dataSet[:, i], mean[i])\r\n sn = np.sqrt(squareSum / (noOfElems - 1))\r\n temp.append(sn)\r\n\r\n return temp", "def total_sdram_requirements(self):", "def _reduced_mass(structure) -> float:\n reduced_comp = structure.composition.reduced_composition\n num_elems = len(reduced_comp.elements)\n elem_dict = reduced_comp.get_el_amt_dict()\n\n denominator = (num_elems - 1) * reduced_comp.num_atoms\n\n all_pairs = combinations(elem_dict.items(), 2)\n mass_sum = 0\n\n for pair in all_pairs:\n m_i = Composition(pair[0][0]).weight\n m_j = Composition(pair[1][0]).weight\n alpha_i = pair[0][1]\n alpha_j = pair[1][1]\n\n mass_sum += (alpha_i + alpha_j) * (m_i * m_j) / (m_i + m_j) # type: ignore\n\n reduced_mass = (1 / denominator) * mass_sum\n\n return reduced_mass", "def orbit_count(objects: Dict[str, ObjectMass]) -> int:\n total = 0\n\n for mass in objects.values():\n total += mass.orbit_count()\n\n return total", "def graphite_entropic_change_PeymanMPM(sto, c_s_max):\n\n du_dT = 10 ** (-3) * (\n 0.28\n - 1.56 * sto\n - 8.92 * sto ** (2)\n + 57.21 * sto ** (3)\n - 110.7 * sto ** (4)\n + 90.71 * sto ** (5)\n - 27.14 * sto ** (6)\n )\n\n return du_dT", "def countm(m):\n nfound=0\n\n for i in range(1,m+1):\n for jpk in range(2,(2*i)+1):\n d1=i*i+(jpk)*(jpk) \n if(checkpsq(d1)): \n if(jpk<=i):\n factor=jpk/2 \n else:\n factor=((2*i-jpk)+2)/2 \n nfound=nfound+factor\n\n return nfound", "def countObjects(inputImb, inputIma):\n\n lb = label(inputImb)\n obefore = len(regionprops(lb))\n lbc = label(1 - inputImb, connectivity=1)\n ocbefore = len(regionprops(lbc))\n la = label(inputIma)\n oafter = len(regionprops(la))\n lac = label(1 - inputIma, connectivity=1)\n ocafter = len(regionprops(lac))\n\n if obefore == oafter and ocbefore == ocafter:\n deletableTemp = 1\n else:\n deletableTemp = 0\n\n return deletableTemp", "def support_difference_count(m, m_hat):\n m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, 
m_hat)\n return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) / 2.0)", "def percentage_controlled(self, obj):\n return sum(\n wiw.relative_share for wiw in obj.writerinwork_set.all()\n if wiw.controlled)", "def percent(obj,object2):\n if object2:\n return int(float(int(obj))/object2*100)\n else:\n return 0", "def _get_sho_chunk_sizes(self, max_mem_mb):\n # Step 1: Find number of FORC cycles and repeats (if any), DC steps, and number of loops\n # dc_offset_index = np.argwhere(self._sho_spec_inds.attrs['labels'] == 'DC_Offset').squeeze()\n num_dc_steps = np.unique(self._sho_spec_inds[self._fit_spec_index, :]).size\n all_spec_dims = list(range(self._sho_spec_inds.shape[0]))\n all_spec_dims.remove(self._fit_spec_index)\n\n # Remove FORC_cycles\n sho_spec_labels = self.h5_main.spec_dim_labels\n has_forcs = 'FORC' in sho_spec_labels or 'FORC_Cycle' in sho_spec_labels\n if has_forcs:\n forc_name = 'FORC' if 'FORC' in sho_spec_labels else 'FORC_Cycle'\n try:\n forc_pos = sho_spec_labels.index(forc_name)\n except Exception:\n raise\n # forc_pos = np.argwhere(sho_spec_labels == forc_name)[0][0]\n self._num_forcs = np.unique(self._sho_spec_inds[forc_pos]).size\n all_spec_dims.remove(forc_pos)\n\n # Remove FORC_repeats\n has_forc_repeats = 'FORC_repeat' in sho_spec_labels\n if has_forc_repeats:\n try:\n forc_repeat_pos = sho_spec_labels.index('FORC_repeat')\n except Exception:\n raise\n # forc_repeat_pos = np.argwhere(sho_spec_labels == 'FORC_repeat')[0][0]\n self._num_forc_repeats = np.unique(self._sho_spec_inds[forc_repeat_pos]).size\n all_spec_dims.remove(forc_repeat_pos)\n\n # calculate number of loops:\n if len(all_spec_dims) == 0:\n loop_dims = 1\n else:\n loop_dims = get_dimensionality(self._sho_spec_inds, all_spec_dims)\n loops_per_forc = np.product(loop_dims)\n\n # Step 2: Calculate the largest number of FORCS and positions that can be read given memory limits:\n size_per_forc = num_dc_steps * loops_per_forc * len(self.h5_main.dtype) * self.h5_main.dtype[0].itemsize\n \"\"\"\n How we arrive at the number for the overhead (how many times the size of the data-chunk we will use in memory)\n 1 for the original data, 1 for data copied to all children processes, 1 for results, 0.5 for fit, guess, misc\n \"\"\"\n mem_overhead = 3.5\n max_pos = int(max_mem_mb * 1024 ** 2 / (size_per_forc * mem_overhead))\n if self._verbose:\n print('Can read {} of {} pixels given a {} MB memory limit'.format(max_pos,\n self._sho_pos_inds.shape[0],\n max_mem_mb))\n self.max_pos = int(min(self._sho_pos_inds.shape[0], max_pos))\n self.sho_spec_inds_per_forc = int(self._sho_spec_inds.shape[1] / self._num_forcs / self._num_forc_repeats)\n self.metrics_spec_inds_per_forc = int(self._met_spec_inds.shape[1] / self._num_forcs / self._num_forc_repeats)\n\n # Step 3: Read allowed chunk\n self._sho_all_but_forc_inds = list(range(self._sho_spec_inds.shape[0]))\n self._met_all_but_forc_inds = list(range(self._met_spec_inds.shape[0]))\n if self._num_forcs > 1:\n self._sho_all_but_forc_inds.remove(forc_pos)\n met_forc_pos = np.argwhere(get_attr(self._met_spec_inds, 'labels') == forc_name)[0][0]\n self._met_all_but_forc_inds.remove(met_forc_pos)\n\n if self._num_forc_repeats > 1:\n self._sho_all_but_forc_inds.remove(forc_repeat_pos)\n met_forc_repeat_pos = np.argwhere(get_attr(self._met_spec_inds, 'labels') == 'FORC_repeat')[0][0]\n self._met_all_but_forc_inds.remove(met_forc_repeat_pos)\n\n return", "def getOmegaMVEst(Sn):\n\n sols = defaultdict(lambda: defaultdict(int))\n\n for a, b, c in tripletGenerator(Sn):\n if a is b or a 
is c or b is c: continue\n if isSolvableVect(a, b, c):\n d = solveVect(a, b, c)\n dtuple = tuple(d[:-1])\n dclass = d[-1]\n sols[dtuple][dclass] += 1\n\n nOK = nKO = 0\n for x in Sn:\n xtuple = tuple(x[:-1])\n xclass = x[-1]\n if xtuple not in sols: continue\n maj_class = max(sols[xtuple].keys(), key=lambda k: sols[xtuple][k])\n\n if maj_class == xclass:\n nOK += 1\n else:\n nKO += 1\n\n try:\n estW = nOK / (nOK + nKO)\n except ZeroDivisionError:\n estW = 0\n\n return estW", "def compute_ps_mass(ps):\n\treturn sum(AA_mass_table[it] for it in ps)", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def get_nsset_count_value(obj):\n summary = get_summary_value(obj)\n if summary is None:\n return None\n\n values = summary.split(\" \")\n if len(values) == 2:\n s = values[0]\n if s.isdigit():\n return int(s)\n return None", "def get_Nphotons(self, slamb, sflux, axis=-1):\n passb = self.reinterp(slamb)\n wave = passb._wavelength\n dlambda = np.diff(wave)\n\n # h = 6.626075540e-27 # erg * s\n # c = 2.99792458e18 # AA / s\n h = Constants.h.to('erg * s').value\n c = Constants.c.to('AA/s').value\n vals = sflux.value * wave * passb.transmit\n vals[~np.isfinite(vals)] = 0.\n Nphot = 0.5 * np.sum((vals[1:] + vals[:-1]) * dlambda) / (h * c)\n Nphot = Nphot * Unit('photon*s**-1*cm**-2')\n return Nphot / passb.width # photons / cm2 / s / A", "def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)", "def get_total_mass(self) -> int:\n total_mass = 0\n for i_complex, i_abundance in self._complexes.items():\n total_mass += i_complex.get_size_of_complex() * i_abundance\n return total_mass", "def to_number_of_molecules(self, total_substance_molecules, tolerance=None):\n raise NotImplementedError()", "def totalize_natures(sv):\r\n tot=0 \r\n for nod in sv.Object.values():\r\n tot+=len(nod.nature)\r\n return tot", "def total_sold(album):\n return album.total_sold", "def calculate_size(self, num_dots):\n self.objects = num_dots\n square = sqrt(self.objects)\n if self.objects % square == 0:\n return int(square), int(square)\n else:\n denom = self.objects // sqrt(self.objects)\n while self.objects % denom != 0:\n denom -= 1\n return int(denom), int(self.objects // denom)", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def nsobj_to_extract(self):\n\n if len(self.sobjs_obj) > 0:\n return len(self.sobjs_obj) if self.return_negative else np.sum(self.sobjs_obj.sign > 0)\n else:\n return 0", "def get_cmmb(cst, segs):\n assert isinstance(cst, ChromStruct)\n\n # initialize a GeneticMap and get genetic distances at segment boundaries\n gmp = GeneticMap(cst.chrom, cst.gmap_files)\n gsegs = gmp.interp_gpos(segs)\n gdists = (gsegs[:,1] - gsegs[:,0]) / (segs[:,1] - segs[:,0])\n\n return gdists" ]
[ "0.54343474", "0.5389818", "0.5329431", "0.53050345", "0.52252626", "0.52009857", "0.516708", "0.5143384", "0.51321924", "0.5112056", "0.5111174", "0.5042978", "0.5042217", "0.5034905", "0.5014961", "0.49824715", "0.49607834", "0.4958764", "0.49410716", "0.49187186", "0.49155182", "0.49153927", "0.4911885", "0.49036694", "0.48827296", "0.48827296", "0.48827296", "0.48827296", "0.48746094", "0.48703974" ]
0.71355146
0
Find the SSM orbits associated with the given instance. Also return the maximum MJD that the precovery pipeline is working on.
def listObjects(instance):
    # Get a cursor from the DB connection.
    cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)

    # Compose the SQL query to find all the orbits/SSM objects. We do this with
    # a simle query to the derivedobjects table since we realy only need the
    # ssm_id values.
    maxMJD = completedPrecoveryMaxDate(instance)
    if(maxMJD == None):
        return([], None)
    sql = 'select distinct(ssm_id) from derivedobjects where ssm_id is not null'
    sql += ' and status = "I"'
    # sql += ' and updated >= "%s"' %(minModifiedDate)
    # <-- end if
    nRes = cursor.execute(sql)
    return([x[0] for x in cursor.fetchall()], float(maxMJD))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def completedPrecoveryMaxDate(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Fetch the maximum MJD precovery has processed.\n sql = 'select max(epoch_mjd) from detections d, tracklet_attrib ta '\n sql += 'where d.det_id = ta.det_id and ta.tracklet_id in '\n sql += '(select tracklet_id from history_precoveries)'\n nRes = cursor.execute(sql)\n return(cursor.fetchone()[0])", "def get_current_building_mask(im, instance):\n current_building_mask = np.zeros(im.shape, dtype=np.uint16)\n current_building_mask[im == instance] = 1\n current_building_area = np.sum(current_building_mask)\n return current_building_mask, current_building_area", "def findMaximumDeviationJunction(junctions, wires, resistances, voltages, currents):\n raise NotImplementedError", "def get_instance_output(instance: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:\n ...", "def getObservationMJDSecRange(vis):\n return([getObservationStart(vis),getObservationStop(vis)])", "def get_instance_overlap(kt_mod, kh_mod, kth_mod):\n kt = get_number_of_instances(kt_mod)\n kh = get_number_of_instances(kh_mod)\n kth = get_number_of_instances(kth_mod)\n if kh == 0 or kt == 0 or kth == 0:\n return 0\n else: \n return 1 - (kth - kt) / kh", "def find_hrc_calib_obsid(inst):\n##\n##--- create a list of already processed data\n##\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/6* > '+ zspace\n# os.system(cmd)\n# with open(zspace, 'r') as f:\n# ftest = f.read()\n# wrd = str(inst) + '/61'\n# mc = re.search(wrd, ftest)\n# if mc is not None:\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/61* >' + zspace\n# os.system(cmd)\n#\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/62* >' + zspace\n# os.system(cmd)\n#\n# data = mcf.read_data_file(zspace, remove=1)\n# prev_list = []\n# for ent in data:\n# atemp = re.split('\\/', ent)\n# prev_list.append(int(float(atemp[-1])))\n#\n##\n##--- find today's date and set checking range for the last 30 days\n##\n# today = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())\n# today = int(Chandra.Time.DateTime(today).secs)\n# start = today - 10 * 86400\n##\n##--- extract hrc obsid information\n##\n# line = 'operation=browse\\n'\n# line = line + 'dataset=flight\\n'\n# line = line + 'level=1\\n'\n# line = line + 'detector=hrc\\n'\n# line = line + 'filetype=evt1\\n'\n# line = line + 'tstart=' + str(start) + '\\n'\n# line = line + 'tstop=' + str(today) + '\\n'\n# line = line + 'go\\n'\n#\n# with open('zline', 'w') as fo:\n# fo.write(line)\n#\n# cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > ' + zspace\n# os.system(cmd)\n#\n# mcf.rm_files('./zline')\n#\n# data = mcf.read_data_file(zspace, remove=1)\n##\n##--- select obsids with 61* and 62* starting\n##\n# h_list = []\n# for ent in data:\n# mc = re.search('hrcf', ent)\n# if mc is not None:\n# atemp = re.split('hrcf', ent)\n# btemp = re.split('_', atemp[1])\n# obsid = int(float(btemp[0]))\n# if obsid > 61000 and obsid < 63000:\n##\n##--- if it is already observed skip it\n##\n# if obsid in prev_list:\n# continue\n##\n##--- check which instrument\n##\n# chk = check_inst(obsid)\n# if chk == inst:\n# h_list.append(obsid)\n\n\n\n h_list = ['62410', '62423', '62435', '62437', '62439', '62441', '62443', '62635', '62637', '62649', '62973', '62997', '62422', '62426', '62436', '62438', '62440', '62442', '62446', '62636', '62638', '62796', '62991']\n\n\n return h_list", "def 
test_check_for_max_rmsd():\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj_phil.get_ncs_restraints_group_list()\n pdb_inp = iotbx.pdb.input(lines=test_pdb_str_2,source_info=None)\n ph = pdb_inp.construct_hierarchy()\n # passing test\n assert nrgl.check_for_max_rmsd(ph.atoms().extract_xyz() ,chain_max_rmsd=1)\n # make sure test fails when it suppose to\n nrgl[0].copies[1].t = matrix.col([100, -89.7668, 5.8996])\n assert not nrgl.check_for_max_rmsd(ph.atoms().extract_xyz(),chain_max_rmsd=1)", "def MaxSlMsd(self):\r\n\t\treturn self._get_attribute('maxSlMsd')", "def get_instance_output(instance_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:\n ...", "def get_instance_output(instance_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:\n ...", "def get_pow_nodb(self, name):\n try:\n pow = self.circles[name].get_power_usage()\n return name, pow\n except (TimeoutException, SerialException):\n return name, -1", "def instance_spec(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"instance_spec\")", "def find_second_dwell(date, dwell1_state, dwell2_state, t_dwell1, msid, limit, model_spec, init, limit_type='max',\n duration=2592000, t_backoff=1725000, n_dwells=10, min_dwell=None, max_dwell=None, pseudo=None):\n\n datesecs = CxoTime(date).secs\n msid = msid.lower()\n\n if 'max' in limit_type.lower():\n limit_type = 'max'\n else:\n limit_type = 'min'\n\n if max_dwell is None:\n # This ensures three \"cycles\" of the two dwell states, within the portion of the schedule used for evaluation\n # (t_backoff).\n # Subtract 1000 sec for extra padding.\n max_dwell = (t_backoff - t_dwell1) / 3 - 1000\n\n if min_dwell is None:\n min_dwell = 1.0e-6\n\n results = {'converged': False, 'unconverged_hot': False, 'unconverged_cold': False,\n 'min_temp': np.nan, 'mean_temp': np.nan, 'max_temp': np.nan, 'temperature_limit': limit,\n 'dwell_2_time': np.nan, 'min_pseudo': np.nan, 'mean_pseudo': np.nan, 'max_pseudo': np.nan,\n 'hotter_state': np.nan, 'colder_state': np.nan}\n\n # Ensure t_dwell1 is a float, may not be necessary anymore\n t_dwell1 = float(t_dwell1)\n\n opt_fun = create_opt_fun(datesecs, dwell1_state, dwell2_state, t_dwell1, msid, model_spec, init, t_backoff,\n duration)\n\n # First just check the bounds to avoid unnecessary runs of `opt_fun`\n output = np.array([opt_fun(t) for t in [min_dwell, max_dwell]],\n dtype=[('duration2', float), ('max', float), ('mean', float), ('min', float)])\n\n if 'max' in limit_type:\n\n # All cases report temperatures entirely below the limit.\n if np.all(output['max'] < limit):\n results = _handle_unconverged_cold(output, results)\n\n # All cases report temperatures entirely above the limit.\n elif np.all(output['max'] > limit):\n results = _handle_unconverged_hot(output, results)\n\n # Temperatures straddle the limit, so a refined dwell 2 time is possible.\n else:\n results, output = _refine_dwell2_time('max', n_dwells, min_dwell, max_dwell, limit, opt_fun, results)\n\n elif 
'min' in limit_type:\n\n # All cases report temperatures entirely below the limit.\n if np.all(output['min'] < limit):\n results = _handle_unconverged_cold(output, results)\n\n # All cases report temperatures entirely above the limit.\n elif np.all(output['min'] > limit):\n results = _handle_unconverged_hot(output, results)\n\n # Temperatures straddle the limit, so a refined dwell 2 time is possible.\n else:\n results, output = _refine_dwell2_time('min', n_dwells, min_dwell, max_dwell, limit, opt_fun, results)\n\n if output['max'][0] > output['max'][-1]:\n results['hotter_state'] = 1\n results['colder_state'] = 2\n else:\n results['hotter_state'] = 2\n results['colder_state'] = 1\n\n return results", "def get_instance_bounding_box(img, bounding_boxes, instance):\n mask = np.zeros(img.shape, dtype=np.uint16)\n mask[img == instance] = 1\n ret, threshed = cv.threshold(mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n x, y, w, h = cv.boundingRect(contours[0])\n cv.rectangle(bounding_boxes, (x, y), (x + w, y + h), (randint(25, 255), randint(25, 255), randint(25, 255)), 3)\n img2 = contours = hierarchy = mask = None", "def get_termination_command_state(instance: Dict[str, str]) -> Optional[str]:\n invocations = ssm.list_command_invocations(\n InstanceId=instance[\"InstanceId\"], Filters=[{\"key\": \"DocumentName\", \"value\": SSM_TERMINATION_DOCUMENT_NAME}]\n )[\"CommandInvocations\"]\n\n if len(invocations) == 0:\n return None\n\n invocations.sort(key=lambda invocation: invocation[\"RequestedDateTime\"], reverse=True)\n\n return invocations[0][\"Status\"] # type: ignore", "def lms_instance(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"lms_instance\")", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def findMaximumDeviationLoop(junctions, wires, resistances, voltages, currents):\n raise NotImplementedError", "def get_sfdc_instance_output(location: Optional[pulumi.Input[str]] = None,\n product_id: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n sfdc_instance_id: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSfdcInstanceResult]:\n ...", "def wave_get_max_micros():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSM, 2, 0))", "def _find_optimal_sigma02(self):\n\n # Note: this sigma0 is only when eta is at infinity. Hence, computing\n # it does not require eta, update of self.mixed_cor, or update of Y, C,\n # Mz. Hence, once it is computed, it can be reused even if other\n # variables like eta changed. Here, it suffice to only check of\n # self.sigma0 is None to compute it for the first time. 
On next calls,\n # it does not have to be recomputed.\n if self.sigma02 is None:\n\n if self.B is None:\n Cinv = numpy.matmul(self.X.T, self.X)\n C = numpy.linalg.inv(Cinv)\n Xtz = numpy.matmul(self.X.T, self.z)\n XCXtz = numpy.matmul(self.X, numpy.matmul(C, Xtz))\n self.sigma02 = numpy.dot(self.z, self.z-XCXtz) / self.rdof\n\n else:\n self.sigma02 = numpy.dot(self.z, self.z) / self.rdof\n\n return self.sigma02", "def IncludeMaxSlMsd(self):\r\n\t\treturn self._get_attribute('includeMaxSlMsd')", "def _get_instance(self):\n #return '_earth_instance_' + rospy.get_name().strip('/')\n return self.instance", "def snapshot_class(self) -> Optional[Sequence['outputs.CSIPowerMaxSpecDriverSnapshotClass']]:\n return pulumi.get(self, \"snapshot_class\")", "def get_info(self, instance_name):\n LOG.debug(\"get_info\")\n\n instance_id = self._instance_name_to_id(instance_name)\n bmm = db.bmm_get_by_instance_id(None, instance_id)\n status = PowerManager(bmm[\"ipmi_ip\"]).status()\n if status == \"on\":\n inst_power_state = power_state.RUNNING\n else:\n inst_power_state = power_state.SHUTOFF\n\n return {'state': inst_power_state,\n 'max_mem': 0,\n 'mem': 0,\n 'num_cpu': 2,\n 'cpu_time': 0}", "def max_pwm(self):\r\n return self._max_pwm", "def retrieve_nomad_from_search_parameters(self, search_params):\n ra_range, dec_range = self.radec_range_from_search_parameters(search_params)\n print \"Retrieving on ranges:\"\n print \" RA = \" + str(ra_range)\n print \" DEC = \" + str(dec_range)\n return fetch_nomad_box(ra_range, dec_range, epoch=float(search_params['EPOCH']['value']))", "def get_smart_guard_max_snapshots_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetSmartGuardMaxSnapshotsCount', self.handle)", "def getRMSD(self):\n\n superimpose = self.SuperimposeStructures()\n rmsd = superimpose.rms\n\n return rmsd" ]
[ "0.49544078", "0.48614013", "0.4803768", "0.47599295", "0.46683034", "0.46434867", "0.45607504", "0.45236927", "0.45087636", "0.45051718", "0.45051718", "0.4498567", "0.44935858", "0.4478318", "0.44600636", "0.44428247", "0.44174296", "0.43791753", "0.43775365", "0.43559694", "0.4335885", "0.4326444", "0.4326352", "0.4313296", "0.43093702", "0.42850325", "0.42847025", "0.42703697", "0.4266933", "0.42665514" ]
0.5541556
0
Get paper_id from `hit'.
def _paper_id(hit: DD) -> str:
    return hit["_source"]["paper_id"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit_id(self):\n return self.assignment.HITId", "def find_issue_id(self):", "def _get_id(results, index):\n return results[index]['_id']", "def dangling_pic(pic):\n ppl = pic.person_set.fetch(100)\n if not ppl:\n return pic.key().id()", "def hit(self):\n return self._hit", "def getID():", "def __getIDFromCID(self, cid):\n if cid == \"daemon\": return self._did\n \n if cid in self._attachments or cid == self._did:\n return cid\n \n for k,v in self._attachments.items():\n if cid == v.cmd: return k\n \n return None", "def entry_id(self):\n if self.lexid is not None:\n return self.lexid.split('.')[0]\n else:\n return None", "def get_paper_by_id(paper_id):\n dblp_key = paper_id.replace(\"/\", \"_\")\n if local.paper_exists(dblp_key):\n return dblp_key\n\n print(\"getting information from dblp about paper {}\".format(paper_id))\n data = get(\"https://dblp.org/rec/\" + paper_id + \".xml\")[\"dblp\"]\n return get_paper(data)", "def tag_id(self, tag):\n assert isinstance(tag, str)\n\n df = self.dfs[\"tags\"]\n tag_records = df[df.tag == tag]\n if 1 == len(tag_records): \n return tag_records[\"id\"].values[0]\n elif 1 < len(tag_records): \n raise Exception(\"More than one record exist by tag\")\n else :\n # We should not be strict to tag name since it is a user input.\n import warnings\n warnings.warn(\"No record matched with tag\", Warning)\n return None", "def get_id_from_ref(ref):\n ref_id = None\n if ref is not None and len(ref) > 0:\n ref_id = path.split(ref)[1]\n return ref_id", "def get_id(self, refobj):\n return cmds.getAttr(\"%s.identifier\" % refobj)", "def get_identifier(self, object):\n try:\n identifier = object[\"uri\"]\n except KeyError:\n identifier = object[\"ref\"]\n return identifier", "def get_id(self, revnum):\n\n index = bisect.bisect_right(self.revnums, revnum) - 1\n id = self.ids[index]\n\n if id is None:\n raise KeyError(revnum)\n\n return id", "def _id(self, document):\n pass", "def get_post_id(self):\n return self.key.parent().id()", "def ews_pid (tag):\n\n return mapitags.PROP_ID(tag)", "def get_id(self):\n return self[\"_id\"]", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id" ]
[ "0.69868296", "0.56247854", "0.5616075", "0.5425608", "0.53989947", "0.5375978", "0.52967244", "0.52966356", "0.5277519", "0.52638745", "0.524657", "0.5234392", "0.52156657", "0.5189373", "0.518699", "0.5174917", "0.5170238", "0.5169146", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876", "0.51510876" ]
0.86228186
0
Get title from `hit'.
def _title(hit: DD) -> str:
    return hit["_source"]["title"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_title(self, obj):\n title = obj.habit.title\n return title", "def get_title():", "def get_title(self):\n return self.metadata['title']", "def title(self):\n return self.get(self._names[\"title\"])", "def title(self):\n return self.get(self._names[\"title\"])", "def getStoryTitle(self, source):\n titleStart = source.find('>', source.find('>')+1) + 1\n titleEnd = source.find('</a>')\n title = source[titleStart:titleEnd]\n title = title.lstrip() # Strip trailing whitespace characters.\n return title", "def get_title(self):\n return self.run_command('get_title')[0]", "def get_title(self):\n meta = self.get_meta_data()\n if \"og:title\" in meta:\n return meta[\"og:title\"]\n else:\n soup = BeautifulSoup(self.TARGET_DATA)\n title = soup.find('title')\n if title:\n return title.text\n else:\n return \"No Title\"", "def title(self):\n return self.get(\"title\")", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def title(self):\n return self['title']", "def get_title(self):\n return self._title", "def get_title(self):\n return self._title", "def get_title(self):\n return self._title", "def get_title(self) -> str:\n pass", "async def title(self):\n if not hasattr(self, \"_title\"):\n self._title = await Stack.fetch_stack_value(self, \"http://purl.org/dc/terms/title\", await self.uuid)\n return self._title", "def get_title(line):\n title = line.split(' (')[0]\n return title", "def get_title(html_soup):\n heading = html_soup.find('title').get_text()\n return heading", "def get_title(self):\n return self._get_title_()", "def getTitle(self):\n return self._title", "def get_title(self):\n\n return self.title", "def title(self):\n return self.metadata.get('title')", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def title(self):\n return self.data.find(\n 'span', class_='briefResultsTitle'\n ).find(\n 'a'\n ).get_text()", "def get_title(self):\n\n return self._title", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and ':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def GetTitle(self):\n return self._title", "def title(self):\n return self.values.get('title')" ]
[ "0.74535805", "0.72680277", "0.70484865", "0.68149185", "0.68149185", "0.67732847", "0.6760391", "0.6757609", "0.6750882", "0.6747287", "0.6747287", "0.6747287", "0.67468166", "0.6732499", "0.6732499", "0.6732499", "0.6717095", "0.66841376", "0.66783696", "0.66464376", "0.66378367", "0.66295815", "0.66048646", "0.65806705", "0.657283", "0.65481985", "0.65476745", "0.6515086", "0.65042466", "0.6504024" ]
0.89552915
0
Test `join` filter function.
def test_join(self):
    test_cases = [
        Case(
            description="lists of strings",
            val=["a", "b"],
            args=[
                "#",
            ],
            kwargs={},
            expect="a#b",
        ),
        Case(
            description="join a string",
            val="a, b",
            args=[
                "#",
            ],
            kwargs={},
            expect=FilterValueError,
        ),
        Case(
            description="lists of integers",
            val=[1, 2],
            args=[
                "#",
            ],
            kwargs={},
            expect="1#2",
        ),
        Case(
            description="missing argument defaults to space",
            val=["a", "b"],
            args=[],
            kwargs={},
            expect="a b",
        ),
        Case(
            description="too many arguments",
            val=["a", "b"],
            args=[", ", ""],
            kwargs={},
            expect=FilterArgumentError,
        ),
        Case(
            description="arguments not a string",
            val=["a", "b"],
            args=[5],
            kwargs={},
            expect="a5b",
        ),
        Case(
            description="value not an array",
            val=12,
            args=[", "],
            kwargs={},
            expect=FilterValueError,
        ),
        Case(
            description="value array contains non string",
            val=["a", "b", 5],
            args=["#"],
            kwargs={},
            expect="a#b#5",
        ),
        Case(
            description="join an undefined variable with a string",
            val=self.env.undefined("test"),
            args=[", "],
            kwargs={},
            expect="",
        ),
        Case(
            description="join an array variable with undefined",
            val=["a", "b"],
            args=[self.env.undefined("test")],
            kwargs={},
            expect="ab",
        ),
    ]

    self._test(Join, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_join_and(self):\n self.assertEqual(join_and(self.fruits_singular, plural=False),\n \"apple, a orange and a banana\")\n self.assertEqual(join_and(self.fruits_plural, plural=True),\n \"apples, oranges and bananas\")\n self.assertEqual(join_and([\"apple\"], plural=False),\n \"apple\")\n self.assertEqual(join_and([\"apples\"], plural=True),\n \"apples\")\n self.assertEqual(join_and([], plural=True),\n \"\")\n self.assertEqual(join_and([], plural=False),\n \"\")", "def test_left_join_method_is_successful(setup1, setup2):\n assert left_join(setup1, setup2) == [['Apple', 'Jack']]", "def testCorrectJoin(self):\n b_tree = OOBTree()\n b_tree.update({1: \"Monkey D. Luffy\", 2: \"Roronoa Zoro\", 3: \"Nami\"})\n failed_counter = 0\n key = 1\n data = {\"from\":\"East Blue\"}\n (mod_data, mod_tree, failed_counter) = self.processing.join(b_tree, key, data, failed_counter)\n self.assertEqual(mod_data, {\"from\":\"East Blue\", \"right_data\":\"Monkey D. Luffy\"})\n self.assertEqual(len(mod_tree), 2)\n self.assertEqual(failed_counter, 0)", "def join(self):\n pass", "def test_url_join(self):\n assert ct.url_join(\"www.bad-actor.services\", \"api\") == \"http://www.bad-actor.services/api\"\n assert ct.url_join(\"https://www.bad-actor.services\", \"api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_join(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_join(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_join(\"http://www.bad-actor.services\", \"/\") == \"http://www.bad-actor.services/\"\n assert ct.url_join(\"https://www.bad-actor.services/\", \"/\") == \"https://www.bad-actor.services/\"\n assert ct.url_join(\n \"https://www.bad-actor.services/\", \"/\", \"api\") == \\\n \"https://www.bad-actor.services/api\"\n assert ct.url_join(\n \"bad-actor-services_bad-actor-services-web_1:5000\", \"/api/proxies\") == \\\n \"http://bad-actor-services_bad-actor-services-web_1:5000/api/proxies\"", "def test_if_two_tables(table_one, table_two):\n assert left_join(table_one, table_two) == [['fond', 'enamored', 'averse'], ['guide', 'usher', 'follow'], ['diligent', 'employed', 'idle'], ['wrath', 'anger', 'deligth']]", "def query_join(*query_list):\n return \"&\".join(query_list)", "def test_join_optimizable_2():\n cleanup()\n print_test_separator(\"Starting test_optimizable_2\")\n\n cat = CSVCatalog.CSVCatalog()\n\n cds = []\n cds.append(CSVCatalog.ColumnDefinition(\"playerID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"nameLast\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"nameFirst\", column_type=\"text\"))\n cds.append(CSVCatalog.ColumnDefinition(\"birthCity\", \"text\"))\n cds.append(CSVCatalog.ColumnDefinition(\"birthCountry\", \"text\"))\n cds.append(CSVCatalog.ColumnDefinition(\"throws\", column_type=\"text\"))\n\n t = cat.create_table(\n \"people\",\n data_dir + \"People.csv\",\n cds)\n print(\"People table metadata = \\n\", json.dumps(t.describe_table(), indent=2))\n t.define_index(\"pid_idx\", ['playerID'], 'INDEX')\n\n cds = []\n cds.append(CSVCatalog.ColumnDefinition(\"playerID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"H\", \"number\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"AB\", column_type=\"number\"))\n cds.append(CSVCatalog.ColumnDefinition(\"teamID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"yearID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"stint\", column_type=\"number\", 
not_null=True))\n\n t = cat.create_table(\n \"batting\",\n data_dir + \"Batting.csv\",\n cds)\n print(\"Batting table metadata = \\n\", json.dumps(t.describe_table(), indent=2))\n\n people_tbl = CSVTable.CSVTable(\"people\")\n batting_tbl = CSVTable.CSVTable(\"batting\")\n\n print(\"Loaded people table = \\n\", people_tbl)\n print(\"Loaded batting table = \\n\", batting_tbl)\n\n start_time = time.time()\n\n join_result = people_tbl.join(batting_tbl,['playerID'], None)\n\n end_time = time.time()\n\n print(\"Result = \\n\", join_result)\n elapsed_time = end_time - start_time\n print(\"\\n\\nElapsed time = \", elapsed_time)\n\n print_test_separator(\"Complete test_join_optimizable_2\")", "def testJoin(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n data={\r\n # 1\r\n 'relativePath':\r\n ['/dir1/',P('dir2/fileBase.ext'),'/dir1/dir2/fileBase.ext'],\r\n\r\n # 2\r\n 'absolutePath':\r\n ['/dir1/',P('/dir2/fileBase.ext'),'/dir2/fileBase.ext'],\r\n\r\n # 3\r\n 'notSeparatorTerminatedPath':\r\n ['dir1',P('dir2/fileBase.ext'),'dir1/dir2/fileBase.ext'],\r\n\r\n # 4\r\n 'emptyPath':\r\n ['dir1',P(''),'dir1/'],\r\n\r\n # 5\r\n 'nonNativePath':\r\n ['dir1',ufsi.HttpPath('http://www.google.com.au/'),\r\n 'http://www.google.com.au/']\r\n }\r\n\r\n for k in data.iterkeys():\r\n p1=P(data[k][0])\r\n p2=data[k][1]\r\n r1=str(p1.join(p2))\r\n r2=data[k][2]\r\n self.assertEquals(r1,r2,\r\n '%s: join result was %r but should have been %r'\r\n %(k,r1,r2))", "def _join():\n df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n 'B': ['B0', 'B1', 'B2']})\n print(df.join(other, lsuffix='_caller', rsuffix='_other')) # 为重复 column 添加前缀\n print(df.set_index('key').join(other.set_index('key')))\n print(df.join(other.set_index('key'), on='key', how='right')) # left,right表示以哪边的index为准\n print(df.join(other.set_index('key'), on='key', how='inner')) # inner,outer 表示交集、并集", "def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...", "def join_where(self, table, one, operator, two, type='inner'):\n return self.join(table, one, operator, two, type, True)", "def test_join(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n s.add([2, 5])\n self.assertEquals({1, 2, 3, 4, 5, 6}, s.data[1])\n self.assertFalse(2 in s.data)", "def join_string(part1, part2, concatenation_string = 'AND', seperator=' '):\n\n if part1 == '':\n return part2\n\n elif part2 == '':\n return part1\n\n\n if part1[-1] == seperator:\n sep1 = ''\n else:\n sep1 = seperator\n\n\n if part2[0] == seperator:\n sep2 = ''\n else:\n sep2 = ' '\n\n\n return part1 + sep1 + concatenation_string + sep2 + part2", "def join(self, iterable) -> String:\n pass", "def test_join_optimizable_2(optimize=False):\n cleanup()\n print_test_separator(\"Starting test_optimizable_2, optimize = \" + str(optimize))\n\n cat = CSVCatalog.CSVCatalog()\n cds = []\n\n cds.append(CSVCatalog.ColumnDefinition(\"playerID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"nameLast\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"nameFirst\", column_type=\"text\"))\n cds.append(CSVCatalog.ColumnDefinition(\"birthCity\", \"text\"))\n cds.append(CSVCatalog.ColumnDefinition(\"birthCountry\", \"text\"))\n cds.append(CSVCatalog.ColumnDefinition(\"throws\", column_type=\"text\"))\n\n t = cat.create_table(\n \"people\",\n data_dir + \"People.csv\",\n cds)\n print(\"People table metadata = \\n\", json.dumps(t.describe_table(), indent=2))\n 
t.define_index(\"pid_idx\", \"INDEX\", ['playerID'])\n\n cds = []\n cds.append(CSVCatalog.ColumnDefinition(\"playerID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"H\", \"number\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"AB\", column_type=\"number\"))\n cds.append(CSVCatalog.ColumnDefinition(\"teamID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"yearID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"stint\", column_type=\"number\", not_null=True))\n\n t = cat.create_table(\n \"batting\",\n data_dir + \"Batting.csv\",\n cds)\n print(\"Batting table metadata = \\n\", json.dumps(t.describe_table(), indent=2))\n\n people_tbl = CSVTable.CSVTable(\"people\")\n batting_tbl = CSVTable.CSVTable(\"batting\")\n\n print(\"Loaded people table = \\n\", people_tbl)\n print(\"Loaded batting table = \\n\", batting_tbl)\n\n start_time = time.time()\n\n join_result = people_tbl.join(batting_tbl,['playerID'], None, optimize=optimize)\n\n end_time = time.time()\n\n print(\"Result = \\n\", join_result)\n elapsed_time = end_time - start_time\n print(\"\\n\\nElapsed time = \", elapsed_time)\n\n print_test_separator(\"Complete test_join_optimizable_2\")", "def test_broadcastable_flags_all_broadcastable_on_joinaxis(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(True, False, True))\r\n b = self.shared(b_val, broadcastable=(True, False, True))\r\n c = self.join_op()(0, a, b)\r\n assert not c.type.broadcastable[0]\r\n\r\n f = function([], c, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [True for node in topo if isinstance(node.op, self.join_op)]\r\n\r\n f()\r\n utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)", "def check_join_a_tuple(joiner, tup, rval):\n # Always check with the reverse operation.\n # Since the function used join, we need to use split.\n sp = rval.split(joiner)\n # The split list should have the same number of items as the tuple\n assert len(sp) == len(tup), 'Split length does not match!'", "def _join(lst, key, sep=\";\"):\n return sep.join([d[key] for d in lst if d[key]])", "def testFailedJoin(self):\n b_tree = OOBTree()\n b_tree.update({1: \"Monkey D. 
Luffy\", 2: \"Roronoa Zoro\", 3: \"Nami\"})\n failed_counter = 0\n key = 10\n data = {\"from\":\"East Blue\"}\n (mod_data, mod_tree, failed_counter) = self.processing.join(b_tree, key, data, failed_counter)\n self.assertEqual(mod_data, {\"from\":\"East Blue\"})\n self.assertEqual(len(mod_tree), 3)\n self.assertEqual(failed_counter, 1)", "def join(items, sep=' ', prefix=None, suffix=None, target=None):\n if target is None:\n target = SQLQuery()\n\n target_items = target.items\n\n if prefix:\n target_items.append(prefix)\n\n for i, item in enumerate(items):\n if i != 0:\n target_items.append(sep)\n if isinstance(item, SQLQuery):\n target_items.extend(item.items)\n else:\n target_items.append(item)\n\n if suffix:\n target_items.append(suffix)\n return target", "def test_customjoin(self):\n\n m = mapper(User, users, properties={\n 'orders':relation(mapper(Order, orders, properties={\n 'items':relation(mapper(Item, orderitems))\n }))\n })\n\n q = create_session().query(m)\n l = (q.select_from(users.join(orders).join(orderitems)).\n filter(orderitems.c.item_name=='item 4'))\n\n self.assert_result(l, User, user_result[0])", "def hash_join (planNode):\n operation_name = planNode.get_attr(\"Node Type\")\n operation_type = planNode.get_attr(\"Join Type\")\n cond_msg = planNode.get_attr(\"Hash Cond\")\n\n if operation_type != '':\n operation_type += ' '\n \n if (cond_msg):\n cond_msg = ' on condition '+cond_msg\n description = \"{}{}{}\".format (operation_type, operation_name, cond_msg)\n return description", "def process_join(data: JoinedQueryData, verbose: bool) -> str:\n # The collection (sometimes referred to as \"type\" in the docs) to join\n string = 'type:' if verbose else ''\n string += str(data.collection)\n # The fields used to link the two collections\n if (parent := data.field_on) is not None:\n string += f'^on:{parent}'\n if (child := data.field_to) is not None:\n string += f'^to:{child}'\n # Flags\n if (is_list := data.is_list) or verbose:\n string += '^list:' + ('1' if is_list else '0')\n if not (is_outer := data.is_outer) or verbose:\n string += '^outer:' + ('1' if is_outer else '0')\n # Show/hide field lists\n if show := data.show:\n string += '^show:' + '\\''.join(str(s) for s in show)\n elif hide := data.hide:\n string += '^hide:' + '\\''.join(str(s) for s in hide)\n # Inject at name\n if (name := data.inject_at) is not None:\n string += f'^inject_at:{name}'\n # QueryBase terms\n if terms := data.terms:\n string += '^terms:' + '\\''.join(t.serialise() for t in terms)\n # Process nested (inner) joins\n if joins := data.joins:\n string += f'({\",\".join(process_join(j, verbose) for j in joins)})'\n return string", "def _join_pathway(query, pathway_id, pathway_name):\n if pathway_id or pathway_name:\n if pathway_id:\n query = query.filter(models.Pathway.pathway_id.like(pathway_id))\n if pathway_name:\n query = query.filter(models.Pathway.pathway_name.like(pathway_name))\n\n return query", "def join(self, model_or_queryset, *filter_q, **filter_kw):\n join_type = filter_kw.get('_join_type', INNER)\n queryset = super(With, self).join(model_or_queryset, *filter_q, **filter_kw)\n\n # the underlying Django code forces the join type into INNER or a LEFT OUTER join\n alias, _ = queryset.query.table_alias(self.name)\n join = queryset.query.alias_map[alias]\n if join.join_type != join_type:\n join.join_type = join_type\n return queryset", "def join(ctx, network, force):\n return join_wrapper(ctx.obj['client'], network, force)", "def test_join_optimizable_4():\n cleanup()\n 
print_test_separator(\"Starting test_optimizable_4\")\n\n cat = CSVCatalog.CSVCatalog()\n\n cds = []\n cds.append(CSVCatalog.ColumnDefinition(\"playerID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"H\", \"number\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"AB\", column_type=\"number\"))\n cds.append(CSVCatalog.ColumnDefinition(\"teamID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"yearID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"stint\", column_type=\"number\", not_null=True))\n cds.append(CSVCatalog.ColumnDefinition(\"lgID\", \"text\", True))\n\n t = cat.create_table( \"batting\", data_dir + \"Batting.csv\",cds)\n t.define_index(\"pid_idx\", ['playerID'], \"INDEX\")\n print(\"Batting table metadata = \\n\", json.dumps(t.describe_table(), indent=2))\n \n\n\n cds = []\n cds.append(CSVCatalog.ColumnDefinition(\"franchID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"teamID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"yearID\", \"text\", True))\n cds.append(CSVCatalog.ColumnDefinition(\"W\", column_type=\"number\", not_null=True))\n cds.append(CSVCatalog.ColumnDefinition(\"lgID\", \"text\", True))\n\n t = cat.create_table( \"teams\", data_dir + \"Teams.csv\", cds)\n t.define_index(\"pid_idx\", ['yearID','teamID'], \"INDEX\")\n print(\"Teams table metadata = \\n\", json.dumps(t.describe_table(), indent=2))\n\n batting_tbl = CSVTable.CSVTable(\"batting\")\n team_tbl = CSVTable.CSVTable(\"teams\")\n print(\"Loaded batting table = \\n\", batting_tbl)\n print(\"Loaded Teams table = \\n\", team_tbl)\n start_time = time.time()\n\n tmp = {\"playerID\": \"willite01\", \"yearID\":\"1956\"}\n join_result = batting_tbl.join(team_tbl,['yearID', 'lgID','teamID'], tmp)\n\n end_time = time.time()\n print(\"Result = \\n\", join_result)\n elapsed_time = end_time - start_time\n print(\"\\n\\nElapsed time = \", elapsed_time)\n\n print_test_separator(\"Complete test_join_optimizable_4\")", "def join_with_or(values) -> str:\n return join_with_and(values, 'or')", "def __set_join_function(self):\n if self.mode == self.MODE.UNIQUE:\n self.__join_function = self.join_unique\n elif self.mode == self.MODE.SEQ_COUNT:\n self.__join_function = self.join_sequence_count\n elif self.mode == self.MODE.VEC_COUNT:\n self.__join_function = self.join_vector_count\n elif self.mode == self.MODE.VEC_COUNT_MASKED:\n self.__join_function = self.join_vector_count_masked" ]
[ "0.68004614", "0.6535712", "0.6305247", "0.59191144", "0.58536106", "0.58509207", "0.5828059", "0.56852114", "0.56152767", "0.5599719", "0.55828947", "0.5563236", "0.5553356", "0.55384237", "0.55197346", "0.5468807", "0.5425347", "0.54054934", "0.53015125", "0.5295291", "0.5269809", "0.524525", "0.5224512", "0.5193194", "0.5173943", "0.51710707", "0.51665485", "0.5165197", "0.5162153", "0.51246333" ]
0.7226226
0
Test `first` filter function.
def test_first(self):
    test_cases = [
        Case(
            description="lists of strings",
            val=["a", "b"],
            args=[],
            kwargs={},
            expect="a",
        ),
        Case(
            description="lists of things",
            val=["a", "b", 1, [], {}],
            args=[],
            kwargs={},
            expect="a",
        ),
        Case(
            description="empty list",
            val=[],
            args=[],
            kwargs={},
            expect=None,
        ),
        Case(
            description="unexpected argument",
            val=["a", "b"],
            args=[", "],
            kwargs={},
            expect=FilterArgumentError,
        ),
        Case(
            description="value not an array",
            val=12,
            args=[],
            kwargs={},
            expect=FilterValueError,
        ),
        Case(
            description="first of undefined",
            val=self.env.undefined("test"),
            args=[],
            kwargs={},
            expect=None,
        ),
    ]

    self._test(First, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(iterable: Iterable[T1], predicate: Callable[[T1], bool]) -> Union[T1, None]:\n for x in iterable:\n if predicate(x):\n return x\n return None", "def _first(self, \n iterable, \n condition=lambda x: True):\n try:\n return next(x for x in iterable if condition(x))\n except:\n return None", "def take_until_first(predicate, iterable):\n for x in iterable:\n yield x\n if predicate(x):\n break", "def first(l: iter, predicate):\n for ele in l:\n if predicate(ele):\n return ele\n raise RuntimeError(\"Found nothing to match predicate\")", "def first(self):", "def first_true(iterable, default=False, pred=None):\n return next(filter(pred, iterable), default)", "def test_first(self):\r\n vals = [3, 4, 2]\r\n self.assertEqual(first(vals), 3)\r\n vals.reverse()\r\n self.assertEqual(first(vals), 2)", "def test_filter_one_key():\n data = [\n {\n \"name\": \"Bill\",\n \"last_name\": \"Gilbert\",\n \"occupation\": \"was here\",\n \"type\": \"person\",\n },\n {\"is_dead\": True, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"polly\"},\n ]\n\n actual_result = make_filter(last_name=\"Gilbert\").apply(data)\n expected_result = [data[0]]\n assert actual_result == expected_result", "def select_first(condition):\n return where(condition) | unless(StopIteration, next)", "def first(self):\n return self._reduce_for_stat_function(F.first, only_numeric=False)", "def first(items):\r\n return items[0]", "def test_slice_single_first(self):\n self.table.append(['Tom', 26])\n self.assertEqual(self.table[0], ['Tom', 26])", "def find_first(self, filter, min=0):\n assert callable(filter)\n\n for index, item in enumerate(self.stack[min:], min):\n if filter(item):\n return index\n return -1", "def take_first(count):\n def _take_first(iterable):\n return islice(iterable, count)\n return pipe | set_name('take_first(%s)' % count, _take_first)", "def first(xs):\n if not xs:\n return None\n return xs[0]", "def first(xs):\n if not xs:\n return None\n return xs[0]", "def first_true(cls, iterable, default=None, pred=None):\n # first_true([a,b,c], x) --> a or b or c or x\n # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x\n return next(filter(pred, iterable), default)", "def First():\n return CheckForError(lib.Generators_Get_First())", "def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def first(self, func: Callable[[T], bool], default=None, raise_exception: bool=True) -> Optional[T]:\n if raise_exception:\n return next(iter(filter(func, self.array)))\n return next(iter(filter(func, self.array)), default)", "def first(collection):\n return next(iter(collection))", "def first(collection):\n return next(iter(collection))", "def firstFunction(self):", "def first(iterable: t.Iterable[T]) -> T:\n return next(iter(iterable))", "def first(seq):\n return next(iter(seq))", "def get_first_element(dataset):\n return dataset.first()", "def first(items):\n return next(iter(items or []), None)", "def first(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).first()\n\n return self[0]", "def test_filter_one_key_second():\n data = [\n {\n \"name\": \"Bill\",\n \"last_name\": \"Gilbert\",\n \"occupation\": \"was here\",\n \"type\": \"person\",\n },\n {\"is_dead\": True, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"polly\"},\n ]\n\n actual_result = make_filter(kind=\"parrot\").apply(data)\n expected_result = [data[1]]\n assert actual_result == expected_result", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except 
AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")" ]
[ "0.67819643", "0.6583545", "0.6578263", "0.65747654", "0.6469289", "0.6452934", "0.6449825", "0.6402848", "0.63544303", "0.63225055", "0.6258148", "0.6254651", "0.6247087", "0.62392735", "0.62146485", "0.62146485", "0.615671", "0.61102223", "0.6039504", "0.60003775", "0.59562504", "0.59562504", "0.59327835", "0.59273964", "0.5891083", "0.5883891", "0.5863406", "0.5852371", "0.5835443", "0.5760196" ]
0.69991195
0
Test `last` filter function.
def test_last(self):
    test_cases = [
        Case(
            description="lists of strings",
            val=["a", "b"],
            args=[],
            kwargs={},
            expect="b",
        ),
        Case(
            description="lists of things",
            val=["a", "b", 1, [], {}],
            args=[],
            kwargs={},
            expect={},
        ),
        Case(
            description="empty list",
            val=[],
            args=[],
            kwargs={},
            expect=None,
        ),
        Case(
            description="unexpected argument",
            val=["a", "b"],
            args=[", "],
            kwargs={},
            expect=FilterArgumentError,
        ),
        Case(
            description="value not an array",
            val=12,
            args=[],
            kwargs={},
            expect=FilterValueError,
        ),
        Case(
            description="last of undefined",
            val=self.env.undefined("test"),
            args=[],
            kwargs={},
            expect=None,
        ),
    ]

    self._test(Last, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_last(n, result):\n from last import last\n assert last(n) == result", "def test_9999_last(self):\n self.lasttest = True", "def test_last_element(self):\n self.assertEqual(functions.last_element([1, 2, 3]), 3)\n self.assertEqual(functions.last_element([]), None)", "def test_slice_last(self):\n self.table.append(['Tom', 26])\n self.table.append(['Chantelle', 24])\n self.assertEqual(self.table[-1], ['Chantelle', 24])", "def is_last(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_last\")", "def getLastFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def get_last(self, count):", "def test_last(self):\n l_list = DoubleLinkedList()\n l_list.push(1234)\n l_list.push(12)\n self.assertEqual(l_list.get_list()[-1], l_list.last())", "def find_last(self, filter):\n assert callable(filter)\n\n indexes = range(len(self.stack) - 1, -1, -1)\n items = self.stack[::-1]\n\n for index, item in zip(indexes, items):\n if filter(item):\n return index\n return -1", "def get_last(cls, **filters) -> dict:\n return cls.get(**filters)", "def is_last(self) -> Optional[bool]:\n return pulumi.get(self, \"is_last\")", "def FilterDone(self, last_bits):\n return last_bits", "def test_last_values(self):\n event_put = {\n 'operation': 'post',\n 'payload': {\n 'timeseriex': [(1, 100), (2, 100)],\n 'timeseriey': [(3, 100), (4, 100)],\n }\n }\n\n lambda_database.handler(event_put, None)\n last_payload = {\n 'timeseries': ['timeseriex'],\n 'granularity': granularities.SECOND\n }\n\n # Give some time, so message can arrive to the consumer\n time.sleep(0.5)\n\n event_last = {\n 'operation': 'last',\n 'payload': last_payload\n }\n\n data = lambda_database.handler(event_last, None)\n self.assertEqual(data['timeseriex'][0], event_put['payload']['timeseriex'][-1])", "def test_last_files(self):\n glob_manager = GlobManager(['*'])\n self.assertCountEqual(glob_manager.last_files, set())\n\n glob_manager.get_files()\n self.assertCountEqual(\n glob_manager.last_files,\n {\n 'bob.py', 'dave.txt', 'fred.txt.py', 'geoff.py', 'jim.py.txt',\n 'rob.txt'\n })", "def last(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).last()\n\n return self[-1]", "def return_last(iter):\n for thing in iter:\n pass\n return thing", "def test_apply_filter_leq(app):\n with app.app_context():\n users = User.query\n users = apply_filter(users, User,\n {'column': 'last_seen', 'type': 'leq',\n 'value': 121212121})\n assert str(users.whereclause) == 'users.last_seen <= :last_seen_1'", "def test_query_events_by_last_date(self):\n events = list(query_events_by_last_date(Event.objects.all(), timezone.now()))\n self.assertFalse(self.event_show2 in events)\n self.assertTrue(self.event_show1 in events)", "def test_get_last_layer(self):\n\t\t\n\t\tprint(\"test_get_last_layer\")\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\t\tprint(num_layers, ww_layer.name, ww_layer.layer_id)\n\t\t\t\n\t\tself.assertEqual('fc', ww_layer.name)\n\t\t# layer id is 40 because we skup batch normlayers\n\t\tself.assertEqual(40, ww_layer.layer_id)\n\n\t\treturn", "def test_check_max(self):\n\t\tself.filter.set_operator(\".max\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=0)))\n\t\tself.assertFalse(self.filter.check(Object(field=13)))", "def last(iterator):\n item = None\n for item in iterator:\n pass\n return 
item", "def test_getLast(self):\n m = MessageSet(1, None)\n m.last = 2\n self.assertEqual(m.last, 2)", "def test_entities__EntityOrder__isLast__2(entityOrder):\n assert not entityOrder.isLast(IEntity(IPhoneNumber))", "def test_slice_second_last(self):\n self.table.append(['Tom', 26])\n self.table.append(['Chantelle', 24])\n self.assertEqual(self.table[-2], ['Tom', 26])", "def test_setLastWithWildcardRange(self):\n m = MessageSet(1, None)\n m.add(2, None)\n m.last = 5\n self.assertEqual(list(m), [1, 2, 3, 4, 5])", "def test_get_last_layer(self):\n\t\t\n\t\tprint(\"test_get_last_layer\")\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\t\tprint(num_layers, ww_layer.name, ww_layer.layer_id)\n\t\t\t\n\t\tself.assertEqual('layer4.1.conv2', ww_layer.name)\n\t\t# layer id is 40 because we skup batch normlayers\n\t\tself.assertEqual(40, ww_layer.layer_id)\n\n\t\treturn", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)", "def is_last(self, level):\n\n return level == self.levels[-1]", "def test_get_param_last(self):\n kwargs = {'first': 1, 'second': 2}\n\n with pytest.raises(TypeError, match=\"unrecognized keyword.*second\"):\n assert ct.config._get_param(\n 'config', 'first', kwargs, pop=True, last=True) == 1\n\n assert ct.config._get_param(\n 'config', 'second', kwargs, pop=True, last=True) == 2", "def test_last_when_empty(self):\n l_list = DoubleLinkedList()\n with self.assertRaises(Exception) as context:\n l_list.last()\n self.assertTrue('Empty list' in str(context.exception))" ]
[ "0.6690879", "0.66017205", "0.65425724", "0.61579126", "0.6045275", "0.6015733", "0.599462", "0.59935486", "0.59502226", "0.5896181", "0.58830184", "0.5843675", "0.57821405", "0.57237977", "0.5683516", "0.56639475", "0.56478554", "0.5635642", "0.5617747", "0.55998707", "0.5579839", "0.5568987", "0.55585814", "0.5553479", "0.5504698", "0.546926", "0.5448927", "0.5434189", "0.5418755", "0.5410752" ]
0.6940046
0
Test `concat` filter function.
def test_concat(self): test_cases = [ Case( description="lists of strings", val=["a", "b"], args=[["c", "d"]], kwargs={}, expect=["a", "b", "c", "d"], ), Case( description="missing argument", val=["a", "b"], args=[], kwargs={}, expect=FilterArgumentError, ), Case( description="too many arguments", val=["a", "b"], args=[["c", "d"], ""], kwargs={}, expect=FilterArgumentError, ), Case( description="arguments not a list", val=["a", "b"], args=[5], kwargs={}, expect=FilterArgumentError, ), Case( description="not an array", val="a, b", args=[["c", "d"]], kwargs={}, expect=FilterValueError, ), Case( description="array contains non string", val=["a", "b", 5], args=[["c", "d"]], kwargs={}, expect=["a", "b", 5, "c", "d"], ), Case( description="undefined left value", val=self.env.undefined("test"), args=[["c", "d"]], kwargs={}, expect=["c", "d"], ), Case( description="undefined argument", val=["a", "b"], args=[self.env.undefined("test")], kwargs={}, expect=FilterArgumentError, ), ] self._test(Concat, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testConcatSourceMultipleButOneConcatable(self):\n env = self.env\n\n # Even if multiple input files, if only one is concat-able, won't concat.\n cs = env.ConcatSource('foo3.cc', ['a.cc', 'd.o'])\n self.assertEqual(map(str, cs), ['d.o', 'a.cc'])", "def test_evaluate_concat_expression(self):\n value = self.evaluate_common(\"concat('starts','with')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.String, \"Expected String\")\n self.assertTrue(value.value == \"startswith\")\n value = self.evaluate_common(\"concat('3.1',concat('4','159'))\")\n self.assertTrue(value.value == \"3.14159\")\n try:\n value = self.evaluate_common(\"concat('3.14',1)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"concat('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"concat('3.1','4','159')\")\n self.fail(\"3 parameters\")\n except odata.EvaluationError:\n pass", "def testConcatDisabled(self):\n env = self.env\n\n # If CONCAT_SOURCE_ENABLE is not set, files are passed through\n env['CONCAT_SOURCE_ENABLE'] = False\n cs = env.ConcatSource('foo4.cc', ['a.cc', 'b.cc', 'c.cc'])\n self.assertEqual(map(str, cs), ['a.cc', 'b.cc', 'c.cc'])", "def test_cat_basic(self):\n\n utils.compare_tracing_methods(\n SimpleCatModule(0, 1, 2),\n torch.randn(2, 3, 4),\n torch.randn(2, 3, 4),\n torch.randn(2, 3, 4),\n fusible_ops={\"prim::FusedConcat\"},\n )", "def test_concat_impl(self, value, expected_concat_value):\n # Need to convert np arrays to tensors first.\n value = tf.nest.map_structure(tf.constant, value)\n concat_value = concat._concat_impl(value)\n self.assertAllEqual(concat_value, expected_concat_value)", "def test_array_concat():\n\n array = Array(columns=\"abc\")\n for i in range(10):\n array.append([1, 2, 3])\n\n # Any 2-dimensional array witht the same number of rows should work.\n other = [[4, 5, 6]] * len(array)\n array.concat(other)\n\n assert array.shape == (10, 6)\n assert len(array.columns) == 6\n assert all(type(column) is str for column in array.columns)\n for row in array:\n assert tuple(row) == (1, 2, 3, 4, 5, 6)\n\n # Now this should fail since the columns have the same names.\n other = Array(columns=\"abc\")\n for i in range(10):\n other.append([7, 8, 9])\n assert_raises(ValueError, array.concat, other)\n\n # Adding a prefix should make it work.\n array.concat(other, prefix=\"other\")\n assert array.shape == (10, 9)\n assert len(array.columns) == 9\n for row in array:\n assert tuple(row) == (1, 2, 3, 4, 5, 6, 7, 8, 9)", "def _concat(self, partial: Optional[O], outputs: O):\n raise NotImplementedError", "def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100", "def test_url_concat(self):\n assert ct.url_join(\"www.bad-actor.services\", \"api\") == \"http://www.bad-actor.services/api\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_concat(\n \"https://www.bad-actor.services\", \"/api\", \"new//one\") == \"https://www.bad-actor.services/api/new/one\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"/\") == \"https://www.bad-actor.services/\"\n assert ct.url_concat(\"https://www.bad-actor.services/\", \"/\") == \"https://www.bad-actor.services/\"", "def test_by_source_mininimal_data(minimal_mockdata, qfilter):\n res = 
qfilter.filter(minimal_mockdata, s='s1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, s='xxxxxx')\n assert not res", "def test_string_filter(mutation_sequence_with_set_descriptions):\n string_filter = StringFilter(string_to_match=\"SUPER SHOP\")\n assert len(string_filter.apply(mutation_sequence_with_set_descriptions)) == 2", "def testRegisterConcatenation(self):\n reg_one = ShiftRegister(2)\n reg_one.shift(\"a\")\n reg_one.shift(\"b\")\n reg_two = ShiftRegister(3)\n reg_two.shift(\"c\")\n reg_two.shift(\"d\")\n reg_two.shift(\"e\")\n reg_cat = reg_one.concatenate(reg_two)\n self.assertEqual(''.join(reg_cat), \"abcde\")", "def test_string_concat():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\"Hello\" + \" \" + \"World!\")\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(2))\n # NB: We could easily decide to report only one of these\n assert_that(visitor.violations[0][1], is_(equal_to(STRING_CONCAT_VIOLATION)))\n assert_that(visitor.violations[1][1], is_(equal_to(STRING_CONCAT_VIOLATION)))", "def test_filter_prepend(pre_arg, pre_src, pre_dest):\n args = parser.parse_args([\"-pre\", *pre_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(pre_src, filters, args.extension, args.raw)\n print(dest)\n print(pre_dest)\n assert dest == pre_dest", "def test_string_filter_chain(mutation_sequence_with_set_descriptions):\n mutations = mutation_sequence_with_set_descriptions\n\n string_filter = StringFilter(string_to_match=\"SUPER SHOP\")\n string_filter2 = StringFilter(string_to_match=\"alert!\", parent=string_filter)\n string_filter3 = StringFilter(\n string_to_match=\"some other string\", parent=string_filter2\n )\n\n assert len(string_filter.apply(mutations)) == 2\n assert len(string_filter2.apply(mutations)) == 1\n assert len(string_filter3.apply(mutations)) == 0", "def testUsingFilterTool(self):\n pass", "def test_filter_sequence_true(self):\n self.es.register_filter(bar=('foo', 'bar', 'baz'))\n self.assertTrue(self.es.streamfilter(self.data))", "def concat(self, other: Any) -> ColumnOperators:\n return self.operate(concat_op, other)", "def test_cat_with_empty_tensor(self):\n\n utils.compare_tracing_methods(\n SimpleCatModule(0, 1, 2),\n torch.empty(0),\n torch.randn(2, 3, 4, 5),\n torch.randn(2, 3, 4, 5),\n fusible_ops={\"prim::FusedConcat\"},\n )", "def test_filters_are_clones_not_references(self):\n # Everything else is considered immutable\n qs = FBO(\n path=TEST_FILES_ROOT,\n glob='*.rst',\n )\n self.assertEqual(\n 3,\n qs.count(),\n )\n qs2 = qs.filter(name='test1.rst')\n self.assertEqual(\n 3,\n qs.count(),\n )\n self.assertEqual(\n 1,\n qs2.count(),\n )", "def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))", "def test_filter_device1(self):\n pass", "def concatenate_data():", "def test_filter_sequence(seq_arg, seq_src, seq_dest):\n args = parser.parse_args(['-seq', *seq_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(seq_src, filters, args.extension, args.raw)\n assert dest == seq_dest", "def testSingleFile(self):\n env = self.env\n\n # If only one concat-able source file is present, passes through\n cs = env.ConcatSource('foo1.cc', ['a.cc'])\n self.assertEqual(map(str, cs), ['a.cc'])", "def _rconcat(self, other: Any) -> ColumnOperators:\n return 
self.reverse_operate(concat_op, other)", "def concat(xss):\n return list(anyconfig.compat.from_iterable(xs for xs in xss))", "def test_filter_extension(ext_arg, ext_src, ext_dest):\n args = parser.parse_args([*ext_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(ext_src, filters, args.extension, args.raw)\n assert dest == ext_dest", "def test_by_statement_mininimal_data(minimal_mockdata, qfilter):\n res = qfilter.filter(minimal_mockdata, st='st1')\n assert len(res) == 1\n res = qfilter.filter(minimal_mockdata, st='xxx2')\n assert not res == 0", "def test_filter_raw(raw_arg, raw_src, raw_dest):\n args = parser.parse_args([*raw_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(raw_src, filters, args.extension, args.raw)\n assert dest == raw_dest" ]
[ "0.65016174", "0.64184755", "0.6282409", "0.5935726", "0.59030414", "0.5607358", "0.5581276", "0.55731755", "0.5568929", "0.5547763", "0.55458635", "0.55448776", "0.5541933", "0.553369", "0.5527253", "0.5473628", "0.544644", "0.5384779", "0.53693026", "0.535711", "0.532056", "0.53015745", "0.52977794", "0.52772725", "0.52602243", "0.52416885", "0.5236406", "0.52249885", "0.5224406", "0.5216315" ]
0.72020715
0
Test `map` filter function.
def test_map(self): test_cases = [ Case( description="lists of objects", val=[{"title": "foo"}, {"title": "bar"}, {"title": "baz"}], args=["title"], kwargs={}, expect=["foo", "bar", "baz"], ), Case( description="missing argument", val=[{"title": "foo"}, {"title": "bar"}, {"title": "baz"}], args=[], kwargs={}, expect=FilterArgumentError, ), Case( description="too many arguments", val=[{"title": "foo"}, {"title": "bar"}, {"title": "baz"}], args=["title", ""], kwargs={}, expect=FilterArgumentError, ), Case( description="missing property", val=[{"title": "foo"}, {"title": "bar"}, {"heading": "baz"}], args=["title"], kwargs={}, expect=["foo", "bar", None], ), Case( description="value not an array", val=123, args=["title"], kwargs={}, expect=FilterValueError, ), Case( description="array contains non object", val=[{"title": "foo"}, {"title": "bar"}, 5, []], args=["title"], kwargs={}, expect=FilterValueError, ), Case( description="undefined left value", val=self.env.undefined("test"), args=["title"], kwargs={}, expect=[], ), Case( description="undefined argument", val=[{"title": "foo"}, {"title": "bar"}, {"title": "baz"}], args=[self.env.undefined("test")], kwargs={}, expect=[None, None, None], ), ] self._test(Map, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __map_and_filter(_input: MutableSequence[T],\n _map: Callable[[T], Any] = lambda x: x,\n _filter: Callable[[T], bool] = lambda x: True) -> MutableSequence[Any]:\n\n return [_map(x) for x in _input if _filter(x)]", "def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list", "def test_filter_mapping_file(self):\r\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\r\n ['a', 'b', 'c', 'd', 'e', 'f']), (self.map_headers, self.map_data))\r\n self.assertEqual(\r\n filter_mapping_file(self.map_data, self.map_headers, ['a']),\r\n (['SampleID', 'Description'], ['a\\tx'.split('\\t')]))", "def test_filter_mapping_file(self):\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\\\n ['a','b','c','d','e','f']), (self.map_headers, self.map_data))\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers, ['a']),\n (['SampleID','Description'],['a\\tx'.split('\\t')]))", "def map():", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def test_map(self, start: Result[int, str], exp: Result[int, str]) -> None:\n assert start.map(lambda x: int(x ** 2)) == exp", "def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))", "def test_filter_wea_zero_entry():\n pass", "def test_int_list(self):\n \n self.assertEqual(False, \n maps.map_list([1, 2, 3]))", "def test_filterSamples_no_strict(self):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)\r\n self.assertEqual(self.overview_map.SampleIds, ['PC.356'])\r\n\r\n self.empty_map.filterSamples(['foo'], strict=False)\r\n self.assertEqual(self.empty_map.SampleIds, [])", "def test_filterSamples_no_strict(self):\n self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)\n self.assertEqual(self.overview_map.SampleIds, ['PC.356'])\n\n self.empty_map.filterSamples(['foo'], strict=False)\n self.assertEqual(self.empty_map.SampleIds, [])", "def map(z):\n pass", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def map(self, function):\n pass", "def test_map_args_all_none():\n pass", "def test_string_input(self):\n \n self.assertEqual(False, maps.map_list(1))\n self.assertEqual(False, maps.map_list('false'))", "def test_core_functionality(self):\n # Test typing\n self.run_map_collection(\n _map_collection=self.example_map\n )", "def test_filterSamples_strict(self):\r\n with self.assertRaises(ValueError):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'])\r\n\r\n with self.assertRaises(ValueError):\r\n self.empty_map.filterSamples(['foo'])", "def test_filterSamples(self):\r\n exp = ['PC.356', 'PC.593']\r\n self.overview_map.filterSamples(['PC.593', 'PC.356'])\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n self.overview_map.filterSamples([])\r\n self.assertEqual(self.overview_map.SampleIds, [])", "def test_filterSamples_strict(self):\n with self.assertRaises(ValueError):\n self.overview_map.filterSamples(['PC.356', 'abc123'])\n\n with self.assertRaises(ValueError):\n self.empty_map.filterSamples(['foo'])", "def test_filterSamples(self):\n exp = 
['PC.356', 'PC.593']\n self.overview_map.filterSamples(['PC.593', 'PC.356'])\n obs = self.overview_map.SampleIds\n self.assertEqual(obs, exp)\n\n self.overview_map.filterSamples([])\n self.assertEqual(self.overview_map.SampleIds, [])", "def test_filter_function_any(self):\n self.es.register_filter(lambda x: True, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))", "def test_filter_mapping_file_from_mapping_f(self):\n actual = filter_mapping_file_from_mapping_f(self.tutorial_mapping_f,[\"PC.354\",\"PC.355\"])\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\n self.assertEqual(actual,expected)", "def get_map_kx_ky_filtered_pyfftw(map,apo,filter_dict):\n try:\n ncore = int(os.environ['OMP_NUM_THREADS'])\n except (KeyError, ValueError):\n ncore = multiprocessing.cpu_count()\n\n if map.ncomp==1:\n map.data=apply_filter(map.data,apo.data,filter_dict,ncore)\n else:\n for i in range(map.ncomp):\n map.data[i]=apply_filter(map.data[i],apo.data,filter_dict,ncore)\n\n return map", "def test_filter_mapping_file_from_mapping_f_negate(self):\n actual = filter_mapping_file_from_mapping_f(self.tutorial_mapping_f,\n [\"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\", \"PC.634\", \"PC.635\", \"PC.636\"],\n negate=True)\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\n self.assertEqual(actual,expected)", "def test_filter_mapping_file_from_mapping_f(self):\r\n actual = filter_mapping_file_from_mapping_f(\r\n self.tutorial_mapping_f, [\"PC.354\", \"PC.355\"])\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\r\n self.assertEqual(actual, expected)", "def test_map_args_invalid():\n pass", "def testUsingFilterTool(self):\n pass", "def test_filter_mapping_file_from_mapping_f_negate(self):\r\n actual = filter_mapping_file_from_mapping_f(self.tutorial_mapping_f,\r\n [\"PC.356\",\r\n \"PC.481\",\r\n \"PC.593\",\r\n \"PC.607\",\r\n \"PC.634\",\r\n \"PC.635\",\r\n \"PC.636\"],\r\n negate=True)\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\r\n self.assertEqual(actual, expected)" ]
[ "0.6945921", "0.6697105", "0.6596213", "0.6476732", "0.62312645", "0.61732423", "0.60880965", "0.6084054", "0.60769737", "0.607596", "0.606519", "0.6056232", "0.60424167", "0.59891844", "0.5942686", "0.59221935", "0.59039664", "0.58928454", "0.5885066", "0.5878272", "0.5855782", "0.58557516", "0.5818244", "0.5726032", "0.5718677", "0.5708825", "0.5687403", "0.56806946", "0.56753165", "0.56583613" ]
0.67475384
1
Test `reverse` filter function.
def test_reverse(self): test_cases = [ Case( description="lists of strings", val=["b", "a", "B", "A"], args=[], kwargs={}, expect=["A", "B", "a", "b"], ), Case( description="lists of things", val=["a", "b", 1, [], {}], args=[], kwargs={}, expect=[{}, [], 1, "b", "a"], ), Case( description="empty list", val=[], args=[], kwargs={}, expect=[], ), Case( description="unexpected argument", val=["a", "b"], args=[", "], kwargs={}, expect=FilterArgumentError, ), Case( description="value not an array", val=123, args=[], kwargs={}, expect=FilterValueError, ), Case( description="undefined left value", val=self.env.undefined("test"), args=[], kwargs={}, expect=[], ), ] self._test(Reverse, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reverse(self):\n t = Reverse(Quantize())\n assert t.reverse(8.6) == 9\n assert t.reverse(8.4) == 8\n assert t.reverse(5.3) == 5\n assert numpy.all(t.reverse([8.6, 5.3]) == numpy.array([9, 5], dtype=int))", "def test_reverse(self):\n t = Quantize()\n assert t.reverse(9) == 9.0\n assert t.reverse(5) == 5.0\n assert numpy.all(t.reverse([9, 5]) == numpy.array([9.0, 5.0], dtype=float))", "def test_reverse(self):\n t = Linearize()\n assert t.reverse(1) == numpy.e", "def test_suite():\n test(reverse(\"happy\") == \"yppah\")\n test(reverse(\"Python\") == \"nohtyP\")\n test(reverse(\"\") == \"\")\n test(reverse(\"a\") == \"a\")", "def test_correct_inverted(self):\n tests = [\n 'test.1',\n 'test.2',\n ]\n expected = '-test.1:test.2'\n\n self.assertEqual(test_apps.get_gtest_filter(tests, invert=True), expected)", "def test_reverse(self):\n t = Identity()\n assert t.reverse(\"yo\") == \"yo\"", "def testSortingReverse(self):\n if self.sortingReverse.lower() in [\"1\", \"yes\", \"true\", \"on\"]:\n self.assertTrue(\n self.config.sortingReverse\n )\n elif self.sortingReverse.lower() in [\"0\", \"no\", \"false\", \"off\"]:\n self.assertFalse(\n self.config.sortingReverse\n )\n else:\n self.assertEqual(\n tools.SORTING_REVERSE_DEFAULT,\n self.config.sortingReverse\n )", "def reverse(self):\n enabled = self.lib.iperf_get_test_reverse(self._test)\n\n if enabled:\n self._reverse = True\n else:\n self._reverse = False\n\n return self._reverse", "def get_reverse_complement_unit_tests():\n \n print get_reverse_complement(\"ATGCCCGCTTT\")\n print get_reverse_complement(\"CCGCGTTCA\")\n print get_reverse_complement(\"ACCTTGGAAAATTT\")", "def test_reverse_tails(self):\r\n self.assertEqual(reverse_tails('high'), 'low')\r\n self.assertEqual(reverse_tails('low'), 'high')\r\n self.assertEqual(reverse_tails(None), None)\r\n self.assertEqual(reverse_tails(3), 3)", "def test_reverse(self):\n t = Enumerate([2, \"asfa\", \"ipsi\"])\n assert t.reverse(0) == 2\n assert t.reverse(1) == \"asfa\"\n assert t.reverse(2) == \"ipsi\"\n with pytest.raises(IndexError):\n t.reverse(3)\n assert numpy.all(\n t.reverse([[2, 1], [0, 2]])\n == numpy.array([[\"ipsi\", \"asfa\"], [2, \"ipsi\"]], dtype=object)\n )\n\n # for the crazy enough\n t = Enumerate([2])\n assert t.reverse(0) == 2\n with pytest.raises(IndexError):\n t.reverse(1)\n assert numpy.all(\n t.reverse([[0, 0], [0, 0]]) == numpy.array([[2, 2], [2, 2]], dtype=object)\n )", "def test_generator_for() -> None:\n reversed_str: List[str] = []\n for char in reverse(\"golf\"):\n reversed_str.append(char)\n assert \"\".join(reversed_str) == \"flog\"\n\n assert \"\".join(reverse(\"golf\")) == \"flog\"", "def _can_reverse(self):\n return not bool(self._reverse_callback())", "def test_reverse(self):\n t = Precision()\n assert t.reverse(9.0) == 9.0\n assert t.reverse(5.0) == 5.0\n assert numpy.all(t.reverse([9.0, 5.0]) == numpy.array([9.0, 5.0], dtype=float))", "def test_reverse_rec(self):\n # test the raised value error when non list is passed in\n with self.assertRaises(ValueError):\n reverse_rec(34)\n #normal reversed list\n self.assertEqual(reverse_rec([1,2,3]),[3,2,1])\n #list of size 1 reveresed\n self.assertEqual(reverse_rec([1]), [1])\n #empty list returns empty list\n self.assertEqual(reverse_rec([]), [])\n #test normal reversed list\n self.assertEqual(reverse_rec([3, 2, 2, 3]), [3, 2, 2, 3])\n #test normal reversed list\n self.assertEqual(reverse_rec([1, 2, 3, 4]), [4, 3, 2, 1])", "def test_parser_reverse(self):\n args = [\"directory\", \"--reverse\"]\n parser = 
setup_parser()\n output = parser.parse_args(args)\n self.assertEqual(output.directory, \"directory\")\n self.assertTrue(output.colorize)\n self.assertFalse(output.fancy)\n self.assertTrue(output.reverse)", "def test_reverse_rec(self):\n self.assertEqual(reverse_rec([1,2,3]),[3,2,1])\n self.assertEqual(reverse_rec([2, 4, 6, 8, 10]),[10, 8, 6, 4, 2]) #larger list\n self.assertEqual(reverse_rec([1, 2]),[2,1]) #list size 2\n self.assertEqual(reverse_rec([]), []) #empty lists\n self.assertEqual(reverse_rec([1]), [1]) #list size 1", "def test_filter_wea_zero_entry():\n pass", "def test_reversed_path(self):\n url_path = reverse('planner:recipes-detail', args='1')\n response = self.client.get(url_path)\n self.assertEqual(response.status_code, 200)", "def reverse(input):\n return input[::-1]", "def reverse(x):\n return x[::-1]", "def test_reversed_path(self):\n url_path = reverse('planner:recipes-list')\n response = self.client.get(url_path)\n self.assertEqual(response.status_code, 200)", "def test_filter_translate(tr_arg, tr_src, tr_dest):\n args = parser.parse_args([\"-tr\", *tr_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(tr_src, filters, args.extension, args.raw)\n assert dest == tr_dest", "def test_correct_inverted(self):\n tests = [\n 'KIF.test1',\n 'KIF.test2',\n ]\n expected = '-NAME:test1|test2'\n\n self.assertEqual(expected,\n test_apps.get_kif_test_filter(tests, invert=True))", "def get_reverse_complement_unit_tests():\n input_a='ATTATTATT'\n expected_output='AATAATAAT'\n actual_output=get_reverse_complement(input_a)\n print 'Expected Output is ' + expected_output\n print 'Actual Output is ' +actual_output\n \n input_a='ATTCATATT'\n expected_output='AATATGAAT'\n actual_output=get_reverse_complement(input_a)\n print 'Expected Output is ' + expected_output\n print 'Actual Output is ' +actual_output", "def reverse(input=''):\n return input[::-1]", "def reverse_difference():", "def test_op_reverse_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n old_a = numpy.empty_like(a)\n old_a[:] = a[:]\n expect = numpy.array(a[::-1])\n\n offl_a = stream.bind(a)\n offl_r = offl_a.reverse()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))", "def test_sort_reversed():\n reverse_sorted_data = [3, 2, 1]\n sorted_data = bubble_sort(reverse_sorted_data)\n assert sorted_data == [1, 2, 3]", "def do_revive(self, arg):\n \treturn False" ]
[ "0.66847414", "0.6667177", "0.6579335", "0.6499214", "0.6478599", "0.6319221", "0.6278729", "0.6268115", "0.6027764", "0.5967647", "0.58728814", "0.585832", "0.5847677", "0.5824535", "0.58035773", "0.5802626", "0.5715419", "0.5714377", "0.5682833", "0.5658598", "0.56185347", "0.5615791", "0.56138", "0.56067365", "0.5605535", "0.5593944", "0.55895627", "0.5539677", "0.5492027", "0.5491077" ]
0.7016158
0
Test `sort_natural` filter function.
def test_sort_natural(self): test_cases = [ Case( description="lists of strings", val=["b", "a", "C", "B", "A"], args=[], kwargs={}, expect=["a", "A", "b", "B", "C"], ), Case( description="lists of strings with a None", val=["b", "a", None, "C", "B", "A"], args=[], kwargs={}, expect=[None, "a", "A", "b", "B", "C"], ), Case( description="lists of objects with key", val=[{"title": "foo"}, {"title": "bar"}, {"title": "Baz"}], args=["title"], kwargs={}, expect=[{"title": "bar"}, {"title": "Baz"}, {"title": "foo"}], ), Case( description="lists of objects with missing key", val=[{"title": "foo"}, {"title": "bar"}, {"heading": "Baz"}], args=["title"], kwargs={}, expect=[{"title": "bar"}, {"title": "foo"}, {"heading": "Baz"}], ), Case( description="empty list", val=[], args=[], kwargs={}, expect=[], ), Case( description="too many arguments", val=[{"title": "foo"}, {"title": "bar"}, {"title": "Baz"}], args=["title", "heading"], kwargs={}, expect=FilterArgumentError, ), Case( description="value not an array", val=1234, args=[], kwargs={}, expect=FilterValueError, ), Case( description="undefined left value", val=self.env.undefined("test"), args=[], kwargs={}, expect=[], ), Case( description="undefined argument", val=[{"title": "foo"}, {"title": "bar"}, {"title": "Baz"}], args=[self.env.undefined("test")], kwargs={}, expect=[{"title": "foo"}, {"title": "bar"}, {"title": "Baz"}], ), ] self._test(SortNatural, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def natural_sort(sequence, comparison_callable=natural_sort_comparison):\n sequence.sort(comparison_callable)", "def natural_sort( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )\n return l", "def test_dotted_sorting(self):\n assert natsort(['1.5', '1.0']) == ['1.0', '1.5']", "def NaturalSort(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(l, key = alphanum_key)", "def _sort_natural(names_list, reverse=False):\n def sort_key(val):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', val)]\n\n return sorted(names_list, key=sort_key, reverse=reverse)", "def natural_sort_comparison(value1, value2):\n return cmp(_natural_sort_key(value1), _natural_sort_key(value2))", "def _natural_sort(alphanumeric_data):\n try:\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n except Exception as e:\n logger.error(\"Exception in _natural_sort : \" + str(e))\n return sorted(alphanumeric_data, key=alphanum_key, reverse=True)", "def test_reversed_version_sorting(self):\n assert natsort(['1', '5', '10', '50'], reverse=True) == ['50', '10', '5', '1']", "def natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def natural_sorted(sequence, comparison_callable=natural_sort_comparison):\n temp = copy.copy(seq)\n natsort(temp, comparison_callable)\n return temp", "def test_python_3_compatibility(self):\n assert natsort(['1', 'a']) == ['1', 'a']", "def test_natsort_case_insensitive(self):\r\n\r\n # string with alpha and numerics sort correctly\r\n s = [\r\n 'sample1',\r\n 'sample2',\r\n 'sample11',\r\n 'sample12',\r\n 'SAmple1',\r\n 'Sample2']\r\n\r\n # expected values\r\n exp_natsort = ['SAmple1', 'Sample2', 'sample1', 'sample2', 'sample11',\r\n 'sample12']\r\n exp_natsort_case_insensitive = ['sample1', 'SAmple1', 'sample2',\r\n 'Sample2', 'sample11', 'sample12']\r\n\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort_case_insensitive(s),\r\n exp_natsort_case_insensitive)\r\n\r\n s.reverse()\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort(list('cbaA321')), list('123Aabc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort_case_insensitive(list('cdBa')), list('aBcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort_case_insensitive(['1.11', '1.12', '1.00',\r\n '0.009']), ['0.009', '1.00',\r\n '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive([('11', 'A'), ('2', 'B'),\r\n ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'),\r\n ('2', 'B'), ('11', 'A')])", "def test_natsort(self):\r\n # string with alpha and numerics sort correctly\r\n s = 'sample1 sample2 sample11 sample12'.split()\r\n self.assertEqual(natsort(s),\r\n 
'sample1 sample2 sample11 sample12'.split())\r\n s.reverse()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 sample12'.split())\r\n self.assertEqual(natsort(list('cba321')), list('123abc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort(list('cdba')), list('abcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort(['1.11', '1.12', '1.00', '0.009']),\r\n ['0.009', '1.00', '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(\r\n natsort([('11', 'A'), ('2', 'B'), ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'), ('2', 'B'), ('11', 'A')])", "def custom_sort(arr):\n pass", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def natural_sort_case_insensitive_comparison(value1, value2):\n return natural_sort_comparison(value1.lower(), value2.lower())", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def test_listCatalogEntriesWithSortFilters(self):\n expected_orders = {\n 'launch_date': ['25544', '37820'],\n '-launch_date': ['37820', '25544'],\n 'norad_catalog_number': ['25544', '37820'],\n '-norad_catalog_number': ['37820', '25544'],\n }\n\n for param, order in expected_orders.items():\n response = self.client.get(\n '/api/v1/catalogentry/?ordering={}'.format(param)\n )\n content = response.content.decode('utf8')\n json_data = json.loads(content)\n\n for i in range(len(order)):\n self.assertEqual(\n json_data['results'][i]['norad_catalog_number'],\n order[i]\n )", "def _natural_key_sort(string_to_sort):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_to_sort)]", "def add_sort_filter(source, args, index):\n tags = hxl.TagPattern.parse_list(args.get('sort-tags%02d' % index, ''))\n reverse = (args.get('sort-reverse%02d' % index) == 'on')\n return source.sort(tags, reverse)", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def _NaturalSortByName(node):\n # See: https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/\n name = node.get('name').lower()\n convert = lambda text: int(text) if text.isdigit() else text\n return [convert(c) for c in re.split('([0-9]+)', name)]", "def test_calc_sort_without_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object._calc_sort_value(sort_base_length=3,\n increment=1,\n sort_prefix_parts=[test_object.datum_group.sort]\n )\n expected = 10101\n self.assertEqual(expected, actual)", "def _natural_sort_key(value):\n return map(try_int_cast, re.findall(r'(\\d+|\\D+)', value))" ]
[ "0.65320396", "0.619571", "0.61323243", "0.6105644", "0.6093698", "0.6048497", "0.60407436", "0.6026955", "0.6009246", "0.59946877", "0.5951541", "0.59253305", "0.590383", "0.58909464", "0.5858026", "0.58571583", "0.5835636", "0.5825835", "0.5791668", "0.5790346", "0.57736903", "0.57396656", "0.5727472", "0.57248807", "0.5718775", "0.5718775", "0.571088", "0.570889", "0.5686728", "0.56640303" ]
0.7430661
0
Test `uniq` filter function.
def test_uniq(self): test_cases = [ Case( description="lists of strings", val=["a", "b", "b", "a"], args=[], kwargs={}, expect=["a", "b"], ), Case( description="lists of things", val=["a", "b", 1, 1], args=[], kwargs={}, expect=["a", "b", 1], ), Case( description="empty list", val=[], args=[], kwargs={}, expect=[], ), Case( description="unhashable items", val=["a", "b", [], {}], args=[", "], kwargs={}, expect=FilterArgumentError, ), Case( description="unexpected argument", val=["a", "b"], args=[", "], kwargs={}, expect=FilterArgumentError, ), Case( description="value not an array", val="a, b", args=[], kwargs={}, expect=FilterValueError, ), Case( description="undefined left value", val=self.env.undefined("test"), args=[], kwargs={}, expect=[], ), ] self._test(Uniq, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique():\n\n def _apply_fn(dataset):\n return dataset.unique()\n\n return _apply_fn", "def uniq(input, output, fields, delimiter, encoding, verbose, format_in, format_out, zipfile, filter):\n if verbose:\n enableVerbose()\n options = {}\n options['output'] = output\n options['fields'] = fields\n options['delimiter'] = delimiter\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['format_out'] = format_out\n options['zipfile'] = zipfile\n options['filter'] = filter\n acmd = Selector()\n acmd.uniq(input, options)\n pass", "def unique(iterable, filterfalse=filterfalse):\n seen = set()\n add = seen.add\n for element in filterfalse(seen.__contains__, iterable):\n add(element)\n yield element", "def uniq(seq):\n return sorted(set(seq))", "def test_distinct(self):\n self.Person(name=\"Mr Orange\", age=20).save()\n self.Person(name=\"Mr White\", age=20).save()\n self.Person(name=\"Mr Orange\", age=30).save()\n self.Person(name=\"Mr Pink\", age=30).save()\n assert set(self.Person.objects.distinct(\"name\")) == {\n \"Mr Orange\",\n \"Mr White\",\n \"Mr Pink\",\n }\n assert set(self.Person.objects.distinct(\"age\")) == {20, 30}\n assert set(self.Person.objects(age=30).distinct(\"name\")) == {\n \"Mr Orange\",\n \"Mr Pink\",\n }", "def _unique(li):\n return list(set(li))", "def filterduplicates(client, repeatfactor, tracks): # {{{1\n trackstofilter = client.playlist()\n if len(trackstofilter) < repeatfactor:\n repeatfactor = len(trackstofilter)\n trackstofilter = trackstofilter[-repeatfactor : -1]\n return [t for t in tracks if not t[1]['file'] in trackstofilter]", "def UniqueIterator(iterator):\r\n so_far = set()\r\n def no_dups(x):\r\n if x in so_far:\r\n return False\r\n else:\r\n so_far.add(x)\r\n return True\r\n\r\n return IteratorFilter(iterator, no_dups)", "def uniq(listinput):\n\t\"\"\" This will be provided for the student. 
\"\"\"\n\toutput = []\n\tfor x in listinput:\n\t\tif x not in output:\n\t\t\toutput.append(x)\n\treturn output", "def uniq(seq):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in seq if x not in seen and not seen_add(x)]", "def uniq(seq):\r\n #TODO: consider building a set out of seq so that the if condition\r\n #is constant time -JB\r\n return [x for i, x in enumerate(seq) if seq.index(x) == i]", "def unique(x):\n\n return list(set(x))", "def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100", "def test_filter_fastq(self):\r\n input_seqs = [('Seq1 some comment', 'ACCTTGG', 'BBBBBBB'),\r\n ('s2 some other comment', 'TTGG', 'BBBB'),\r\n ('S3', 'AAGGCCGG', 'BBCtatcc'),\r\n ('S5 some comment', 'CGT', 'BBB'),\r\n ('seq6 some other comment', 'AA', 'BB'),\r\n ('S7', 'T', 's')]\r\n seqs_to_keep = {}.fromkeys(['Seq1',\r\n 's2 some other comment',\r\n 'S3 no comment'])\r\n\r\n actual = fake_output_f()\r\n filter_fastq(input_seqs,\r\n actual,\r\n seqs_to_keep,\r\n negate=False)\r\n self.assertEqual(actual.s, self.filter_fastq_expected1)\r\n\r\n actual = fake_output_f()\r\n filter_fastq(input_seqs,\r\n actual,\r\n seqs_to_keep,\r\n negate=True)\r\n self.assertEqual(actual.s, self.filter_fastq_expected2)", "def uniq(elements):\n us = set()\n ret = []\n for e in elements:\n if e not in us:\n ret.append(e)\n us.add(e)\n return ret", "def uniq(seq):\n # Taken from https://stackoverflow.com/questions/480214\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]", "def test_non_valid_input_in_list_end():\n from unique_chars import find_uniq\n with pytest.raises(ValueError):\n find_uniq(['qwwer', 'cake', 14])", "def unique(li):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in li if not (x in seen or seen_add(x))]", "def test_no():\n errors = generate_errors(10, 5)\n assert NoFiltering().filter(errors) == errors", "def listops_uniq(list_a):\r\n retlist = []\r\n for item in list_a:\r\n if item not in retlist:\r\n retlist.append(item)\r\n\r\n return retlist", "def is_unique(x):\n return len(set(x)) == len(x)", "def filter_all(_):\n return True", "def unique(arr):\n seen = set()\n for elem in arr:\n if elem not in seen:\n yield elem\n seen.add(elem)", "def unique(x):\n try:\n tmp = x.flatten()\n if tmp.size == 0:\n return tmp\n tmp.sort()\n idx = concatenate(([True],tmp[1:]!=tmp[:-1]))\n return tmp[idx]\n except AttributeError:\n items = list(set(x))\n items.sort()\n return asarray(items)", "def unique_op(\n input, sorted=True, return_inverse=False, return_counts=False, dtype=flow.int\n):\n if not return_inverse and not return_counts:\n return flow._C.unique(input, sorted, dtype=dtype)\n else:\n return flow._C.unique(\n input,\n sorted,\n return_inverse=return_inverse,\n return_counts=return_counts,\n dtype=dtype,\n )", "def test__remove_duplicates(self):\n\n result = deduped_list\n expected = [\n 'Fred',\n 'Dave',\n 'Sarah',\n 'John',\n 'Matthew',\n 'Joanna',\n 'Marjorie',\n 'Anna',\n 'Tony',\n 'Sam',\n 'Eric',\n 'Susan',\n 'Arthur',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def _unique_sorted(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]", "def distinct(x):\n return list(set(x))", "def unique_list(var):\n return len([x for x in set(var)]) == len(var)", "def test_filter_otus_from_otu_map(self):\r\n otu_map_in = \"\"\"o1 some comment\ts1_1\ts1_2\r\no2\ts1_3\ts1_4\ts2_5\r\no3\ts2_3\r\n\"\"\"\r\n otu_map_no_single = 
\"\"\"o1 some comment\ts1_1\ts1_2\r\no2\ts1_3\ts1_4\ts2_5\r\n\"\"\"\r\n otu_map_no_single_double = \"\"\"o2\ts1_3\ts1_4\ts2_5\r\n\"\"\"\r\n otu_map_no_single_min_sample2 = \"\"\"o2\ts1_3\ts1_4\ts2_5\r\n\"\"\"\r\n\r\n # write the test files\r\n fd, in_fp = mkstemp(dir=self.tmp_dir,\r\n prefix='qiime_filter_test', suffix='.txt')\r\n close(fd)\r\n fasting_seqs_f = open(in_fp, 'w')\r\n fasting_seqs_f.write(otu_map_in)\r\n fasting_seqs_f.close()\r\n self.files_to_remove.append(in_fp)\r\n\r\n fd, actual_fp = mkstemp(dir=self.tmp_dir,\r\n prefix='qiime_filter_test', suffix='.txt')\r\n close(fd)\r\n self.files_to_remove.append(actual_fp)\r\n\r\n retained_otus = filter_otus_from_otu_map(in_fp, actual_fp, 2)\r\n self.assertEqual(open(actual_fp).read(), otu_map_no_single)\r\n self.assertEqual(retained_otus, set(['o1 some comment', 'o2']))\r\n\r\n retained_otus = filter_otus_from_otu_map(in_fp, actual_fp, 3)\r\n self.assertEqual(open(actual_fp).read(), otu_map_no_single_double)\r\n self.assertEqual(retained_otus, set(['o2']))\r\n\r\n retained_otus = filter_otus_from_otu_map(in_fp, actual_fp, 2, 2)\r\n self.assertEqual(open(actual_fp).read(), otu_map_no_single_min_sample2)\r\n self.assertEqual(retained_otus, set(['o2']))" ]
[ "0.63492405", "0.62281924", "0.6093811", "0.59848446", "0.5980171", "0.59633833", "0.59403497", "0.59102774", "0.5889821", "0.5874548", "0.5850808", "0.57886237", "0.5765051", "0.5734143", "0.57336146", "0.57023686", "0.5699716", "0.5691954", "0.568361", "0.5671094", "0.56442374", "0.5593366", "0.5582721", "0.5577277", "0.5571773", "0.55433613", "0.55349374", "0.55320907", "0.553011", "0.55037093" ]
0.76420027
0
Test `compact` filter function.
def test_compact(self): test_cases = [ Case( description="lists with nil", val=["b", "a", None, "A"], args=[], kwargs={}, expect=["b", "a", "A"], ), Case( description="empty list", val=[], args=[], kwargs={}, expect=[], ), Case( description="unexpected argument", val=["a", "b"], args=[", "], kwargs={}, expect=FilterArgumentError, ), Case( description="value not an array", val=1, args=[], kwargs={}, expect=FilterValueError, ), Case( description="undefined left value", val=self.env.undefined("test"), args=[], kwargs={}, expect=[], ), ] self._test(Compact, test_cases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_compact(self, name):", "def compact(items):\n return filter(lambda item: item is not None and len(item) > 0, items)", "def test_filter_mixed_function(self):\n for none_type in (False, True):\n for all_type in (False, True):\n for any_type in (False, True, None):\n result = none_type is False and all_type is True \\\n and (any_type is None or any_type is True)\n self._test_filter(none_type, all_type, any_type, result)", "def compact(fun: _CallableT) -> _CallableT:\n fun.compact = True\n return fun", "def test_filter_wea_zero_entry():\n pass", "def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: True, ftype='none')\n self.assertFalse(self.es.streamfilter(self.data))", "def test_block():\n b = common.Block(['1 1 1', '2 2 3 ', ''])\n expect = ['1 1 1', '2 2 3']\n c = b.compact()\n assert c == expect\n assert b != expect\n\n b.compact(inplace=True)\n assert b == expect", "def filter_all(_):\n return True", "def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100", "def test_intern_filter(self):\r\n myType = TypedListType(T.TensorType('float64',\r\n (False, False)))\r\n\r\n x = numpy.asarray([[4, 5], [4, 5]], dtype='float32')\r\n\r\n self.assertTrue(numpy.array_equal(myType.filter([x]), [x]))", "def empty_filter(item, *args, **kwargs):\n return True", "def _compactness_pruning(self):\n feature_phrases = [phrase for phrase in self.frequent_features if self._is_compact(phrase)]\n self.features_phrases = feature_phrases", "def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))", "def testUsingFilterTool(self):\n pass", "def compact(self):\n raise NotImplementedError", "def test_no_filter(self):\r\n\r\n d1 = {\"% IDENTITY\": \"97.6\"}\r\n d2 = {\"% IDENTITY\": \"0.0\"}\r\n d3 = {\"% IDENTITY\": \"100.0\"}\r\n\r\n self.assertTrue(no_filter(d1))\r\n self.assertTrue(no_filter(d2))\r\n self.assertTrue(no_filter(d3))", "def test_filter_function_any(self):\n self.es.register_filter(lambda x: True, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))", "def _test_filter(self, none_type, all_type, any_type, result):\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(lambda x: none_type, ftype='none')\n self.es.register_filter(lambda x: all_type, ftype='all')\n if any_type is not None:\n self.es.register_filter(lambda x: any_type, ftype='any')\n self.assertEqual(self.es.streamfilter(self.data), result,\n 'Test EventStreams filter mixed function failed for\\n'\n \"'none': {}, 'all': {}, 'any': {}\\n\"\n '(expected {}, given {})'\n .format(none_type, all_type, any_type,\n result, not result))", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def test_filter_function_settings(self):\n def foo():\n \"\"\"Dummy function.\"\"\"\n return True\n\n self.es.register_filter(foo)\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'], [])\n\n self.es.register_filter(foo, ftype='none')\n self.assertEqual(self.es.filter['all'][0], foo)\n 
self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'][0], foo)\n\n self.es.register_filter(foo, ftype='any')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'][0], foo)\n self.assertEqual(self.es.filter['none'][0], foo)", "def compact(seq):\n for item in seq:\n if item:\n yield item", "def test_no_op(self):\n request = RequestFactory().get('/?search=&tags=&status=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters, {})", "def test_no():\n errors = generate_errors(10, 5)\n assert NoFiltering().filter(errors) == errors", "def is_compact(self):\n return self.n_rays()==0 and self.n_lines()==0", "def test_filter_output_third():\n data = [\n {\n \"name\": \"Bill\",\n \"last_name\": \"Gilbert\",\n \"occupation\": \"was here\",\n \"type\": \"person\",\n },\n {\"is_dead\": True, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"polly\"},\n {\"is_dead\": False, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"billy\"},\n ]\n\n actual_result = make_filter(name=\"billy\", type=\"bird\").apply(data)\n expected_result = [data[2]]\n assert actual_result == expected_result", "def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))", "def _verify_single_or_no_compact(cls):\n methods = [m[0] for m in inspect.getmembers(cls, predicate=callable)]\n n_compact_fns = len([method_name for method_name in methods\n if hasattr(getattr(cls, method_name), 'compact')])\n if n_compact_fns > 1:\n raise errors.MultipleMethodsCompactError()", "def test_explicit_filter(self):\n request = RequestFactory().get('/?status=archived')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.data.getlist('status'), ['archived'])", "def compact(plugin):\n return plugin.backend.compact()", "def test_filter_comparison_func_false(self):\n\n num_props_original = len(self.test_table._odmldict)\n self.test_table.filter(comparison_func=lambda x, y: True, PropertyName='')\n self.assertEqual(len(self.test_table._odmldict), num_props_original)\n\n self.test_table.filter(comparison_func=lambda x, y: False, PropertyName='')\n self.assertEqual(len(self.test_table._odmldict), 0)" ]
[ "0.63996834", "0.61731136", "0.61543494", "0.6071144", "0.59451234", "0.5849943", "0.581691", "0.5800044", "0.574214", "0.573533", "0.57180494", "0.57152534", "0.56675535", "0.56581366", "0.56312007", "0.55737156", "0.5543245", "0.5501316", "0.5498039", "0.54955107", "0.5490884", "0.5485897", "0.5482403", "0.54737055", "0.54513925", "0.5408208", "0.5402984", "0.5390839", "0.5388654", "0.5341261" ]
0.7841796
0
u""" Get Minimal AWS Linux AMI ID
def minimal_linux_ami(self): client = self.aws.get_client('ec2') try: res = client.describe_images(Owners=['self', '099720109477'], Filters=[ { 'Name': 'virtualization-type', 'Values': ['hvm'] }, { 'Name': 'root-device-type', 'Values': ['ebs'] }, { 'Name': 'architecture', 'Values': ['x86_64'] }, { 'Name': 'description', 'Values': ['Canonical, Ubuntu, 16.04 LTS, amd64 xenial image*'] }]) except ClientError as ex: print(ex) sys.exit() timestep = None current_time = datetime.now(timezone.utc) ami_id = None for image in res['Images']: if timestep: create_time = parse(image['CreationDate']) current_timestep = current_time - create_time if current_timestep < timestep: timestep = current_timestep ami_id = image['ImageId'] else: create_time = parse(image['CreationDate']) timestep = current_time - create_time ami_id = image['ImageId'] return ami_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_self_instance_id():\n\n logging.debug('get_self_instance_id()')\n response = urllib2.urlopen('http://169.254.169.254/1.0/meta-data/instance-id')\n instance_id = response.read()\n return instance_id", "def get_ami_keyname ( app_name ) :\n return app_name + '.ami'", "def get_ami_by_id ( ec2_conn, ami_id ) :\n amis = ec2_conn.get_all_images( image_ids = [ ami_id ] )\n for ami in amis :\n return ami", "def ec2_image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ec2_image_id\")", "def _GetIdFromInstanceDirStr(instance_dir):\n match = _RE_LOCAL_INSTANCE_ID.match(instance_dir)\n if match:\n return match.group(\"ins_id\")\n\n # To support the device which is not created by acloud.\n if os.path.expanduser(\"~\") in instance_dir:\n return \"1\"\n\n return None", "def test_get_f1_ami_id():\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_f1_ami_id\n try:\n ami = get_f1_ami_id()\n except Exception as e:\n pytest.fail(f\"get_f1_ami_id() raised {e} and this likely means you need to run 'scripts/update_test_amis.py'\")\n\n if re.match(r\"^ami-[0-9a-f]+$\",ami) is None:\n pytest.fail(f\"'{ami}' doesn't look like a legit AMI ID and this likely means you need to run 'scripts/update_test_amis.py'\")", "def get_hardware_id():\r\n try:\r\n return utils.run('crossystem hwid').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def ami(self):\n return getattr(self._data.ami, self._name, None)", "def get_imguuid(disk_object):\n return disk_object.get_id()", "def GetMachineKey():\n return platform.node()", "def guid_fast_impl(pool):\n with open(f'/proc/spl/kstat/zfs/{pool}/guid') as f:\n return f.read().strip()", "def _generate_machine_id(self):\r\n mach_id = \"machine_\"\r\n try:\r\n gws = netifaces.gateways() # get all gateways\r\n default = gws['default'] # get the default gw\r\n adapter = default[2][1] # get the adapter identifier\r\n real_adapter = netifaces.ifaddresses(adapter) # get the adapter\r\n link_info = real_adapter[netifaces.AF_LINK]\r\n mac = link_info[0]['addr']\r\n mac = re.sub('[:]', '', mac)\r\n except:\r\n mac = \"unsup\"\r\n self.logger.error(\"Getting mac of internet card is not supported, needs netifaces >= 0.10\")\r\n self.machine_id = mach_id + mac", "def aws_external_id(self) -> str:\n return pulumi.get(self, \"aws_external_id\")", "def _get_image_id(image_name, instance_profile_arn=None,\n ec2_client=None, region_name=None):\n owners = []\n filters = []\n image_id = image_name\n if not image_name:\n # Amazon has its own Linux distribution that is largely binary\n # compatible with Red Hat Enterprise Linux.\n image_name = 'amzn2-ami-hvm-2.0.????????.?-x86_64-gp2'\n owners = ['amazon']\n filters = [\n {'Name': 'name', 'Values': [image_name]},\n ]\n elif not image_name.startswith('ami-'):\n if not instance_profile_arn:\n raise RuntimeError(\"instance_profile_arn must be defined when\"\\\n \" image_name is not already an id.\")\n look = re.match(r'arn:aws:iam::(\\d+):', instance_profile_arn)\n owners = [look.group(1)]\n filters = [\n {'Name': 'name', 'Values': [image_name]},\n ]\n\n if filters:\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_images(Owners=owners, Filters=filters)\n images = sorted(resp['Images'], key=lambda item: item['CreationDate'],\n reverse=True)\n if len(images) > 1:\n LOGGER.warning(\n \"Found more than one image named '%s' in account '%s',\"\\\n \" picking the 
first one out of %s\",\n image_name, owners,\n [(image['CreationDate'], image['ImageId'])\n for image in images])\n image_id = images[0]['ImageId']\n return image_id", "def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'", "def get_system_id(self):\n return self.machine_config_file_value(\"DEFAULT.SID\").strip('\"')", "def get_instance_id():\n global _instance_id\n if _instance_id == '__unset':\n try:\n _instance_id = _fetch_instance_id()\n except IOError:\n log.exception(\"Exception retrieving InstanceId\")\n _instance_id = None\n\n return _instance_id", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")", "def get_id(disk):\n\n #TODO\n return \"Unknown\"", "def ec2_image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"ec2_image_id\")", "def instance_id(self) -> str:\n return pulumi.get(self, \"instance_id\")", "def get_generator_id() -> str:\n res = os.name + str(os.getpid()) + str(random.randint(-1000, 1000))\n res = hashlib.sha224(res.encode('utf-8')).digest()\n res = base64.b64encode(res).decode('utf-8')\n return res", "def get_uuid(disk):\n\n #TODO\n return \"Unknown\"", "def detect_own_id() -> str:\n\n pod = os.environ.get('POD_ID', None)\n if pod is not None:\n return pod\n\n user = getpass.getuser()\n host = socket.getfqdn()\n now = datetime.datetime.utcnow().isoformat()\n rnd = ''.join(random.choices('abcdefhijklmnopqrstuvwxyz0123456789', k=6))\n return f'{user}@{host}/{now}/{rnd}'", "def get_ami_by_name ( ec2_conn, ami_name ) :\n amis = ec2_conn.get_all_images( filters = { \"name\": [ ami_name ] } )\n for ami in amis :\n return ami" ]
[ "0.68473345", "0.6364349", "0.62722474", "0.61129767", "0.606595", "0.5926506", "0.5893706", "0.589055", "0.58329403", "0.58002704", "0.5794514", "0.5773664", "0.57633007", "0.57095546", "0.57048184", "0.56966126", "0.5679263", "0.5651188", "0.5651188", "0.5651188", "0.5651188", "0.5651188", "0.5651188", "0.56363404", "0.5628202", "0.56234366", "0.5609671", "0.56041867", "0.55977666", "0.55729854" ]
0.7679751
0
Deletes all tables from the database. Returns None
def delete_all_tables(self):
    if self.__dbfile is not None:
        for table_name in list(LocalData.table_info.keys()):
            if self.table_exists(table_name):
                self._conn.execute("DROP TABLE %s" % table_name)
                self._conn.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_db():\n for name in TABLES:\n result = execute_query('truncate table {};'.format(name)), ())", "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def drop_all_tables():\n\tcommon_db.drop_all_tables()", "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def clear_db():\n from flask_monitoringdashboard.database import get_tables, engine\n\n for table in get_tables():\n table.__table__.drop(engine)\n table.__table__.create(engine)", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def delete_tables(db, table_names):\n with tables(db.engine, *table_names) as tpl:\n for tbl in tpl[1:]:\n tbl.delete().execute()", "def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def empty_tables():\n Wordform.objects.all().delete()\n Lemma.objects.all().delete()\n Definition.objects.all().delete()\n Language.objects.all().delete()\n ProperName.objects.all().delete()", "def drop_all_tables(args):\n engine = sqlalchemy.create_engine(CONFIG.db_uri)\n print(\"Dropping all tables on {}...\".format(CONFIG.db_uri), end=\" \")\n Base.metadata.drop_all(bind=engine)\n print(\"finished.\")", "def deleteDBtables(self, tables=None):\n\n # If tables is None, all tables are deleted and re-generated\n if tables is None:\n # Delete all existing tables\n self._c.execute('SET FOREIGN_KEY_CHECKS = 0')\n for table in self.getTableNames():\n self._c.execute(\"DROP TABLE \" + table)\n self._c.execute('SET FOREIGN_KEY_CHECKS = 1')\n\n else:\n # It tables is not a list, make the appropriate list\n if type(tables) is str:\n tables = [tables]\n\n # Remove all selected tables (if exist in the database).\n for table in set(tables) & set(self.getTableNames()):\n self._c.execute(\"DROP TABLE \" + table)\n\n self._conn.commit()\n\n return", "def delete_db():\n db.drop_all()", "def delete_all_from(self, tablename):\n query = 'delete from ' + tablename\n try:\n self.__cur.execute(query)\n self.__conn.commit()\n except Exception as e:\n self.__conn.rollback()\n raise e", "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def reset_db():\n\n metadata = sa.MetaData()\n metadata.reflect(engine)\n for tbl in reversed(metadata.sorted_tables):\n tbl.drop(engine)\n create_tables()", "def delete_all_records(db):\n with tables(db.engine) as (connection,):\n metadata = sqlalchemy.MetaData(bind=connection)\n metadata.reflect()\n # We delete the tables in order of dependency, so that foreign-key\n # relationships don't prevent a table from being deleted.\n for tbl in 
reversed(metadata.sorted_tables):\n tbl.delete().execute()", "def empty_db(self):\n try:\n self.cur.execute(\"DELETE FROM Crashes;\")\n self.con.commit()\n print 'Deleted all records'\n\n except sqlite.Error, e:\n print 'Unable to delete all records.'\n print 'Exception follows:'\n print e", "def drop_db(self) -> None:\n try:\n if not self._check_delete_okay():\n return\n except DatabaseWriteException as e:\n raise e\n\n existing_tables = self.list_tables()\n for table_name in existing_tables:\n self.dynamodb.Table(table_name).delete()", "def clearDatabase():\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def clear_tables(cursor):\n cursor.execute(\"delete from Review_Votes\")\n cursor.execute(\"delete from Review\")", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def erase_database():\n metadata = MetaData(engine)\n metadata.reflect()\n metadata.drop_all()\n Base.metadata.create_all(engine)\n return None", "def _drop_db(keep_tables=None):\n server.db.session.remove()\n if keep_tables is None:\n keep_tables = []\n meta = server.db.metadata\n for table in reversed(meta.sorted_tables):\n if table.name in keep_tables:\n continue\n server.db.session.execute(table.delete())\n server.db.session.commit()", "def empty_table(table: str):\n\n db, c = start_db()\n query = f'DELETE FROM {table}'\n\n c.execute(query)\n db.commit()\n db.close()", "def drop_tables(cur, conn) -> None:\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def song_clear():\r\n try:\r\n # Drop all tables then recreate them.\r\n Base.metadata.drop_all(bind=engine)\r\n print colored.red(\"Database cleared successfully.\", bold=12)\r\n Base.metadata.create_all(bind=engine)\r\n except:\r\n session.rollback()", "def drop_tables (cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def refresh_tables(db):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"DROP TABLE waiting\")\r\n c.execute(\"DROP TABLE help\")\r\n c.execute(\"DROP TABLE helped\")\r\n create_tables()\r\n except Error as e:\r\n print(e)" ]
[ "0.84315497", "0.82127285", "0.8170458", "0.8093179", "0.8075382", "0.79561704", "0.78853303", "0.7793635", "0.77400386", "0.7737219", "0.77196485", "0.77169883", "0.7667539", "0.75941366", "0.7584116", "0.7528714", "0.7509895", "0.74784756", "0.74581003", "0.74393415", "0.7438603", "0.7433077", "0.74290645", "0.7382199", "0.73705506", "0.73563164", "0.7337559", "0.73282695", "0.73140264", "0.7311313" ]
0.8532715
0
Test that SpecificLocation will self-create an ID object if none is given
def test_specific_location_init_without_arguments() -> None:

    # init works without arguments
    loc = SpecificLocation()

    assert isinstance(loc.id, UID)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_specific_location_init_with_specific_id() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n\n loc = SpecificLocation(id=uid)\n\n assert loc.id == uid", "def test_create_location(self):\n location = self.location\n\n self.assertTrue(isinstance(location, Location))\n self.assertEqual(location.name, \"Test Location\")", "def test_openmrs_location_uuid_set(self):\n cape_town = self.locations['Cape Town']\n case_id = uuid.uuid4().hex\n form, (case, ) = _create_case(domain=self.domain, case_id=case_id, owner_id=cape_town.location_id)\n\n self.assertEqual(\n get_ancestor_location_openmrs_uuid(case),\n self.openmrs_capetown_uuid\n )", "def test_openmrs_location_uuid_none(self):\n joburg = self.locations['Johannesburg']\n self.assertIsNone(joburg.metadata.get(LOCATION_OPENMRS_UUID))\n\n case_id = uuid.uuid4().hex\n form, (case, ) = _create_case(domain=self.domain, case_id=case_id, owner_id=joburg.location_id)\n\n self.assertIsNone(get_ancestor_location_openmrs_uuid(case))", "def testValidateId(self):\n #create a different person and try to use their id\n self.directory.invokeFactory(type_name=\"FSDPerson\",id=\"def456\",firstName=\"Joe\",lastName=\"Blow\")\n self.failUnless('def456' in self.person.validate_id('def456'))\n #create a different content object and try to use its id\n self.directory.invokeFactory(\"Document\", \"mydoc\")\n self.failUnless('mydoc' in self.person.validate_id('mydoc'))", "def create_location(self, location):\n \"Does nothing\"", "def test_create_id_identity(self):\n self.assertIs(Rectangle.create(id=True).id, True)\n self.assertIs(Rectangle.create(id=type).id, type)\n self.assertIs(Rectangle.create(id=None).id, None)", "def test_non_matching_location(self):\n user1 = get_user_model().objects.get(username='test1@example.com')\n self.client.login(username='test1@example.com', password='1')\n\n office = OfficeLocation.objects.all()[0]\n org = OrgGroup.objects.filter(parent__isnull=True)[0]\n\n submission1 = Interest()\n submission1.owner = user1\n submission1.for_coffee = True\n submission1.save()\n submission1.locations.add(office)\n submission1.departments.add(org)\n\n resp = self.client.get(reverse('mystery:mystery'))\n self.assertContains(resp, \"Cancel this\", status_code=200)\n\n user2 = random_user()\n office2 = OfficeLocation()\n office2.id = \"test_id\"\n office2.street = \"test office\"\n office2.city = \"test office\"\n office2.state = \"test office\"\n office2.zip = \"test office\"\n office2.save()\n submission2 = Interest()\n submission2.owner = user2\n submission2.is_active = False\n submission2.save()\n submission2.for_coffee = True\n submission2.locations.add(office2)\n submission2.departments.add(org)\n submission2.is_active = True\n submission2.save()\n\n resp = self.client.get(reverse('mystery:mystery'))\n self.assertContains(resp, \"Cancel this\", status_code=200)", "def location_fixture():\n return _create_location()", "def test_owner_has_no_locations(self):\n self.owner = CommCareUser.create(self.domain, 'no_location', '***', None, None)\n form, (case, ) = _create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=self.owner.get_id)\n location = get_case_location(case)\n self.assertIsNone(location)", "def test_id_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_id(-1)", "def test_owner_has_primary_location(self):\n gardens = self.locations['Gardens']\n self.owner = CommCareUser.create(self.domain, 'gardens_user', '***', None, None, location=gardens)\n form, (case, ) = 
_create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=self.owner.get_id)\n location = get_case_location(case)\n self.assertEqual(location, gardens)", "def test_init_id_type(self):\n self.assertIsInstance(Rectangle(1, 1).id, int)\n self.assertIsInstance(Rectangle(1, 1, id=None).id, int)", "def test_create_id_type(self):\n self.assertIsInstance(Rectangle.create().id, int)", "def test_init_id_identity(self):\n self.assertIs(Rectangle(1, 1, id=True).id, True)\n self.assertIs(Rectangle(1, 1, id=type).id, type)", "def test_init(self):\n self.assertEqual(self.location, Ship(self.location).location)", "def test_openmrs_location_uuid_ancestor(self):\n gardens = self.locations['Gardens']\n self.assertIsNone(gardens.metadata.get(LOCATION_OPENMRS_UUID))\n\n case_id = uuid.uuid4().hex\n form, (case, ) = _create_case(domain=self.domain, case_id=case_id, owner_id=gardens.location_id)\n\n self.assertEqual(\n get_ancestor_location_openmrs_uuid(case),\n self.openmrs_capetown_uuid\n )", "def test_method_get_instance(self):\n\n location = Location.get(TEST_LOCATION)\n\n # make sure one location is returned\n self.assertIsInstance(location, Location)", "def test_owner_is_location(self):\n joburg = self.locations['Johannesburg']\n form, (case, ) = _create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=joburg.location_id)\n location = get_case_location(case)\n self.assertEqual(location, joburg)", "def test_compare() -> None:\n\n obj = SpecificLocation()\n obj2 = SpecificLocation()\n\n assert obj != obj2\n\n obj._id = obj2.id\n\n assert obj == obj2", "def test_id_type_none(self):\n obj = Base(None)\n self.assertTrue(obj.id is 1)", "def test_missingId(self):\n node = Node()\n node.properties[\"datawire_nodeId\"] = \"4567\"\n self.assertEqual(node.getId(), \"4567\")", "def test_get_restaurant_by_id_none(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the only id should be 1.\n # id 2 does not exist.\n resp = self.test_client.get(self.API_BASE + '/2', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 404)", "def test_route_schema_init() -> None:\n destination = SpecificLocation()\n rschema = RouteSchema(destination)\n\n assert rschema.destination is not None\n assert rschema.destination._id == destination._id", "def test_init(self):\n st = SampleTemplate(1)\n self.assertTrue(st.id, 1)", "def test_bad_id(self):\n r1 = Square(10, 2)\n self.assertEqual(r1.id, 1)\n\n r2 = Square(2, 10)\n self.assertEqual(r2.id, 2)", "def test_04(self):\n base0 = Base(None)\n self.assertEqual(base0.id, 1)", "def get_location_by_id(self, location_id):", "def test_create_id_equality(self):\n rect = Rectangle(1, 1)\n self.assertNotEqual(rect.id, Rectangle.create().id)\n self.assertNotEqual(rect.id, Rectangle.create(id=None).id)\n self.assertEqual(Rectangle.create(id=0).id, 0)\n self.assertEqual(Rectangle.create(id=0.0).id, 0.0)\n self.assertEqual(Rectangle.create(id=\"0\").id, \"0\")\n self.assertEqual(Rectangle.create(id=(0,)).id, (0,))\n self.assertEqual(Rectangle.create(id=[0]).id, [0])\n self.assertEqual(Rectangle.create(id={0}).id, {0})\n self.assertEqual(Rectangle.create(id={0: 0}).id, {0: 0})", "def test_readable_id_valid(readable_id_value):\n program = ProgramFactory.build(readable_id=readable_id_value)\n program.save()\n assert program.id is not None\n course = CourseFactory.build(program=None, 
readable_id=readable_id_value)\n course.save()\n assert course.id is not None" ]
[ "0.8358078", "0.6789458", "0.6717033", "0.66556245", "0.6602681", "0.6558521", "0.6484271", "0.6427122", "0.63308305", "0.62980676", "0.62751615", "0.62623805", "0.62326145", "0.62217927", "0.6214734", "0.6178322", "0.6151656", "0.6114206", "0.60996836", "0.6031524", "0.6015083", "0.5982497", "0.5966991", "0.59559864", "0.59536266", "0.5942122", "0.59407157", "0.59387237", "0.5932329", "0.5882371" ]
0.7678006
1
Test that SpecificLocation will use the ID you pass into the constructor
def test_specific_location_init_with_specific_id() -> None:

    uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))

    loc = SpecificLocation(id=uid)

    assert loc.id == uid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_specific_location_init_without_arguments() -> None:\n\n # init works without arguments\n loc = SpecificLocation()\n\n assert isinstance(loc.id, UID)", "def get_location_by_id(self, location_id):", "def __init__(self, location_id, x=0, y=0):\r\n self.location_id = location_id\r\n self.x = x\r\n self.y = y", "def test_create_location(self):\n location = self.location\n\n self.assertTrue(isinstance(location, Location))\n self.assertEqual(location.name, \"Test Location\")", "def test_init(self):\n self.assertEqual(self.location, Ship(self.location).location)", "def test_openmrs_location_uuid_set(self):\n cape_town = self.locations['Cape Town']\n case_id = uuid.uuid4().hex\n form, (case, ) = _create_case(domain=self.domain, case_id=case_id, owner_id=cape_town.location_id)\n\n self.assertEqual(\n get_ancestor_location_openmrs_uuid(case),\n self.openmrs_capetown_uuid\n )", "def location_fixture():\n return _create_location()", "def location(self, location_id):\r\n return Location(self, location_id)", "def __init__(self, location):\n self.location = location", "def test_method_get_instance(self):\n\n location = Location.get(TEST_LOCATION)\n\n # make sure one location is returned\n self.assertIsInstance(location, Location)", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def test_location_is_location_instance(self):\n self.assertIsInstance(self.location, Location)", "def __init__(__self__, *,\n location: pulumi.Input[str]):\n pulumi.set(__self__, \"location\", location)", "def setUp(self):\n self.location = Location.get(TEST_LOCATION)", "def test_location(self):\n self.assertEqual(self.show.country, 'USA')\n self.assertEqual(self.show.state, 'VA')\n self.assertEqual(self.show.city, 'Hampton')", "def __init__(self, loc):\n self.loc = loc", "def test_location(self):\n self.assertEqual(self.show.country, None)\n self.assertEqual(self.show.state, None)\n self.assertEqual(self.show.country, None)", "def test_owner_has_primary_location(self):\n gardens = self.locations['Gardens']\n self.owner = CommCareUser.create(self.domain, 'gardens_user', '***', None, None, location=gardens)\n form, (case, ) = _create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=self.owner.get_id)\n location = get_case_location(case)\n self.assertEqual(location, gardens)", "def create_location(self, location):\n \"Does nothing\"", "def test_location_address(self):\n self.assertIsInstance(self.location.address, Address)\n self.assertEqual(self.location.address, self.address)", "def test_city_country_population(self):\n your_location = location_name(\"lviv\", \"ukraine\", \"123\")\n self.assertEqual(your_location, \"Lviv, Ukraine - Population 123\")", "def test_compare() -> None:\n\n obj = SpecificLocation()\n obj2 = SpecificLocation()\n\n assert obj != obj2\n\n obj._id = obj2.id\n\n assert obj == obj2", "def set_location(self, location):\n self.location = location", "def test_training_location(self):\n self.assertIsInstance(self.one_off_training.location, Location)\n self.assertEqual(self.one_off_training.location, self.location)", "def test_owner_is_location(self):\n joburg = self.locations['Johannesburg']\n form, (case, ) = _create_case(domain=self.domain, case_id=uuid.uuid4().hex, owner_id=joburg.location_id)\n location = get_case_location(case)\n self.assertEqual(location, joburg)", "def test_city_country(self):\n your_location = location_name(\"lviv\", \"ukraine\")\n self.assertEqual(your_location, \"Lviv, Ukraine\")", "def 
test_init_set_name(self):\n _name = 'test-name'\n _el = MarkerId(_name)\n self.assertEqual(_el.name, _name)", "def test_value_init1(self):\n rect_1 = Rectangle(10, 1)\n self.assertEqual(rect_1.id, 21)", "def __init__(self, componentId, locationId, **kw):\n self.componentId = componentId\n self.locationId = locationId\n super(ComponentInLocationError, self).__init__(**kw)", "def test_city_id(self):\n place = Place()\n self.assertTrue(hasattr(place, \"city_id\"))\n self.assertEqual(type(place.city_id), str)\n self.assertEqual(place.city_id, \"\")" ]
[ "0.7857052", "0.6929304", "0.68323725", "0.67941", "0.67722225", "0.672715", "0.6719905", "0.6682997", "0.66164076", "0.6570459", "0.63615984", "0.6358354", "0.62723994", "0.6271754", "0.6270507", "0.62166953", "0.61853737", "0.61331964", "0.61279035", "0.6127604", "0.6056048", "0.6050704", "0.60270077", "0.6026163", "0.6016772", "0.60111177", "0.5977808", "0.594255", "0.593085", "0.5921573" ]
0.87963134
0
Tests that SpecificLocation generates a pretty representation.
def test_pprint() -> None:

    uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))
    obj = SpecificLocation(id=uid, name="location")
    assert obj.pprint == "📌 location (SpecificLocation)@<UID:🙍🛖>"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_formatted_location(self):\n\t\tformatted_location = get_formatted_location('seoul', 'south korea')\n\t\tself.assertEqual(formatted_location, 'Seoul, South Korea')", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def describe_locations():\n pass", "def print_location(loc):\n print('')\n pass", "def test_route_pprint_property_method() -> None:\n route = Route(schema=RouteSchema(SpecificLocation()))\n assert route.pprint == \"🛣️ (Route)\"", "def test_03_extract(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_obj = self.m_config._extract_location(l_node.Yaml['Location'])\n l_ret = self.m_pyhouse_obj.House.Location\n # print(PrettyFormatAny.form(l_node, 'C1-03-A'))\n # print(PrettyFormatAny.form(l_obj, 'C1-03-B'))\n # print(PrettyFormatAny.form(l_ret, 'C1-03-C'))\n self.assertEqual(l_obj.Street, '1600 Pennsylvania Ave NW')\n self.assertEqual(l_obj.City, 'Washington')", "def test_02_Dump(self):\n self.m_location.Street = '_test street'\n l_ret = self.m_config.save_yaml_config()\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-A - Location', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'C2-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-C - Location', 190))\n # print(PrettyFormatAny.form(l_ret, 'C2-02-D - Location', 190))\n # print('Config: {}'.format(l_ret))\n self.assertEqual(l_ret['Location']['City'], 'Washington')", "def test_str(self):\n location = self.location\n\n self.assertEqual(str(location), self.location_raw['name'])", "def test_get_zr_location_structure(self):\n pass", "def test_location(self):\n self.assertEqual(self.show.country, 'USA')\n self.assertEqual(self.show.state, 'VA')\n self.assertEqual(self.show.city, 'Hampton')", "def pretty_location(data):\n\n issue = data.get(\"issue\", \"\")\n if issue:\n issue = \"(%s)\" % issue\n\n pages = data.get(\"pageInfo\", \"\")\n if \"pageInfo\" in data and pages:\n pages = \":\" + pages\n\n location = u\"{title} {volume}{issue}{pages} ({year})\".format(\n title=data.get(\"journalTitle\", \"\"),\n issue=issue,\n volume=data.get(\"journalVolume\", \"\"),\n pages=pages,\n year=data[\"pubYear\"],\n )\n location = location.replace(\" \", \" \")\n if location.endswith(\".\"):\n return location[0:-1]\n return location", "def test_portalPortal(self):\n streets = (\"9:00 Portal\", \"9:00 Portal\")\n for front, cross in (streets, reversed(streets)):\n location = parseLocation(\n \"Theme Camp\",\n \"Camp at Portal\",\n \"9:00 Portal @ 9:00 Portal\",\n front, cross,\n \"50 x 200\"\n )\n self.assertEquals(\n location,\n Location(\n name=\"Camp at Portal\",\n address=RodGarettAddress(\n concentric=None, radialHour=9, radialMinute=0,\n description=\"9:00 Portal, Theme Camp 50x200\",\n ),\n )\n )", "def test_create_location(self):\n location = self.location\n\n self.assertTrue(isinstance(location, Location))\n self.assertEqual(location.name, \"Test Location\")", "def __str__(self):\n return f'{self.location}'", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": 
\"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_city_country_population(self):\n your_location = location_name(\"lviv\", \"ukraine\", \"123\")\n self.assertEqual(your_location, \"Lviv, Ukraine - Population 123\")", "def test_to_string() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n assert str(obj) == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"\n assert obj.__repr__() == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"", "def location_fixture():\n return _create_location()", "def test_location_address(self):\n self.assertIsInstance(self.location.address, Address)\n self.assertEqual(self.location.address, self.address)", "def test_location(self):\n self.assertEqual(self.show.country, None)\n self.assertEqual(self.show.state, None)\n self.assertEqual(self.show.country, None)", "def test_portalBlank(self):\n streets = (\"6:00 Portal\", \"\")\n for front, cross in (streets, reversed(streets)):\n location = parseLocation(\n \"Burning Man Department\",\n \"Fire Conclave Convergence\",\n \"6:00 Portal\",\n front, cross,\n \"133 x 80\"\n )\n self.assertEquals(\n location,\n Location(\n name=\"Fire Conclave Convergence\",\n address=RodGarettAddress(\n concentric=None, radialHour=6, radialMinute=0,\n description=(\n \"6:00 Portal, Burning Man Department 133x80\"\n ),\n ),\n )\n )", "def test_get_zr_location_profile(self):\n pass", "def test_zone_repr(self):\n zone = Zone('example.com')\n self.assertEqual(f'{zone}', 'Zone<example.com>')", "def write_location_page(outfile: TextIO, do_print: bool, loc: TMB_Classes.LocationClass, point_locations: dict,\n location_species: dict, location_bi_names: dict, location_sp_names: dict,\n location_direct_refs: dict, location_cited_refs: dict, references: list,\n locations_range_species: dict, location_keys: Optional[dict]) -> None:\n\n def format_latlon(lat: float, lon: float) -> str:\n \"\"\"\n subfunction to format a lat,lon pair for printing\n \"\"\"\n if lat < 0:\n latdir = \"S\"\n else:\n latdir = \"N\"\n if lon < 0:\n londir = \"W\"\n else:\n londir = \"E\"\n return \"{:1.6f}&deg;{}, {:1.6f}&deg;{}\".format(abs(lat), latdir, abs(lon), londir)\n\n # main function code\n if do_print:\n start_page_division(outfile, \"base_page\")\n else:\n common_header_part1(outfile, loc.trimmed_name, indexpath=\"../\")\n if not loc.unknown:\n start_google_map_header(outfile)\n write_google_map_point_header(outfile, \"location_\" + place_to_filename(loc.name))\n end_google_map_header(outfile)\n common_header_part2(outfile, indexpath=\"../\", include_map=True)\n\n outfile.write(\" <header id=\\\"\" + place_to_filename(loc.name) + \".html\\\">\\n\")\n outfile.write(\" <h1 class=\\\"nobookmark\\\">\" + loc.trimmed_name + \"</h1>\\n\")\n if not do_print:\n outfile.write(\" <nav>\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"index.html\\\">\" + fetch_fa_glyph(\"index\") + \"Location Index</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </nav>\\n\")\n outfile.write(\" </header>\\n\")\n outfile.write(\" <dl>\\n\")\n\n if loc.n_alternates() > 0:\n outfile.write(\" <dt>Also Known As</dt>\\n\")\n outfile.write(\" <dd>\" + \", \".join(loc.alternates) + 
\"</dd>\\n\")\n if loc.n_parents() > 0:\n outfile.write(\" <dt>Included Within</dt>\\n\")\n if loc.parent is not None:\n p = point_locations[loc.parent]\n dstr = create_location_link(p, p.trimmed_name, do_print)\n else:\n dstr = None\n if loc.n_secondary_parents() > 0:\n if dstr is None:\n dlist = []\n else:\n dlist = [dstr]\n for a in loc.secondary_parents:\n p = point_locations[a]\n dlist.append(create_location_link(p, p.trimmed_name, do_print))\n dstr = \", \".join(dlist)\n outfile.write(\" <dd>\" + dstr + \"</dd>\\n\")\n if loc.unknown:\n outfile.write(\" <dt>Location Could not be Identified</dt>\\n\")\n if loc.notes is not None:\n outfile.write(\" <dd>\" + loc.notes + \"</dd>\\n\")\n else:\n if loc.notes is not None:\n outfile.write(\" <dt>Note</dt>\\n\")\n outfile.write(\" <dd>\" + loc.notes + \"</dd>\\n\")\n outfile.write(\" <dt>Approximate Coordinates</dt>\\n\")\n outfile.write(\" <dd>\" + format_latlon(loc.latitude, loc.longitude) + \"</dd>\\n\")\n outfile.write(\" <div class=\\\"map_section\\\">\\n\")\n if do_print:\n outfile.write(\" <figure>\\n\")\n outfile.write(\" <img src=\\\"\" + TMP_MAP_PATH +\n pointmap_name(\"location_\" + place_to_filename(loc.name)) + \".png\\\" alt=\\\"\" +\n loc.trimmed_name + \"\\\" title=\\\"Map of \" + loc.trimmed_name + \"\\\" />\\n\")\n outfile.write(\" </figure>\\n\")\n else:\n outfile.write(\" <div id=\\\"point_map_canvas\\\" class=\\\"sp_map\\\"></div>\\n\")\n\n outfile.write(\" <div class=\\\"map_download\\\">\\n\")\n outfile.write(\" The red marker indicates the coordinates used to represent this location, \"\n \"yellow markers all other locations contained within this location. Purple markers indicate \"\n \"fossil-only locations or sub-locations.\\n\")\n outfile.write(\" </div>\\n\")\n\n outfile.write(\" </div>\\n\")\n outfile.write(\" </dl>\\n\")\n all_species = set()\n all_species |= location_species[loc.name]\n all_bi_names = set()\n all_bi_names |= location_bi_names[loc.name]\n all_sp_names = set()\n all_sp_names |= location_sp_names[loc.name]\n all_refs = set()\n all_refs |= location_direct_refs[loc.name]\n all_refs |= location_cited_refs[loc.name]\n if loc.n_direct_children() > 0:\n outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n outfile.write(\" <h3 class=\\\"nobookmark\\\">Includes Subareas</h3>\\n\")\n outfile.write(\" <ul class=\\\"locpagelist\\\">\\n\")\n for c in loc.direct_children():\n outfile.write(\" <li>\" + create_location_link(c, c.trimmed_name, do_print) + \"</li>\\n\")\n all_species |= fetch_child_data(c, location_species)\n all_bi_names |= fetch_child_data(c, location_bi_names)\n all_sp_names |= fetch_child_data(c, location_sp_names)\n all_refs |= fetch_child_ref_data(c, location_direct_refs)\n all_refs |= fetch_child_ref_data(c, location_cited_refs)\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </section>\\n\")\n\n # add species\n range_species = set(find_species_by_name(s) for s in locations_range_species[loc])\n range_species -= all_species\n all_species |= range_species\n\n is_error = True\n print_star = False\n print_double = False\n outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n outfile.write(\" <h3 class=\\\"nobookmark\\\">Currently Recognized Species</h3>\\n\")\n if len(all_species) > 0:\n is_error = False\n # outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n # outfile.write(\" <h3 class=\\\"nobookmark\\\">Currently Recognized Species</h3>\\n\")\n outfile.write(\" <ul class=\\\"locpagelist\\\">\\n\")\n for s in sorted(list(all_species)):\n if s in 
location_species[loc.name]:\n suffix = \"\"\n elif s in range_species:\n suffix = DOUBLEDAGGER\n print_double = True\n else:\n suffix = STAR\n print_star = True\n outfile.write(\" <li>\" +\n create_species_link(s.genus, s.species, do_print, status=s.status, path=\"../\") +\n suffix + \"</li>\\n\")\n outfile.write(\" </ul>\\n\")\n else:\n outfile.write(\" <p>None</p>\\n\")\n outfile.write(\" </section>\\n\")\n\n if len(all_bi_names) > 0:\n is_error = False\n outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n outfile.write(\" <h3 class=\\\"nobookmark\\\">Names Which Have Been Used for This Area</h3>\\n\")\n outfile.write(\" <ul class=\\\"locpagelist\\\">\\n\")\n for s in sorted(list(all_bi_names)):\n if s in location_bi_names[loc.name]:\n suffix = \"\"\n else:\n suffix = STAR\n print_star = True\n outfile.write(\" <li><a href=\\\"\" + rel_link_prefix(do_print, \"../names/\") + name_to_filename(s) +\n \".html\\\">\" + format_name_string(s) + \"</a>\" + suffix + \"</li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </section>\\n\")\n\n if len(all_sp_names) > 0:\n is_error = False\n outfile.write(\" <section class=\\\"spsection\\\">\\n\")\n outfile.write(\" <h3 class=\\\"nobookmark\\\">Specific Names Which Have Been Used for This Area</h3>\\n\")\n outfile.write(\" <ul class=\\\"locpagelist\\\">\\n\")\n for s in sorted(list(all_sp_names)):\n if s in location_sp_names[loc.name]:\n suffix = \"\"\n else:\n suffix = STAR\n print_star = True\n outfile.write(\" <li><a href=\\\"\" + rel_link_prefix(do_print, \"../names/\") + \"sn_\" + s.name +\n \".html\\\">\" + format_name_string(s.name) + \"</a>\" + suffix + \"</li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </section>\\n\")\n\n # the following is to identify locations which may no longer used in the DB and can be removed\n # if is_error:\n # report_error(\"Phantom Location: \" + loc.name)\n\n write_annotated_reference_list(outfile, do_print, references, all_refs, location_direct_refs[loc.name],\n location_cited_refs[loc.name], \"../\")\n\n if len(location_direct_refs[loc.name]) != len(location_cited_refs[loc.name]):\n key_str = \"Entries marked with \" + DAGGER + \" represent indirect references to location through citation. \"\n else:\n key_str = \"\"\n if print_star:\n key_str += \"Entries marked with \" + STAR + \" are inferred from subareas. 
\"\n if print_double:\n key_str += \"Entries marked with \" + DOUBLEDAGGER + \" represent potential inhabitants inferred from \" \\\n \"species ranges.\"\n if key_str != \"\":\n outfile.write(\" <p>\" + key_str.strip() + \"</p>\\n\")\n\n if do_print:\n end_page_division(outfile)\n else:\n common_html_footer(outfile)\n\n # output place specific taxonomic key\n if (location_keys is not None) and (len(all_species) > 0):\n if do_print:\n write_taxonomic_key(outfile, do_print, location_keys[frozenset(all_species)], loc)\n else:\n with open(WEBOUT_PATH + \"locations/keys/\" + place_to_filename(loc.name) + \"_taxkey.html\", \"w\",\n encoding=\"utf-8\") as suboutfile:\n write_taxonomic_key(suboutfile, do_print, location_keys[frozenset(all_species)], loc)\n\n # write out children pages (primary children only)\n if loc.n_children() > 0:\n for c in loc.children:\n if do_print:\n write_location_page(outfile, do_print, c, point_locations, location_species, location_bi_names,\n location_sp_names, location_direct_refs, location_cited_refs, references,\n locations_range_species, location_keys)\n else:\n with open(WEBOUT_PATH + \"locations/\" + place_to_filename(c.name) + \".html\", \"w\",\n encoding=\"utf-8\") as suboutfile:\n write_location_page(suboutfile, do_print, c, point_locations, location_species, location_bi_names,\n location_sp_names, location_direct_refs, location_cited_refs, references,\n locations_range_species, location_keys)", "def test_locations_in_points(self):\n locations = [\n Location.objects.create(name=\"The Piton Foundation\", lat=39.7438167, lng=-104.9884953),\n Location.objects.create(name=\"Hull House\", lat=41.8716782, lng=-87.6474517)\n ]\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n story.locations.add(locations[0])\n story.locations.add(locations[1])\n story.save()\n req = HttpRequest()\n req.GET['story_id'] = story.story_id\n resp = self.resource.get_detail(req)\n dehydrated = simplejson.loads(resp.content)\n self.assertEqual(len(dehydrated['points']), 2)\n for location in locations:\n self.assertPointInList([location.lat, location.lng],\n dehydrated['points'])", "def test_location(self, all_metars):\n expected = [\"KIAH\", 'KGNV', 'KNID', 'KTPA', 'KP60']\n for metar, expected_val in zip(all_metars, expected):\n parser = Parser(metar)\n actual = parser.parse()\n assert expected_val == actual['location']", "def __str__(self):\n\n return f'{self.location}'", "def __str__(self):\n return u'Location({}, {}, {})'.format(self.query_path, self.field, self.visit_counter)", "def test_detail_format(self) -> None:\n r = self.perform_request('detail', True)\n self.assert_json_schema(r.json(), self.get_details_schema())", "def test_publicPlazaRadial(self):\n streets = (\"9:00 Public Plaza\", \"12:15\")\n for front, cross in (streets, reversed(streets)):\n location = parseLocation(\n \"Village\",\n \"Village in Public Plaza\",\n \"9:00 Public Plaza @ 12:15\",\n front, cross,\n \"110 x 200-\"\n )\n self.assertEquals(\n location,\n Location(\n name=\"Village in Public Plaza\",\n address=RodGarettAddress(\n concentric=905, radialHour=12, radialMinute=15,\n description=\"Village 110x200-\",\n ),\n )\n )" ]
[ "0.64041483", "0.6394315", "0.6256238", "0.6169211", "0.61543375", "0.6110976", "0.60569775", "0.6009788", "0.59648657", "0.5960665", "0.57977366", "0.57535416", "0.55598515", "0.55481315", "0.5548042", "0.5547863", "0.55326045", "0.55230683", "0.54892486", "0.5475312", "0.5463475", "0.541001", "0.5400633", "0.5395935", "0.5386543", "0.5383073", "0.53336024", "0.53218913", "0.5316647", "0.5271265" ]
0.70316887
0
Tests that default SpecificLocation serialization works as expected to Protobuf
def test_default_serialization() -> None:

    uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))
    obj = SpecificLocation(id=uid, name="Test")

    blob = sy.serialize(obj, to_proto=True)

    assert sy.serialize(obj) == blob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2", "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_binary_deserialization() -> None:\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n obj = sy.deserialize(blob=blob, from_bytes=True)\n assert obj == SpecificLocation(\n id=UID(value=uuid.UUID(int=333779996850170035686993356951732753684)),\n name=\"Test\",\n )", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": \"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_pprint() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, 
name=\"location\")\n assert obj.pprint == \"📌 location (SpecificLocation)@<UID:🙍🛖>\"", "def test_to_string() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n assert str(obj) == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"\n assert obj.__repr__() == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"", "def test_get_zr_location_structure(self):\n pass", "def test_str(self):\n location = self.location\n\n self.assertEqual(str(location), self.location_raw['name'])", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_python_to_json(self):\n location = {\n 'address' : '123 Main St.',\n 'lat' : 127.0,\n 'lng' : -42,\n 'name' : 'nowhere',\n 'owner' : ObjectId(),\n '_id' : ObjectId()\n }\n\n parsed = Location.flatten(location)\n\n # these should all be the same\n self.assertEqual(parsed['address'], location['address'])\n self.assertEqual(parsed['lat'], location['lat'])\n self.assertEqual(parsed['lng'], location['lng'])\n self.assertEqual(parsed['name'], location['name'])\n\n # owner should be removed\n self.assertFalse(parsed.has_key('owner'))\n\n # and id should be renamed from _id to id, and flattened\n self.assertFalse(parsed.has_key('_id'))\n self.assertTrue(parsed.has_key('id'))\n self.assertEqual(parsed['id'], str(location['_id']))", "def test06_serialize(self):\n uri = URIRef('http://ex.org/ldprs')\n g = Graph()\n g.add((uri, RDF.type, URIRef('http://ex.org/some_type')))\n g.add((URIRef('http://ex.org/a'), URIRef('http://ex.org/b'), Literal('LITERAL')))\n r = LDPRS(uri=uri, content=g)\n s = r.serialize()\n self.assertIn('@prefix ldp: <http://www.w3.org/ns/ldp#> .', s)\n self.assertIn('ldprs', s) # might prefix or not\n self.assertIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertIn('\"LITERAL\"', s)\n #\n s = r.serialize(omits=['content'])\n self.assertIn('ldprs', s) # might prefix or not\n self.assertNotIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertNotIn('\"LITERAL\"', s)", "def test_oef_object_transator():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", 
Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n oef_desc = OEFObjectTranslator.to_oef_description(desc)\n new_desc = OEFObjectTranslator.from_oef_description(oef_desc)\n assert desc.values[\"location\"] == new_desc.values[\"location\"]", "def test_oef_serialization_description():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n msg = OefSearchMessage(\n performative=OefSearchMessage.Performative.REGISTER_SERVICE,\n dialogue_reference=(str(1), \"\"),\n service_description=desc,\n )\n msg_bytes = OefSearchMessage.serializer.encode(msg)\n assert len(msg_bytes) > 0\n recovered_msg = OefSearchMessage.serializer.decode(msg_bytes)\n assert recovered_msg == msg", "def test_02_Dump(self):\n self.m_location.Street = '_test street'\n l_ret = self.m_config.save_yaml_config()\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-A - Location', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'C2-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-C - Location', 190))\n # print(PrettyFormatAny.form(l_ret, 'C2-02-D - Location', 190))\n # print('Config: {}'.format(l_ret))\n self.assertEqual(l_ret['Location']['City'], 'Washington')", "def test_convenience_method():\n Club = Map.from_file(\"definitions/Club.buf\")\n\n members = [\n dict(name=\"Bede\", age=20),\n dict(name=\"Jake\", age=21),\n dict(name=\"Cal\", age=22)\n ]\n\n assert bytes(Club(members=members, name=\"Klub\").to_bytes()) == \\\n bytes(Club.to_bytes({\"members\": members, \"name\": \"Klub\"}))", "def test_location_address(self):\n self.assertIsInstance(self.location.address, Address)\n self.assertEqual(self.location.address, self.address)", "def test_string_in_serializer() -> None:\n assert cv.custom_serializer(cv.string) == {\n \"type\": \"string\",\n }", "def test_user_type_simple_attributes_with_roundtrip():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n bytestream = me.to_bytes()\n new_me = Person.read(bytestream)\n assert \"Bede Kelly\" == new_me.name\n assert 20 == new_me.age", "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def location_fixture():\n return _create_location()", "def test_serialize_parse(\n tmp_path: Path,\n simple_graph: Graph,\n simple_dataset: Dataset,\n args: Tuple[str, GraphType, DestinationType, Optional[str]],\n) -> None:\n serializer_name, graph_type, destination_type, encoding = args\n format = serializer_dict[serializer_name]\n graph: Union[Graph, Dataset]\n if graph_type == GraphType.QUAD:\n graph = simple_dataset\n elif graph_type == GraphType.TRIPLE:\n graph = simple_graph\n else:\n raise ValueError(f\"graph_type {graph_type!r} is not supported\")\n with destination_type.make_ref(tmp_path) as dest_ref:\n destination = None if dest_ref is None else narrow_dest_param(dest_ref.param)\n serialize_result = graph.serialize(\n destination=destination,\n format=serializer_name,\n encoding=encoding,\n )\n\n 
logging.debug(\"serialize_result = %r, dest_ref = %s\", serialize_result, dest_ref)\n\n if dest_ref is None:\n if encoding is None:\n assert isinstance(serialize_result, str)\n serialized_data = serialize_result\n else:\n assert isinstance(serialize_result, bytes)\n serialized_data = serialize_result.decode(encoding)\n else:\n assert isinstance(serialize_result, Graph)\n assert dest_ref.path.exists()\n serialized_data = dest_ref.path.read_bytes().decode(\n \"utf-8\" if encoding is None else encoding\n )\n\n logging.debug(\"serialized_data = %s\", serialized_data)\n check_serialized(format, graph, serialized_data)", "def test_serialization(self, example_pep_cfg_path):\n td = tempfile.mkdtemp()\n fn = os.path.join(td, \"serialized_sample.yaml\")\n p = Project(cfg=example_pep_cfg_path)\n sample = p.samples[0]\n sample.set = set([\"set\"])\n sample.dict = dict({\"dict\": \"dict\"})\n sample.list = list([\"list\"])\n sample.to_yaml(fn)\n with open(fn, \"r\") as f:\n contents = f.read()\n assert \"set\" in contents\n assert \"dict\" in contents\n assert \"list\" in contents", "def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def test_address_to_tree(self):\n pass" ]
[ "0.7711845", "0.75500697", "0.7217684", "0.6926107", "0.6428363", "0.64130694", "0.6011315", "0.5987244", "0.58243567", "0.5751032", "0.56994116", "0.5697481", "0.5630711", "0.5615444", "0.560221", "0.55844355", "0.5583882", "0.5381914", "0.53770804", "0.5376579", "0.5345937", "0.53110814", "0.5302014", "0.5288854", "0.52647537", "0.52627623", "0.520525", "0.5161789", "0.515921", "0.51587504" ]
0.7852183
0
Tests that default SpecificLocation deserialization works as expected from Protobuf
def test_default_deserialization() -> None:

    uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))
    obj = SpecificLocation(id=uid, name="Test")

    blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))

    obj2 = sy.deserialize(blob=blob)
    assert obj == obj2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def test_binary_deserialization() -> None:\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n obj = sy.deserialize(blob=blob, from_bytes=True)\n assert obj == SpecificLocation(\n id=UID(value=uuid.UUID(int=333779996850170035686993356951732753684)),\n name=\"Test\",\n )", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": \"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def _post_deserialize (self):\n pass", "def test_deserialize_incomplete(self):\n if (self._cls != 'MetaschemaType') and (len(self._valid_decoded) > 0):\n out = self.instance.serialize(self._valid_decoded[0])\n obj, metadata = self.instance.deserialize(out[:-1])\n self.assert_equal(metadata['incomplete'], True)", "def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5", "def getDeserializer():", "def deserialize_object(d):\n pass", "def 
test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_oef_object_transator():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n oef_desc = OEFObjectTranslator.to_oef_description(desc)\n new_desc = OEFObjectTranslator.from_oef_description(oef_desc)\n assert desc.values[\"location\"] == new_desc.values[\"location\"]", "def test_deserialize_with_additional_properties(self):\n\n # Dog is allOf with two child schemas.\n # The OAS document for Dog does not specify the 'additionalProperties' keyword,\n # which means that by default, the Dog schema must allow undeclared properties.\n # The additionalProperties keyword is used to control the handling of extra stuff,\n # that is, properties whose names are not listed in the properties keyword.\n # By default any additional properties are allowed.\n from petstore_api.model import dog, mammal, zebra, banana_req\n data = {\n 'className': 'Dog',\n 'color': 'brown',\n 'breed': 'golden retriever',\n # Below are additional, undeclared properties.\n 'group': 'Terrier Group',\n 'size': 'medium',\n }\n response = self.__response(data)\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=dog.Dog),\n },\n )\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, dog.Dog))\n self.assertEqual(body['className'], 'Dog')\n self.assertEqual(body['color'], 'brown')\n self.assertEqual(body['breed'], 'golden retriever')\n self.assertEqual(body['group'], 'Terrier Group')\n self.assertEqual(body['size'], 'medium')\n\n # The 'zebra' schema allows additional properties by explicitly setting\n # additionalProperties: true.\n # This is equivalent to 'additionalProperties' not being present.\n data = {\n 'className': 'zebra',\n 'type': 'plains',\n # Below are additional, undeclared properties\n 'group': 'abc',\n 'size': 3,\n 'p1': True,\n 'p2': ['a', 'b', 123],\n }\n response = self.__response(data)\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=mammal.Mammal),\n },\n )\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, zebra.Zebra))\n self.assertEqual(body['className'], 'zebra')\n self.assertEqual(body['type'], 'plains')\n self.assertEqual(bool(body['p1']), True)\n\n # The 'bananaReq' schema disallows additional properties by explicitly setting\n # additionalProperties: false\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=banana_req.BananaReq),\n },\n )\n with self.assertRaisesRegex(\n petstore_api.exceptions.ApiTypeError,\n r\"BananaReq was passed 1 invalid argument: \\['unknown-group'\\]\"\n ):\n data = {\n 
'lengthCm': 21.2,\n 'sweet': False,\n # Below are additional, undeclared properties. They are not allowed,\n # an exception must be raised.\n 'unknown-group': 'abc',\n }\n response = self.__response(data)\n _response_for_200.deserialize(response, self.configuration)", "def test_deserialize(self):\n prop = VersionProperty(default=\"1.1.1\")\n self.assertEqual(prop.deserialize(\"1.1.1\"), \"1.1.1\")", "def test_get_zr_location_structure(self):\n pass", "def test_python_to_json(self):\n location = {\n 'address' : '123 Main St.',\n 'lat' : 127.0,\n 'lng' : -42,\n 'name' : 'nowhere',\n 'owner' : ObjectId(),\n '_id' : ObjectId()\n }\n\n parsed = Location.flatten(location)\n\n # these should all be the same\n self.assertEqual(parsed['address'], location['address'])\n self.assertEqual(parsed['lat'], location['lat'])\n self.assertEqual(parsed['lng'], location['lng'])\n self.assertEqual(parsed['name'], location['name'])\n\n # owner should be removed\n self.assertFalse(parsed.has_key('owner'))\n\n # and id should be renamed from _id to id, and flattened\n self.assertFalse(parsed.has_key('_id'))\n self.assertTrue(parsed.has_key('id'))\n self.assertEqual(parsed['id'], str(location['_id']))", "def deserialize(self, data):", "def test_03_extract(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_obj = self.m_config._extract_location(l_node.Yaml['Location'])\n l_ret = self.m_pyhouse_obj.House.Location\n # print(PrettyFormatAny.form(l_node, 'C1-03-A'))\n # print(PrettyFormatAny.form(l_obj, 'C1-03-B'))\n # print(PrettyFormatAny.form(l_ret, 'C1-03-C'))\n self.assertEqual(l_obj.Street, '1600 Pennsylvania Ave NW')\n self.assertEqual(l_obj.City, 'Washington')", "def test_jsonify_decode(self):\n\n Point = namedtuple('Point', ['x', 'y'], False)\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n json_str = '''{\n \"__class__\": \"Foo\",\n \"foo_id\": \"1234\",\n \"str_field\": \"anything\",\n \"int_field\": 123,\n \"date_field\": \"2014-12-13\",\n \"bool_field\": false,\n \"tuple_field\":{\n \"x\": 1,\n \"y\": 2\n }\n }'''\n foo = Foo.from_jsonify(json.loads(json_str))\n\n self.assertEqual(foo.foo_id, '1234')\n self.assertEqual(foo.int_field, 123)\n self.assertEqual(foo.bool_field, False)\n self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))\n Point = namedtuple('Point', ['x', 'y'], False)\n self.assertEqual(foo.tuple_field, Point(x=1, y=2))", "def test_user_type_simple_attributes_with_roundtrip():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n bytestream = me.to_bytes()\n new_me = Person.read(bytestream)\n assert \"Bede Kelly\" == new_me.name\n assert 20 == new_me.age", "def __init__(self, ignoreUnknownFields = False):\n super(Deserializer, self).__init__(ignore_unknown_fields = ignoreUnknownFields)", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n 
if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n end = 0\n _x = self\n start = end\n end += 128\n (_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z,) = _get_struct_12d2f3d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def testWrongTypeAssignment(self):\n self.assertRaises(messages.ValidationError,\n protojson.decode_message,\n MyMessage, '{\"a_string\": 10}')", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.obstacleinfo is None:\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n if self.oppinfo is None:\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n if self.robotinfo is None:\n self.robotinfo = None\n if self.ballinfo is None:\n self.ballinfo = None\n if self.coachinfo is None:\n self.coachinfo = nubot_common.msg.CoachInfo()\n if self.pass_cmd is None:\n self.pass_cmd = nubot_common.msg.PassCommands()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.obstacleinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.obstacleinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.obstacleinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.polar_pos.append(val1)\n _x = self\n start = end\n end += 12\n (_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.oppinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.oppinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.polar_pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.robotinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.RobotInfo()\n _v12 = val1.header\n start = end\n end += 4\n (_v12.seq,) = _get_struct_I().unpack(str[start:end])\n _v13 = _v12.stamp\n _x = _v13\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v12.frame_id = str[start:end].decode('utf-8')\n else:\n _v12.frame_id = str[start:end]\n _x = val1\n start = end\n end += 28\n (_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum,) = _get_struct_7i().unpack(str[start:end])\n _v14 = val1.pos\n _x = _v14\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v15 = val1.heading\n start = end\n end += 4\n (_v15.theta,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (val1.vrot,) = _get_struct_f().unpack(str[start:end])\n _v16 = val1.vtrans\n _x = _v16\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 9\n (_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, _x.current_role, _x.role_time,) = _get_struct_5Bf().unpack(str[start:end])\n val1.iskick = bool(val1.iskick)\n val1.isvalid = bool(val1.isvalid)\n val1.isstuck = bool(val1.isstuck)\n val1.isdribble = bool(val1.isdribble)\n _v17 = val1.target\n _x = _v17\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.robotinfo.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.ballinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.BallInfo()\n _v18 = val1.header\n start = end\n end += 4\n (_v18.seq,) = _get_struct_I().unpack(str[start:end])\n _v19 = _v18.stamp\n _x = _v19\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v18.frame_id = str[start:end].decode('utf-8')\n else:\n _v18.frame_id = str[start:end]\n start = end\n end += 4\n 
(val1.ballinfostate,) = _get_struct_i().unpack(str[start:end])\n _v20 = val1.pos\n _x = _v20\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v21 = val1.real_pos\n _x = _v21\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n _v22 = val1.velocity\n _x = _v22\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 2\n (_x.pos_known, _x.velocity_known,) = _get_struct_2B().unpack(str[start:end])\n val1.pos_known = bool(val1.pos_known)\n val1.velocity_known = bool(val1.velocity_known)\n self.ballinfo.append(val1)\n _x = self\n start = end\n end += 12\n (_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.coachinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.coachinfo.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 54\n (_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, _x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, _x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, _x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid,) = _get_struct_3B4f2h3B2I4f4B().unpack(str[start:end])\n self.pass_cmd.is_passout = bool(self.pass_cmd.is_passout)\n self.pass_cmd.is_dynamic_pass = bool(self.pass_cmd.is_dynamic_pass)\n self.pass_cmd.is_static_pass = bool(self.pass_cmd.is_static_pass)\n self.pass_cmd.is_valid = bool(self.pass_cmd.is_valid)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def testSchemaLoadingAsString(self):\n api = self.ApiFromDiscoveryDoc('latitude.v1.json')\n self.assertEquals(4, len(api._schemas))", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)" ]
[ "0.7662707", "0.7108986", "0.6969302", "0.6869886", "0.6192624", "0.60881054", "0.59459627", "0.57979983", "0.5787996", "0.57652116", "0.57554007", "0.57256055", "0.57252336", "0.56568176", "0.56436545", "0.5626855", "0.56126255", "0.55936146", "0.5572628", "0.5544999", "0.55178696", "0.5507375", "0.550637", "0.5499343", "0.54662853", "0.5455204", "0.5442773", "0.5431276", "0.5421393", "0.53869945" ]
0.7796891
0
Tests that default SpecificLocation serialization works as expected to Protobuf
def test_proto_serialization() -> None: uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684)) obj = SpecificLocation(id=uid, name="Test") blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name="Test") assert sy.serialize(obj, to_proto=True) == blob assert sy.serialize(obj, to_proto=True) == blob assert sy.serialize(obj, to_proto=True) == blob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2", "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_binary_deserialization() -> None:\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n obj = sy.deserialize(blob=blob, from_bytes=True)\n assert obj == SpecificLocation(\n id=UID(value=uuid.UUID(int=333779996850170035686993356951732753684)),\n name=\"Test\",\n )", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": \"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_pprint() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"location\")\n assert obj.pprint == \"📌 location (SpecificLocation)@<UID:🙍🛖>\"", "def test_to_string() -> None:\n\n uid = 
UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n assert str(obj) == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"\n assert obj.__repr__() == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"", "def test_get_zr_location_structure(self):\n pass", "def test_str(self):\n location = self.location\n\n self.assertEqual(str(location), self.location_raw['name'])", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_python_to_json(self):\n location = {\n 'address' : '123 Main St.',\n 'lat' : 127.0,\n 'lng' : -42,\n 'name' : 'nowhere',\n 'owner' : ObjectId(),\n '_id' : ObjectId()\n }\n\n parsed = Location.flatten(location)\n\n # these should all be the same\n self.assertEqual(parsed['address'], location['address'])\n self.assertEqual(parsed['lat'], location['lat'])\n self.assertEqual(parsed['lng'], location['lng'])\n self.assertEqual(parsed['name'], location['name'])\n\n # owner should be removed\n self.assertFalse(parsed.has_key('owner'))\n\n # and id should be renamed from _id to id, and flattened\n self.assertFalse(parsed.has_key('_id'))\n self.assertTrue(parsed.has_key('id'))\n self.assertEqual(parsed['id'], str(location['_id']))", "def test06_serialize(self):\n uri = URIRef('http://ex.org/ldprs')\n g = Graph()\n g.add((uri, RDF.type, URIRef('http://ex.org/some_type')))\n g.add((URIRef('http://ex.org/a'), URIRef('http://ex.org/b'), Literal('LITERAL')))\n r = LDPRS(uri=uri, content=g)\n s = r.serialize()\n self.assertIn('@prefix ldp: <http://www.w3.org/ns/ldp#> .', s)\n self.assertIn('ldprs', s) # might prefix or not\n self.assertIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertIn('\"LITERAL\"', s)\n #\n s = r.serialize(omits=['content'])\n self.assertIn('ldprs', s) # might prefix or not\n self.assertNotIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertNotIn('\"LITERAL\"', s)", "def test_oef_object_transator():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, 
data_model=foo_datamodel\n )\n oef_desc = OEFObjectTranslator.to_oef_description(desc)\n new_desc = OEFObjectTranslator.from_oef_description(oef_desc)\n assert desc.values[\"location\"] == new_desc.values[\"location\"]", "def test_oef_serialization_description():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n msg = OefSearchMessage(\n performative=OefSearchMessage.Performative.REGISTER_SERVICE,\n dialogue_reference=(str(1), \"\"),\n service_description=desc,\n )\n msg_bytes = OefSearchMessage.serializer.encode(msg)\n assert len(msg_bytes) > 0\n recovered_msg = OefSearchMessage.serializer.decode(msg_bytes)\n assert recovered_msg == msg", "def test_02_Dump(self):\n self.m_location.Street = '_test street'\n l_ret = self.m_config.save_yaml_config()\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-A - Location', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'C2-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-C - Location', 190))\n # print(PrettyFormatAny.form(l_ret, 'C2-02-D - Location', 190))\n # print('Config: {}'.format(l_ret))\n self.assertEqual(l_ret['Location']['City'], 'Washington')", "def test_convenience_method():\n Club = Map.from_file(\"definitions/Club.buf\")\n\n members = [\n dict(name=\"Bede\", age=20),\n dict(name=\"Jake\", age=21),\n dict(name=\"Cal\", age=22)\n ]\n\n assert bytes(Club(members=members, name=\"Klub\").to_bytes()) == \\\n bytes(Club.to_bytes({\"members\": members, \"name\": \"Klub\"}))", "def test_location_address(self):\n self.assertIsInstance(self.location.address, Address)\n self.assertEqual(self.location.address, self.address)", "def test_string_in_serializer() -> None:\n assert cv.custom_serializer(cv.string) == {\n \"type\": \"string\",\n }", "def test_user_type_simple_attributes_with_roundtrip():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n bytestream = me.to_bytes()\n new_me = Person.read(bytestream)\n assert \"Bede Kelly\" == new_me.name\n assert 20 == new_me.age", "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def location_fixture():\n return _create_location()", "def test_serialize_parse(\n tmp_path: Path,\n simple_graph: Graph,\n simple_dataset: Dataset,\n args: Tuple[str, GraphType, DestinationType, Optional[str]],\n) -> None:\n serializer_name, graph_type, destination_type, encoding = args\n format = serializer_dict[serializer_name]\n graph: Union[Graph, Dataset]\n if graph_type == GraphType.QUAD:\n graph = simple_dataset\n elif graph_type == GraphType.TRIPLE:\n graph = simple_graph\n else:\n raise ValueError(f\"graph_type {graph_type!r} is not supported\")\n with destination_type.make_ref(tmp_path) as dest_ref:\n destination = None if dest_ref is None else narrow_dest_param(dest_ref.param)\n serialize_result = graph.serialize(\n destination=destination,\n format=serializer_name,\n encoding=encoding,\n )\n\n logging.debug(\"serialize_result = %r, dest_ref = %s\", serialize_result, dest_ref)\n\n if dest_ref is None:\n if encoding is None:\n assert 
isinstance(serialize_result, str)\n serialized_data = serialize_result\n else:\n assert isinstance(serialize_result, bytes)\n serialized_data = serialize_result.decode(encoding)\n else:\n assert isinstance(serialize_result, Graph)\n assert dest_ref.path.exists()\n serialized_data = dest_ref.path.read_bytes().decode(\n \"utf-8\" if encoding is None else encoding\n )\n\n logging.debug(\"serialized_data = %s\", serialized_data)\n check_serialized(format, graph, serialized_data)", "def test_serialization(self, example_pep_cfg_path):\n td = tempfile.mkdtemp()\n fn = os.path.join(td, \"serialized_sample.yaml\")\n p = Project(cfg=example_pep_cfg_path)\n sample = p.samples[0]\n sample.set = set([\"set\"])\n sample.dict = dict({\"dict\": \"dict\"})\n sample.list = list([\"list\"])\n sample.to_yaml(fn)\n with open(fn, \"r\") as f:\n contents = f.read()\n assert \"set\" in contents\n assert \"dict\" in contents\n assert \"list\" in contents", "def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def test_address_to_tree(self):\n pass" ]
[ "0.7852183", "0.75500697", "0.7217684", "0.6926107", "0.6428363", "0.64130694", "0.6011315", "0.5987244", "0.58243567", "0.5751032", "0.56994116", "0.5697481", "0.5630711", "0.5615444", "0.560221", "0.55844355", "0.5583882", "0.5381914", "0.53770804", "0.5376579", "0.5345937", "0.53110814", "0.5302014", "0.5288854", "0.52647537", "0.52627623", "0.520525", "0.5161789", "0.515921", "0.51587504" ]
0.7711845
1
Tests that default SpecificLocation deserialization works as expected from Protobuf
def test_proto_deserialization() -> None: uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684)) obj = SpecificLocation(id=uid) blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid)) obj2 = sy.deserialize(blob=blob, from_proto=True) assert obj == obj2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def test_binary_deserialization() -> None:\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n obj = sy.deserialize(blob=blob, from_bytes=True)\n assert obj == SpecificLocation(\n id=UID(value=uuid.UUID(int=333779996850170035686993356951732753684)),\n name=\"Test\",\n )", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": \"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def _post_deserialize (self):\n pass", "def test_deserialize_incomplete(self):\n if (self._cls != 'MetaschemaType') and (len(self._valid_decoded) > 0):\n out = self.instance.serialize(self._valid_decoded[0])\n obj, metadata = self.instance.deserialize(out[:-1])\n self.assert_equal(metadata['incomplete'], True)", "def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5", "def getDeserializer():", "def deserialize_object(d):\n pass", "def 
test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_oef_object_transator():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n oef_desc = OEFObjectTranslator.to_oef_description(desc)\n new_desc = OEFObjectTranslator.from_oef_description(oef_desc)\n assert desc.values[\"location\"] == new_desc.values[\"location\"]", "def test_deserialize_with_additional_properties(self):\n\n # Dog is allOf with two child schemas.\n # The OAS document for Dog does not specify the 'additionalProperties' keyword,\n # which means that by default, the Dog schema must allow undeclared properties.\n # The additionalProperties keyword is used to control the handling of extra stuff,\n # that is, properties whose names are not listed in the properties keyword.\n # By default any additional properties are allowed.\n from petstore_api.model import dog, mammal, zebra, banana_req\n data = {\n 'className': 'Dog',\n 'color': 'brown',\n 'breed': 'golden retriever',\n # Below are additional, undeclared properties.\n 'group': 'Terrier Group',\n 'size': 'medium',\n }\n response = self.__response(data)\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=dog.Dog),\n },\n )\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, dog.Dog))\n self.assertEqual(body['className'], 'Dog')\n self.assertEqual(body['color'], 'brown')\n self.assertEqual(body['breed'], 'golden retriever')\n self.assertEqual(body['group'], 'Terrier Group')\n self.assertEqual(body['size'], 'medium')\n\n # The 'zebra' schema allows additional properties by explicitly setting\n # additionalProperties: true.\n # This is equivalent to 'additionalProperties' not being present.\n data = {\n 'className': 'zebra',\n 'type': 'plains',\n # Below are additional, undeclared properties\n 'group': 'abc',\n 'size': 3,\n 'p1': True,\n 'p2': ['a', 'b', 123],\n }\n response = self.__response(data)\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=mammal.Mammal),\n },\n )\n deserialized = _response_for_200.deserialize(response, self.configuration)\n body = deserialized.body\n self.assertTrue(isinstance(body, zebra.Zebra))\n self.assertEqual(body['className'], 'zebra')\n self.assertEqual(body['type'], 'plains')\n self.assertEqual(bool(body['p1']), True)\n\n # The 'bananaReq' schema disallows additional properties by explicitly setting\n # additionalProperties: false\n _response_for_200 = api_client.OpenApiResponse(\n content={\n self.json_content_type: api_client.MediaType(schema=banana_req.BananaReq),\n },\n )\n with self.assertRaisesRegex(\n petstore_api.exceptions.ApiTypeError,\n r\"BananaReq was passed 1 invalid argument: \\['unknown-group'\\]\"\n ):\n data = {\n 
'lengthCm': 21.2,\n 'sweet': False,\n # Below are additional, undeclared properties. They are not allowed,\n # an exception must be raised.\n 'unknown-group': 'abc',\n }\n response = self.__response(data)\n _response_for_200.deserialize(response, self.configuration)", "def test_deserialize(self):\n prop = VersionProperty(default=\"1.1.1\")\n self.assertEqual(prop.deserialize(\"1.1.1\"), \"1.1.1\")", "def test_get_zr_location_structure(self):\n pass", "def test_python_to_json(self):\n location = {\n 'address' : '123 Main St.',\n 'lat' : 127.0,\n 'lng' : -42,\n 'name' : 'nowhere',\n 'owner' : ObjectId(),\n '_id' : ObjectId()\n }\n\n parsed = Location.flatten(location)\n\n # these should all be the same\n self.assertEqual(parsed['address'], location['address'])\n self.assertEqual(parsed['lat'], location['lat'])\n self.assertEqual(parsed['lng'], location['lng'])\n self.assertEqual(parsed['name'], location['name'])\n\n # owner should be removed\n self.assertFalse(parsed.has_key('owner'))\n\n # and id should be renamed from _id to id, and flattened\n self.assertFalse(parsed.has_key('_id'))\n self.assertTrue(parsed.has_key('id'))\n self.assertEqual(parsed['id'], str(location['_id']))", "def deserialize(self, data):", "def test_03_extract(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_obj = self.m_config._extract_location(l_node.Yaml['Location'])\n l_ret = self.m_pyhouse_obj.House.Location\n # print(PrettyFormatAny.form(l_node, 'C1-03-A'))\n # print(PrettyFormatAny.form(l_obj, 'C1-03-B'))\n # print(PrettyFormatAny.form(l_ret, 'C1-03-C'))\n self.assertEqual(l_obj.Street, '1600 Pennsylvania Ave NW')\n self.assertEqual(l_obj.City, 'Washington')", "def test_jsonify_decode(self):\n\n Point = namedtuple('Point', ['x', 'y'], False)\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n json_str = '''{\n \"__class__\": \"Foo\",\n \"foo_id\": \"1234\",\n \"str_field\": \"anything\",\n \"int_field\": 123,\n \"date_field\": \"2014-12-13\",\n \"bool_field\": false,\n \"tuple_field\":{\n \"x\": 1,\n \"y\": 2\n }\n }'''\n foo = Foo.from_jsonify(json.loads(json_str))\n\n self.assertEqual(foo.foo_id, '1234')\n self.assertEqual(foo.int_field, 123)\n self.assertEqual(foo.bool_field, False)\n self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))\n Point = namedtuple('Point', ['x', 'y'], False)\n self.assertEqual(foo.tuple_field, Point(x=1, y=2))", "def test_user_type_simple_attributes_with_roundtrip():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n bytestream = me.to_bytes()\n new_me = Person.read(bytestream)\n assert \"Bede Kelly\" == new_me.name\n assert 20 == new_me.age", "def __init__(self, ignoreUnknownFields = False):\n super(Deserializer, self).__init__(ignore_unknown_fields = ignoreUnknownFields)", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n 
if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n end = 0\n _x = self\n start = end\n end += 128\n (_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z,) = _get_struct_12d2f3d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def testPickle(self):\n global MyEnum\n global AnotherMessage\n global MyMessage\n\n class MyEnum(messages.Enum):\n val1 = 1\n val2 = 2\n\n class AnotherMessage(messages.Message):\n string = messages.StringField(1, repeated=True)\n\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1)\n field2 = messages.EnumField(MyEnum, 2)\n field3 = messages.MessageField(AnotherMessage, 3)\n\n message = MyMessage(field1=1, field2=MyEnum.val2,\n field3=AnotherMessage(string=['a', 'b', 'c']))\n message.set_unrecognized_field(\n 'exists', 'value', messages.Variant.STRING)\n message.set_unrecognized_field('repeated', ['list', 0, ('test',)],\n messages.Variant.STRING)\n unpickled = pickle.loads(pickle.dumps(message))\n self.assertEquals(message, unpickled)\n self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)\n self.assertTrue('exists' in message.all_unrecognized_fields())\n self.assertEquals(('value', messages.Variant.STRING),\n message.get_unrecognized_field_info('exists'))\n self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),\n message.get_unrecognized_field_info('repeated'))", "def testWrongTypeAssignment(self):\n self.assertRaises(messages.ValidationError,\n protojson.decode_message,\n MyMessage, '{\"a_string\": 10}')", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.obstacleinfo is None:\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n if self.oppinfo is None:\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n if self.robotinfo is None:\n self.robotinfo = None\n if self.ballinfo is None:\n self.ballinfo = None\n if self.coachinfo is None:\n self.coachinfo = nubot_common.msg.CoachInfo()\n if self.pass_cmd is None:\n self.pass_cmd = nubot_common.msg.PassCommands()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.obstacleinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.obstacleinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.obstacleinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.polar_pos.append(val1)\n _x = self\n start = end\n end += 12\n (_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.oppinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.oppinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.polar_pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.robotinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.RobotInfo()\n _v12 = val1.header\n start = end\n end += 4\n (_v12.seq,) = _get_struct_I().unpack(str[start:end])\n _v13 = _v12.stamp\n _x = _v13\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v12.frame_id = str[start:end].decode('utf-8')\n else:\n _v12.frame_id = str[start:end]\n _x = val1\n start = end\n end += 28\n (_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum,) = _get_struct_7i().unpack(str[start:end])\n _v14 = val1.pos\n _x = _v14\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v15 = val1.heading\n start = end\n end += 4\n (_v15.theta,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (val1.vrot,) = _get_struct_f().unpack(str[start:end])\n _v16 = val1.vtrans\n _x = _v16\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 9\n (_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, _x.current_role, _x.role_time,) = _get_struct_5Bf().unpack(str[start:end])\n val1.iskick = bool(val1.iskick)\n val1.isvalid = bool(val1.isvalid)\n val1.isstuck = bool(val1.isstuck)\n val1.isdribble = bool(val1.isdribble)\n _v17 = val1.target\n _x = _v17\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.robotinfo.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.ballinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.BallInfo()\n _v18 = val1.header\n start = end\n end += 4\n (_v18.seq,) = _get_struct_I().unpack(str[start:end])\n _v19 = _v18.stamp\n _x = _v19\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v18.frame_id = str[start:end].decode('utf-8')\n else:\n _v18.frame_id = str[start:end]\n start = end\n end += 4\n 
(val1.ballinfostate,) = _get_struct_i().unpack(str[start:end])\n _v20 = val1.pos\n _x = _v20\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v21 = val1.real_pos\n _x = _v21\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n _v22 = val1.velocity\n _x = _v22\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 2\n (_x.pos_known, _x.velocity_known,) = _get_struct_2B().unpack(str[start:end])\n val1.pos_known = bool(val1.pos_known)\n val1.velocity_known = bool(val1.velocity_known)\n self.ballinfo.append(val1)\n _x = self\n start = end\n end += 12\n (_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.coachinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.coachinfo.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 54\n (_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, _x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, _x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, _x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid,) = _get_struct_3B4f2h3B2I4f4B().unpack(str[start:end])\n self.pass_cmd.is_passout = bool(self.pass_cmd.is_passout)\n self.pass_cmd.is_dynamic_pass = bool(self.pass_cmd.is_dynamic_pass)\n self.pass_cmd.is_static_pass = bool(self.pass_cmd.is_static_pass)\n self.pass_cmd.is_valid = bool(self.pass_cmd.is_valid)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def testSchemaLoadingAsString(self):\n api = self.ApiFromDiscoveryDoc('latitude.v1.json')\n self.assertEquals(4, len(api._schemas))", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)" ]
[ "0.77971804", "0.7109227", "0.6970353", "0.68700683", "0.6193557", "0.60889876", "0.5947199", "0.57988137", "0.57877314", "0.576459", "0.5755967", "0.57254654", "0.5725275", "0.56569904", "0.5644605", "0.5625937", "0.56136805", "0.5595943", "0.5573725", "0.5545526", "0.551871", "0.5507125", "0.55064076", "0.5498484", "0.54663444", "0.5455031", "0.54428554", "0.54314935", "0.5422437", "0.53861886" ]
0.76633465
1
Tests that binary SpecificLocation serializes as expected
def test_binary_serialization() -> None: uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684)) obj = SpecificLocation(id=uid, name="Test") blob = ( b"\n/syft.core.io.location.specific.SpecificLocation\x12\x1a\n\x12\n\x10" + b"\xfb\x1b\xb0g[\xb7LI\xbe\xce\xe7\x00\xab\n\x15\x14\x12\x04Test" ) assert sy.serialize(obj, to_bytes=True) == blob assert sy.serialize(obj, to_bytes=True) == blob assert sy.serialize(obj, to_bytes=True) == blob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def test_binary_deserialization() -> None:\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n obj = sy.deserialize(blob=blob, from_bytes=True)\n assert obj == SpecificLocation(\n id=UID(value=uuid.UUID(int=333779996850170035686993356951732753684)),\n name=\"Test\",\n )", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": \"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_str(self):\n location = self.location\n\n self.assertEqual(str(location), self.location_raw['name'])", "def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def test_get_zr_location_structure(self):\n pass", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def test_to_string() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n assert str(obj) == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"\n assert obj.__repr__() == \"<SpecificLocation: fb1bb0675bb74c49becee700ab0a1514>\"", "def test_02_Dump(self):\n self.m_location.Street = '_test street'\n l_ret = self.m_config.save_yaml_config()\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-A - Location', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'C2-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-C - Location', 190))\n # print(PrettyFormatAny.form(l_ret, 'C2-02-D - Location', 190))\n # print('Config: {}'.format(l_ret))\n self.assertEqual(l_ret['Location']['City'], 'Washington')", "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n 
assert obj == obj2", "def test_03_extract(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_obj = self.m_config._extract_location(l_node.Yaml['Location'])\n l_ret = self.m_pyhouse_obj.House.Location\n # print(PrettyFormatAny.form(l_node, 'C1-03-A'))\n # print(PrettyFormatAny.form(l_obj, 'C1-03-B'))\n # print(PrettyFormatAny.form(l_ret, 'C1-03-C'))\n self.assertEqual(l_obj.Street, '1600 Pennsylvania Ave NW')\n self.assertEqual(l_obj.City, 'Washington')", "def test_location_address(self):\n self.assertIsInstance(self.location.address, Address)\n self.assertEqual(self.location.address, self.address)", "def test_create_location(self):\n location = self.location\n\n self.assertTrue(isinstance(location, Location))\n self.assertEqual(location.name, \"Test Location\")", "def test_pprint() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"location\")\n assert obj.pprint == \"📌 location (SpecificLocation)@<UID:🙍🛖>\"", "def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_training_location(self):\n self.assertIsInstance(self.one_off_training.location, Location)\n self.assertEqual(self.one_off_training.location, self.location)", "def test_json():\n bounds = MolecularStructureBounds()\n copy = loads(dumps(bounds))\n assert copy == bounds", "def test_python_to_json(self):\n location = {\n 'address' : '123 Main St.',\n 'lat' : 127.0,\n 'lng' : -42,\n 'name' : 'nowhere',\n 'owner' : ObjectId(),\n '_id' : ObjectId()\n }\n\n parsed = Location.flatten(location)\n\n # these should all be the same\n self.assertEqual(parsed['address'], location['address'])\n self.assertEqual(parsed['lat'], location['lat'])\n self.assertEqual(parsed['lng'], location['lng'])\n self.assertEqual(parsed['name'], location['name'])\n\n # owner should be removed\n self.assertFalse(parsed.has_key('owner'))\n\n # and id should be renamed from _id to id, and flattened\n self.assertFalse(parsed.has_key('_id'))\n self.assertTrue(parsed.has_key('id'))\n self.assertEqual(parsed['id'], str(location['_id']))", "def location_fixture():\n return _create_location()", "def test_location_is_location_instance(self):\n self.assertIsInstance(self.location, Location)", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def test_has_location_with_states_with_valid_location():\n state = State(\n \"hello.world\", \"invalid\", {ATTR_LATITUDE: 123.12, ATTR_LONGITUDE: 123.12}\n )\n assert location.has_location(state)", "def test_init(self):\n self.assertEqual(self.location, Ship(self.location).location)", "def test_oef_object_transator():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n oef_desc = OEFObjectTranslator.to_oef_description(desc)\n new_desc = OEFObjectTranslator.from_oef_description(oef_desc)\n assert 
desc.values[\"location\"] == new_desc.values[\"location\"]", "def test_location(self):\n lon = [ 80.0, -78.5, 500.500]\n lat = [np.NaN, 50.0, -60.0]\n\n npt.assert_array_equal(\n qartod.location_test(lon=lon, lat=lat),\n np.ma.array([4, 1, 4])\n )\n\n lon = np.array(lon)\n lat = np.array(lat)\n npt.assert_array_equal(\n qartod.location_test(lon=lon, lat=lat),\n np.ma.array([4, 1, 4])\n )\n\n lon = dask_arr(lon)\n lat = dask_arr(lat)\n npt.assert_array_equal(\n qartod.location_test(lon=lon, lat=lat),\n np.ma.array([4, 1, 4])\n )", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_address_to_tree(self):\n pass", "def test_dumps(self):\n result = self.mapper.dumps(self.serialization)\n self.mapper.to_dict.assert_called_once_with(\"custom\")\n self.serialization.assert_called_once_with(\n self.mapper.to_dict.return_value\n )\n self.assertIs(result, self.serialization.return_value)", "def test_get_formatted_location(self):\n\t\tformatted_location = get_formatted_location('seoul', 'south korea')\n\t\tself.assertEqual(formatted_location, 'Seoul, South Korea')" ]
[ "0.7375597", "0.72204155", "0.6941698", "0.6747669", "0.6687572", "0.66465783", "0.6593975", "0.65680665", "0.63872945", "0.63758487", "0.62996614", "0.6264945", "0.61968005", "0.6076474", "0.60626924", "0.6027337", "0.5995509", "0.59866995", "0.59510314", "0.5870925", "0.5852066", "0.58392465", "0.58351254", "0.5808407", "0.5795497", "0.57825893", "0.57182974", "0.5707685", "0.5665695", "0.56374234" ]
0.7700117
0
Test that binary SpecificLocation deserialization works as expected
def test_binary_deserialization() -> None: blob = ( b"\n/syft.core.io.location.specific.SpecificLocation\x12\x1a\n\x12\n\x10" + b"\xfb\x1b\xb0g[\xb7LI\xbe\xce\xe7\x00\xab\n\x15\x14\x12\x04Test" ) obj = sy.deserialize(blob=blob, from_bytes=True) assert obj == SpecificLocation( id=UID(value=uuid.UUID(int=333779996850170035686993356951732753684)), name="Test", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2", "def test_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = (\n b\"\\n/syft.core.io.location.specific.SpecificLocation\\x12\\x1a\\n\\x12\\n\\x10\"\n + b\"\\xfb\\x1b\\xb0g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\\x12\\x04Test\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2", "def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob", "def test_location() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/location.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n assert response.data is not None\n assert response.data.raw_data[\"attributes\"] == {\n \"gpsLatitude\": 48.1234567,\n \"gpsLongitude\": 11.1234567,\n \"lastUpdateTime\": \"2020-02-18T16:58:38Z\",\n }\n\n vehicle_data = cast(\n models.KamereonVehicleLocationData,\n response.get_attributes(schemas.KamereonVehicleLocationDataSchema),\n )\n\n assert vehicle_data.gpsLatitude == 48.1234567\n assert vehicle_data.gpsLongitude == 11.1234567\n assert vehicle_data.lastUpdateTime == \"2020-02-18T16:58:38Z\"", "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob", "def test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')", "def test_03_extract(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_obj = self.m_config._extract_location(l_node.Yaml['Location'])\n l_ret = self.m_pyhouse_obj.House.Location\n # print(PrettyFormatAny.form(l_node, 'C1-03-A'))\n # print(PrettyFormatAny.form(l_obj, 'C1-03-B'))\n # print(PrettyFormatAny.form(l_ret, 'C1-03-C'))\n self.assertEqual(l_obj.Street, '1600 Pennsylvania Ave NW')\n self.assertEqual(l_obj.City, 'Washington')", "def test_python_to_json(self):\n location = {\n 'address' : '123 Main St.',\n 'lat' : 127.0,\n 'lng' : -42,\n 'name' : 'nowhere',\n 'owner' : ObjectId(),\n '_id' : ObjectId()\n }\n\n parsed = Location.flatten(location)\n\n # these should all be the same\n self.assertEqual(parsed['address'], location['address'])\n self.assertEqual(parsed['lat'], location['lat'])\n 
self.assertEqual(parsed['lng'], location['lng'])\n self.assertEqual(parsed['name'], location['name'])\n\n # owner should be removed\n self.assertFalse(parsed.has_key('owner'))\n\n # and id should be renamed from _id to id, and flattened\n self.assertFalse(parsed.has_key('_id'))\n self.assertTrue(parsed.has_key('id'))\n self.assertEqual(parsed['id'], str(location['_id']))", "def test_get_zr_location_structure(self):\n pass", "def test_json():\n bounds = MolecularStructureBounds()\n copy = loads(dumps(bounds))\n assert copy == bounds", "def deserialize(self, data):", "def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')", "def deserialize_object(d):\n pass", "def test_str(self):\n location = self.location\n\n self.assertEqual(str(location), self.location_raw['name'])", "def test_DL_import_wrong_file_serialized(self):\n filepath = '5.txt'\n with open(filepath, 'wb') as file:\n pickle.dump([\"This is a wrong dataset\"], file)\n # Check if exception was raised for wrong data type\n with self.assertRaises(Exception):\n flow_processing_input.DetectorsLocation(9999, filepath)\n os.remove(filepath)", "def test_json_to_python(self):\n\n # There seems to be a problem with Flask-Login setting the current_user proxy\n # in api/models.py, which we need t run this test.\n if False:\n self.login_test_user()\n\n location = {\n 'address' : '123 Main St.',\n 'lat' : '127.0', # forgive numbers coming as strings\n 'lng' : -42,\n 'name' : 'nowhere',\n 'id' : str(ObjectId())\n }\n\n expanded = Location.from_json(location)\n\n # these should all be the same\n self.assertEqual(expanded['address'], location['address'])\n self.assertEqual(expanded['lat'], location['lat'])\n self.assertEqual(expanded['lng'], location['lng'])\n self.assertEqual(expanded['name'], location['name'])\n\n # owner should be set by the currently logged in location\n self.assertEqual(expanded['owner'], self.test_location.id)\n\n # id should be renamed from id to _id, and expanded\n self.assertTrue(expanded.has_key('_id'))\n self.assertFalse(expanded.has_key('id'))\n self.assertEqual(str(expanded['_id']), location['id'])", "def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s", "def test_oef_object_transator():\n foo_datamodel = DataModel(\n \"foo\",\n [\n Attribute(\"bar\", int, True, \"A bar attribute.\"),\n Attribute(\"location\", Location, True, \"A location attribute.\"),\n ],\n )\n desc = Description(\n {\"bar\": 1, \"location\": Location(10.0, 10.0)}, data_model=foo_datamodel\n )\n oef_desc = OEFObjectTranslator.to_oef_description(desc)\n new_desc = OEFObjectTranslator.from_oef_description(oef_desc)\n assert desc.values[\"location\"] == new_desc.values[\"location\"]", "def _post_deserialize (self):\n pass", "def test_02_ReadFile(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_config = l_node.Yaml\n # print(PrettyFormatAny.form(l_node, 'C1-02-A'))\n # print(PrettyFormatAny.form(l_config, 'C1-02-B'))\n self.assertEqual(l_config['Location']['Street'], '1600 Pennsylvania Ave NW')\n 
self.assertEqual(len(l_config['Location']), 10)", "def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5", "def test_loader_loads_from_str():\n base_json = '{\"foo\": \"bar\"}'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json, from_file=False) == json_test", "def test_location_address(self):\n self.assertIsInstance(self.location.address, Address)\n self.assertEqual(self.location.address, self.address)", "def test_user_type_simple_attributes_with_roundtrip():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n bytestream = me.to_bytes()\n new_me = Person.read(bytestream)\n assert \"Bede Kelly\" == new_me.name\n assert 20 == new_me.age", "def test_dumps(self):\n data = \"something\"\n result = self.mapper.loads(self.deser_fn, data)\n self.mapper.from_dict.assert_called_once_with(\n self.deser_fn.return_value, \"custom\"\n )\n self.deser_fn.assert_called_once_with(data)\n self.assertIs(result, self.mapper.from_dict.return_value)", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_create_from_serialized(self, molecule):\n serialized_molecule = molecule.to_dict()\n molecule_copy = Molecule(serialized_molecule)\n assert molecule == molecule_copy", "def test_location_is_location_instance(self):\n self.assertIsInstance(self.location, Location)", "def test_dump_load(self):\n payload = {\"a\": [1, 2, 3]}\n self.assertEqual(load_json(dump_json(payload)), payload)" ]
[ "0.7560681", "0.74002177", "0.7336826", "0.7013086", "0.6905292", "0.66269207", "0.64547575", "0.63303447", "0.6209737", "0.6172043", "0.61616814", "0.6142617", "0.61180305", "0.6067025", "0.6018473", "0.593757", "0.59359664", "0.5935091", "0.5915604", "0.5915578", "0.584693", "0.5827266", "0.5822428", "0.58194107", "0.58044153", "0.5796166", "0.5767464", "0.57652277", "0.57190466", "0.56830764" ]
0.8264405
0
Takes a semicolon-delimited list of values and constructs a SQL WHERE clause to select those values within a given field and table.
def whereClause(table, field, values): # Add field delimiters fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field) # Split multivalue at semicolons and strip quotes valueList = [value[1:-1] if (value.startswith("'") and value.endswith("'")) else value for value in values.split(';')] # Determine field type fieldType = arcpy.ListFields(table, field)[0].type # Add single-quotes for string field values if str(fieldType) == 'String': valueList = ["'%s'" % value for value in valueList] # Format WHERE clause in the form of an IN statement whereClause = "%s IN (%s)"%(fieldDelimited, ', '.join(valueList)) return whereClause
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_where_clause(table, field, valueList):\n # Add DBMS-specific field delimiters\n fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field)\n # Determine field type\n fieldType = arcpy.ListFields(table, field)[0].type\n # Add single-quotes for string field values\n if str(fieldType) == 'String':\n valueList = [\"'%s'\" % value for value in valueList]\n # Format WHERE clause in the form of an IN statement\n whereClause = \"%s IN(%s)\" % (fieldDelimited, ', '.join(map(str, valueList)))\n return whereClause", "def get_list_query_cond(column: str, val: list, query_params: dict):\n if val is not None and len(val) != 0:\n or_list = []\n for i in range(len(val)):\n param_name = f'{column}{i}'\n query_params[param_name] = val[i]\n or_list.append('AHJ.' + column + '=%(' + param_name + ')s')\n ret_str = '(' + ' OR '.join(or_list) + ') AND '\n return ret_str\n return ''", "def make_where_in(cls, key, value_list):\n\n return \"%s IN (%s)\" % (\n cls.to_attr_str(key), \", \".join(cls.to_value_str_list(value_list)))", "def _getSQLWhere(self, inputTable, queryMeta):\n\t\tsqlPars = {}\n\t\tinputPars = dict((p.name, p.value) for p in inputTable.iterParams())\n\t\treturn base.joinOperatorExpr(\"AND\",\n\t\t\t[cd.asSQL(inputPars, sqlPars, queryMeta)\n\t\t\t\tfor cd in self.condDescs]), sqlPars", "def _sqllist(values):\n items = []\n items.append('(')\n for i, v in enumerate(values):\n if i != 0:\n items.append(', ')\n items.append(sqlparam(v))\n items.append(')')\n return SQLQuery(items)", "def CondStrSelectedLines(LineList):\r\n\tcondstr = []\r\n\tfor line in LineList:\r\n\t\tpat = \"'\" + line + \"'\"\r\n\t\tcondstr.append(pat)\r\n\tSQLcond = \"line_id IN \" + \"(\" + \",\".join(condstr) + \")\"\r\n\treturn SQLcond", "def find_some(self,table,field_list,**query_dict):\n start_sql = 'SELECT '\n sql = ''\n query_sql = ''\n for field in field_list: start_sql += field + ',' \n start_sql = start_sql[0:-1] + ' FROM %s WHERE ' % (table)\n try:\n if query_dict:\n for index in query_dict:\n if not isinstance(query_dict[index],dict): query_sql += \" %s = '%s' and\" % (index,query_dict[index]) \n else: query_sql += \" %s %s '%s' and\" % (index,query_dict[index]['rule'],query_dict[index]['value'])\n sql = (start_sql + query_sql)[0:-3] \n info_list = self.db.query(sql)\n except Exception,e: self.treat_except(e) \n return info_list", "def _build_where_clause(**kwds_filter):\n clause = []\n params = []\n items = kwds_filter.items()\n items = sorted(items, key=lambda x: x[0]) # Ordered by key.\n for key, val in items:\n if nonstringiter(val):\n clause.append(key + ' IN (%s)' % (', '.join('?' * len(val))))\n for x in val:\n params.append(x)\n else:\n clause.append(key + '=?')\n params.append(val)\n\n clause = ' AND '.join(clause) if clause else ''\n return clause, params", "def _build_where_clause(**kwds_filter):\n clause = []\n params = []\n items = kwds_filter.items()\n items = sorted(items, key=lambda x: x[0]) # Ordered by key.\n for key, val in items:\n if _is_nsiterable(val):\n clause.append(key + ' IN (%s)' % (', '.join('?' 
* len(val))))\n for x in val:\n params.append(x)\n else:\n clause.append(key + '=?')\n params.append(val)\n\n clause = ' AND '.join(clause) if clause else ''\n return clause, params", "def _cond_where_sql(cursor, conds, tables, prefix=None, aggregate=False):\n isa = isinstance\n pieces = []\n for c in conds:\n if isa(c, Query) or (isa(c, Comparison) and c._table in tables):\n sql = c._sql_where(cursor, tables, prefix=prefix,\n aggregate=aggregate)\n if len(sql) > 0:\n pieces.append(sql)\n return pieces", "def select(self, table_name: str, row_filter: dict) -> list:\n sql = 'SELECT * FROM ' + table_name + ' WHERE '\n for key, value in row_filter.items():\n if type(value) is tuple:\n sql += key + ' '\n sql += value[0] + ' '\n sql += \"'\" + value[1] + \"'\"\n elif type(value) == str:\n sql += key + ' = '\n sql += \"'\" + value + \"'\"\n elif value is None:\n sql += key + ' ISNULL '\n else:\n sql += key + ' = '\n sql += str(value)\n if not key == list(row_filter.keys())[-1]:\n sql += ' AND '\n return self.cursor.execute(sql).fetchall()", "def __make_sel(selection):\n sel = []\n param = []\n for key, value in selection.iteritems(): \n if key == \"fn\":\n if value.find('%') >= 0:\n sel.append(\"irods_filepath like %s\")\n else:\n sel.append(\"irods_filepath = %s\")\n elif key == \"expid\":\n sel.append(\"exper_id = %s\".format(value))\n elif key == 'runnum':\n sel.append(\"runnum = %s\".format(value))\n elif key == 'status' and value:\n sel.append(\"status = %s\")\n else:\n continue\n param.append(value)\n\n q = \"WHERE {}\".format(\" AND \".join(sel)) if sel else \"\"\n return q, param", "def get_list_filter(self,table=None,**kwargs):\n # import pdb;pdb.set_trace()\n self.where = '1'\n self.order_by = 'id'\n if not isinstance(table,SqliteTable):\n return\n \n # get the column names for the table\n table_column_names = table.get_column_names()\n \n self._create_filter_session(table.table_name) # ensure it exists\n \n where_list = []\n session_data = session.get(self.HEADER_NAME)\n if session_data and table.table_name in session_data:\n filter_data = session_data[table.table_name][self.FILTERS_NAME]\n for k,v in filter_data.items():\n col = v.get(self.FIELD_NAME)\n val = v.get(self.VALUE)\n kind = v.get(self.TYPE)\n start = v.get(self.DATE_START)\n end = v.get(self.DATE_END)\n if col and (val or start or end):\n \n # if the column name is a physical column in the primary table\n # prepend the column name with the table name to avoid ambiguous column names\n if col in table_column_names and '.' not in col:\n col = table.table_name + '.' 
+ col\n \n if kind == 'date':\n start = iso_date_string(start if start else self.BEGINNING_OF_TIME)\n end = iso_date_string(end if end else self.END_OF_TIME)\n # print(start,end)\n where_list.append(\"\"\"date({col}) >= date('{start}') and date({col}) <= date('{end}')\"\"\".format(col=col,start=start,end=end))\n # print(where_list[-1])\n else:\n where_list.append(\"\"\"{col} LIKE '%{val}%'\"\"\".format(col=col,val=str(val).lower()))\n \n \n # import pdb;pdb.set_trace()\n order_list = []\n for order_data in session_data[table.table_name][self.ORDERS_NAME]:\n for dom_id in order_data.keys():\n col = order_data[dom_id].get(self.FIELD_NAME)\n direction = int(order_data[dom_id].get(self.DIRECTION,0)) #direction will be -1,0 or 1\n if col and direction:\n \n # if the column name is a physical column in the primary table\n # prepend the column name with the table name to avoid ambiguous column names\n # Same as above, but not sure it's really needed in order by...\n if col in table_column_names and '.' not in col:\n col = table.table_name + '.' + col\n\n direction = 'DESC' if direction < 0 else 'ASC'\n collate = ''\n field_type = \"TEXT\"\n try:\n field_type = table.get_column_type(order_data[dom_id]['field_name'])\n except KeyError:\n # the field name may be defined in the query \n pass\n if field_type.lower() == \"text\":\n collate = 'COLLATE NOCASE'\n order_list.append(\"\"\"{col} {collate} {direction}\"\"\".format(col=col,collate=collate,direction=direction))\n \n if where_list:\n self.where = ' and '.join(where_list)\n if order_list:\n self.order_by = ','.join(order_list)\n else:\n self.order_by = table.order_by_col #default order for this table", "def CondStrSelectedStations(StationList):\r\n\tvals = \"(\" + \",\".join([str(e) for e in StationList]) + \")\"\r\n\tSQLcond = \"station_from in \" + vals + \" AND \" + \"station_to in \" + vals\r\n\treturn SQLcond", "def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):\n disjunctions = []\n andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,\n aggregate=aggregate)\n andsql = ' AND '.join(andsql)\n\n if len(andsql) > 0:\n andsql = '(%s)' % andsql\n disjunctions.append(andsql)\n disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,\n aggregate=aggregate)\n\n if len(disjunctions) == 0:\n return ''\n return '(%s)' % (' OR '.join(disjunctions))", "def sql_filtered_update(table, set_columns, where_columns, values):\n for index in range(len(set_columns) - 1, -1, -1):\n if values[index] is None:\n del set_columns[index]\n del values[index]\n set_columns = [col + ' = ?' for col in set_columns]\n columns_to_set = ', '.join(set_columns)\n where_columns = [col + ' = ?' 
for col in where_columns]\n where_condition = ' AND '.join(where_columns)\n query = f'UPDATE {table} SET {columns_to_set} WHERE {where_condition}'\n return query, values", "def sqlwhere(dictionary, grouping=' AND '):\n return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)", "def sql_filter_foreignkey(my_table='', my_col='', join_table='', colName='', var='', **kw):\n\tif (my_table=='') or (join_table=='') or (colName=='') or (var=='') or (my_col==''):\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tclauseTables=[join_table]\n\t\tsql = my_table+\".\"+my_col+\" = \"+join_table+\".id AND \"+join_table+\".\"+colName+\" LIKE '%\"+var+\"%'\"\n\t\treturn dict(sql=sql, clauseTables=clauseTables)", "def sql_filter(my_table='', colName='', var='', **kw):\n\tif (my_table=='') or (colName=='') or (var==''):\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tsql = my_table+\".\"+colName+\" LIKE '%\"+var+\"%'\"\n\t\treturn dict(sql=sql,clauseTables=[])", "def queryByAttributeIn(table, attribute, values, access=None, addtl=\"\"):\n if len(values) > MAX_IN_ELEMENTS:\n values1 = values[:MAX_IN_ELEMENTS]\n values2 = values[MAX_IN_ELEMENTS:]\n records1 = queryByAttributeIn(table, attribute, values1, access, addtl)\n records2 = queryByAttributeIn(table, attribute, values2, access, addtl)\n records1.extend(records2)\n return records1\n\n valueString = u\",\".join(u\"'\" + sqlapi.quote(val) + u\"'\" for val in values)\n condition = u\"%s IN (%s)\" % (attribute, valueString)\n records = sqlapi.RecordSet2(table, condition,\n access=access, access_persno=auth.persno,\n addtl=addtl)\n return [records]", "def sqlors(left, lst):\n if isinstance(lst, iters):\n lst = list(lst)\n ln = len(lst)\n if ln == 0:\n return SQLQuery(\"1=2\")\n if ln == 1:\n lst = lst[0]\n\n if isinstance(lst, iters):\n return SQLQuery(['('] + \n sum([[left, sqlparam(x), ' OR '] for x in lst], []) +\n ['1=2)']\n )\n else:\n return left + sqlparam(lst)", "def selectStr_by_list(field, lst):\n exp = ''\n for item in lst:\n if type(item) in [str, unicode]: # sequence\n exp += \"{} = '{}' OR \".format(field, item)\n elif type(item) == float:\n decP = len(repr(item).split(\".\")[1]) # decimal places\n if decP >= 15:\n exp += 'ROUND({},{}) = {} OR '.format(field, decP, repr(item))\n else:\n exp += '{} = {} OR '.format(field, repr(item))\n elif type(item) in [int, long]: # numeric\n exp += '\"{}\" = {} OR '.format(field, item)\n else:\n message(\"'{}' in list, unknown type '{}'\".format(item, type(item)))\n return (exp[:-4])", "def sql_filter_n_foreignkey(my_table='', my_col='', join_table=[], colName=[], var='', **kw):\n\tif (my_table=='') or (join_table=='') or (colName=='') or (var=='') or (my_col=='') or (len(join_table)!=len(colName)) or len(join_table)<2:\n\t\treturn dict(sql='',clauseTables=[])\n\telse:\n\t\tclauseTables=join_table\n\t\tsql = my_table+\".\"+my_col+\" = \"+join_table[0]+\".id AND \"\n\t\ti = 0\n\t\tfor table in join_table:\n\t\t\tif i+1<len(join_table):\n\t\t\t\tsql += table+\".\"+colName[i]+\" = \"+join_table[i+1]+\".id AND \"\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tsql += table+\".\"+colName[i]+\" LIKE '%\"+var+\"%'\"\n\t\treturn dict(sql=sql, clauseTables=clauseTables)\n\t\t #sql_merge(sqls=['sql1', 'sql2', 'sql4'], clauseTables=['one'])", "def load_database_table(database, table, list_where=None) -> list:\n if list_where is None:\n list_where = list()\n query = \"SELECT * FROM \" + table\n for i in range(len(list_where)):\n if i == 0:\n query = query + \" WHERE \" + list_where[i]\n 
else:\n query = query + \" AND \" + list_where[i]\n query_result = run_query(database, query)\n return query_result", "def __convert_to_sql_where(conditions : List[Tuple[Any, RelationalOperator, Any]]) -> str:\n\n formatted_identifiers = []\n\n for identifier in conditions:\n col_name, relation, value = identifier\n\n if relation == RelationalOperator.Between and len(value) != 2:\n raise ValueError(\"Between relational operator requires the value parameter to be a list of length 2\")\n \n if relation != RelationalOperator.Between:\n value = SecurityDatabaseWrapper._validate_value(value)\n \n col_name = SecurityDatabaseWrapper._validate_column_name(col_name)\n\n formatted_identifiers.append((col_name, relation, value))\n\n where_clause_section = ' AND '.join([f'{col_name} {relation.value} {value}' for col_name, relation, value in formatted_identifiers])\n return f\"({where_clause_section})\"", "def as_sql(self, compiler, connection):\n join_conditions = []\n params = []\n qn = compiler.quote_name_unless_alias\n qn2 = connection.ops.quote_name\n\n # Add a join condition for each pair of joining columns.\n\n for index, (lhs_col, rhs_col) in enumerate(self.join_cols):\n if hasattr(self.join_field, 'get_join_on'):\n join_condition = self.join_field.get_join_on(qn(self.parent_alias), qn2(lhs_col), qn(self.table_alias),\n qn2(rhs_col))\n join_conditions.append(join_condition)\n else:\n join_conditions.append('%s.%s = %s.%s' % (\n qn(self.parent_alias),\n qn2(lhs_col),\n qn(self.table_alias),\n qn2(rhs_col),\n ))\n\n # Add a single condition inside parentheses for whatever\n # get_extra_restriction() returns.\n extra_cond = self.join_field.get_extra_restriction(\n compiler.query.where_class, self.table_alias, self.parent_alias)\n if extra_cond:\n extra_sql, extra_params = compiler.compile(extra_cond)\n join_conditions.append('(%s)' % extra_sql)\n params.extend(extra_params)\n\n if not join_conditions:\n # This might be a rel on the other end of an actual declared field.\n declared_field = getattr(self.join_field, 'field', self.join_field)\n raise ValueError(\n \"Join generated an empty ON clause. %s did not yield either \"\n \"joining columns or extra restrictions.\" % declared_field.__class__\n )\n on_clause_sql = ' AND '.join(join_conditions)\n alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias)\n sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql)\n return sql, params", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def _sample_using_a_list(\n self,\n column_name: str,\n value_list: list,\n ):\n return sa.column(column_name).in_(value_list)", "def QueryTableForFields(self, tableName: str, fields: List[Tuple] = []):\n # putting this here for now, will be commented out. Serves as a development check in case I make mistake\n if tableName not in DB_TABLES:\n raise KeyError(\"Table not valid. Use Table from {}\".format(DB_TABLES))\n if fields == []:\n return self.QueryTableForAll(tableName)\n currTable = self.table(tableName)\n\n queries = []\n for idField in range(len(fields)):\n currField = fields[idField]\n print(currField)\n if type(currField[2]) == list and currField[1] == \"in\":\n # Best to handle this somewhere else? DB shouldn't be too big so is ok to do this\n # actually use query.type.any? 
Might need to add a parameter to the function\n addedQuery = \"Query().{}.any({})\".format(currField[0], currField[2])\n\n else:\n if type(currField[2]) == int or type(currField[2]) == float or type(currField[2]) == list:\n addedQuery = 'where(\"{}\") {} {}'.format(\n str(currField[0]), str(currField[1]), currField[2]\n )\n else:\n addedQuery = 'where(\"{}\") {} \"{}\"'.format(\n str(currField[0]), str(currField[1]), currField[2]\n )\n\n queries.append(addedQuery)\n\n strQuery = \"\"\n for i, query in enumerate(queries):\n strQuery += query\n if i != len(queries) - 1:\n strQuery += \" and \"\n\n results = currTable.search(eval(strQuery))\n\n # Here I would like to operate on tags\n return results", "def get_sql_str_select(self,table_name, field_names = ['*'],where_ind_name = None, where_ind_value = None, where_clause = None):\n\t\tif (where_ind_name!=None) & (where_ind_value!=None):\n\t\t\tsql_str = 'SELECT %s FROM %s.%s WHERE %s = %s'%(','.join([str(x) for x in field_names]),\t\t\t\t\t\t\t\t\t\t\t\t\tself.__schema_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\ttable_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\t where_ind_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\t where_ind_value)\n\t\telse:\n\t\t\tsql_str = 'SELECT %s FROM %s.%s %s'%(','.join([str(x) for x in field_names]),\t\t\t\t\t\t\t\t\t\t\t\t\tself.__schema_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\ttable_name, where_clause)\n\t\t# print(sql_str)\n\t\treturn sql_str" ]
[ "0.7853446", "0.6152071", "0.5823842", "0.5747859", "0.5664342", "0.56641", "0.56417346", "0.5615715", "0.5549794", "0.55107397", "0.5508221", "0.53860664", "0.53816307", "0.53534734", "0.53163993", "0.5313791", "0.53075683", "0.53036475", "0.52948624", "0.5281941", "0.5253823", "0.52246773", "0.51367253", "0.5075603", "0.50694203", "0.5052339", "0.505184", "0.50182366", "0.50058067", "0.4984516" ]
0.7653736
1
Return a sorted list of mobs that have that name for the purposes of iteration. This method may be expanded in the future to allow searching for mobs by other traits.
def get_named(self, name): if type(name) is str: named = [mob for mob in self.contents() if mob.nombre == name] else: named = [mob for mob in self.contents() if mob.nombre == name.nombre] named.sort(key=lambda o: o.nombre) return named
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def name(self):\n return [o.name for o in self.obs]", "def get_cards(self, name):\n cards = []\n\n for card in self.cards:\n if card.name == name:\n cards.append(card)\n\n return cards", "def cxfind(self, namepart):\n names = [name for name in self if namepart in name]\n names.sort()\n return names", "def effect_list(self):\n moods = []\n for mood in self._moodlist:\n if \"name\" in mood:\n moods.append(mood['name'])\n return moods", "def obs_names(self):\n return self._obs_names", "def find_all_by_name ( self, name, **kw ):\n return self.find_all (\n lambda s, n: s.name == n, c_args=( name, ), **kw\n )", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())", "def Collection_search_by_name(C: list, name: str) -> list:\r\n result = [ ]\r\n for r in C:\r\n if r.name == name:\r\n result.append(r)\r\n return result", "def getItemsForArtist(self,name):\n return [i for i in self.items if i.artist == name]", "def search(self, name):\n\t\tmatching_contacts = []\n\t\tfor contact in self:\n\t\t\tif name in contact.name:\n\t\t\t\tmatching_contacts.append(contact)\t\n\t\treturn matching_contacts", "def namelist(self):\n return set(self.names())", "def get_symbol(self, name, include_stab=False, fuzzy=False): # pylint: disable=arguments-differ\n result = []\n for sym in self.symbols:\n\n if sym.is_stab and not include_stab:\n continue\n\n if fuzzy:\n if name in sym.name:\n result.append(sym)\n else:\n if name == sym.name:\n result.append(sym)\n\n return result", "def monomers(self):\n return sorted(set([self[x.split(\"_\")[-1]][\"name\"] for x in self.keys]), key=lambda x: -len(x))", "def find_objects_by_name(self, Name):\n #first get all the object properties\n object_ids = []\n for item in self.object_store:\n object_ids.append(item.LocalID)\n \n self.request_objects_properties(object_ids)\n \n pattern = re.compile(Name)\n\n matches = [_object for _object in self.object_store if (_object.Name != None and pattern.match(_object.Name))]\n\n return matches", "def get_obs_ids(self):\n return sorted(self.obsinfos.keys())", "def __contains__(self, name):\r\n name = OrderBy(name).bare\r\n for order_by in self:\r\n if order_by.bare == name:\r\n return True\r\n return False", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def get_attrs_with_name(self, name):\n return self.get_matches([lambda attr: attr.name == name])", "def names(self) -> List:\n ...", "def names(cls):\n return cls.__by_name.keys()", "def most_similar_actors(self, moviename):\n movieid = util.get_movie_id(moviename)\n movie_movie_dict = self.get_movie_movie_vector(moviename)\n if movie_movie_dict == None:\n return None\n actors = []\n for (movie,val) in movie_movie_dict:\n if val <= 0:\n break\n movieid = util.get_movie_id(movie)\n actors = actors + self.get_actors_of_movie(movie)\n if len(actors) >= 10:\n break\n\n actors_of_given_movie = self.get_actors_of_movie(moviename)\n\n actorsFinal = [x for x in actors if x not in actors_of_given_movie]\n\n actornames = []\n for actorid in actorsFinal:\n actor = util.get_actor_name_for_id(actorid)\n actornames.append(actor)\n\n return actornames", "def byname(self, name):\n\n name = name.lower()\n for i in 
self.bots:\n if name == i.name:\n return i", "def name_startswith(self, name):\n matches = [\n entry\n for entry in self\n if entry is not None and entry.name.startswith(name)\n ]\n return matches", "def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants", "def getMyArmies(self):\n r = []\n for army in self.__armies:\n if (army.getOwner() == 1):\n r.append(army)\n return r", "def __select_names(self, names):\n random.shuffle(names)\n selected = [names[0]]\n if random.random() > 0.7: # 30% de chances de ter dois nomes\n selected.append(names[1])\n return selected", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]", "def search_for_name(self, name):\n for p in self.books_all:\n if p['name'] == name:\n return p", "def namelist(self):\n return []" ]
[ "0.6153555", "0.594157", "0.591578", "0.58488035", "0.57634735", "0.573525", "0.5705173", "0.5702694", "0.5682394", "0.5681273", "0.5596956", "0.5593977", "0.5591041", "0.55547523", "0.55317295", "0.54679155", "0.54432535", "0.54290605", "0.54099905", "0.53774685", "0.5377313", "0.5355894", "0.5354569", "0.53029853", "0.52956253", "0.52522874", "0.5226837", "0.5195929", "0.51786864", "0.51717883" ]
0.7485591
0
Return the time in seconds shifted by the simulation start time (e.g. as specified in the inp file). That is, this is the time since 12 AM on the first day.
def _shifted_time(self): return self.sim_time + self.options.time.start_clocktime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prev_shifted_time(self):\n return self._prev_sim_time + self.options.time.start_clocktime", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def calculate_time(start_time):\r\n return round(time() - start_time, 2)", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def _clock_time(self):\n return self._shifted_time % (24*3600)", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def initialTime(self):\n return self.params['t0']", "def get_simulation_time(self):\n if not self.simulation_init_time:\n return 0\n if self.simulation_running:\n return time.time() - self.simulation_init_time\n return self.simulation_time", "def dt(self):\n return self._data_writer.get_simulation_time_step_ms()", "def _calculateStarttime(self):\n self.corrected_starttime = deepcopy(\\\n self.fixed_header['Record start time'])\n # Check whether or not the time correction has already been applied.\n if not self.fixed_header['Activity flags'] & 2:\n # Apply the correction.\n self.corrected_starttime += \\\n self.fixed_header['Time correction'] * 0.0001\n # Check for blockette 1001.\n if 1001 in self.blockettes:\n self.corrected_starttime += self.blockettes[1001]['mu_sec'] * \\\n 1E-6", "def get_start_time(self):\n # Timezone and BST not accounted for. Always gives it as GMT.\n create_time = (os.path.getmtime(self.file_path))\n start_time = create_time - len(self.amplitude) / self.fs\n return datetime.fromtimestamp(start_time)", "def get_starttime(self):\n filetime = datetime.datetime.strptime(self.filenametime,\n \"%Y%m%d_%H%M%S\")\n if self.ldat_type != 'acc':\n starttime = filetime\n else:\n starttime = filetime - datetime.timedelta(seconds=512)\n return starttime", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9", "def start_time(self) -> float:\r\n ...", "def start_time(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"start_time\")", "def __get_starting_time(self):\n return self.__starting_time", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def get_nightly_start_time():\n return 14 # 2PM local Tucson time", "def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)", "def get_time(self):\n return self.get_timed() / 10.0", "def startup_time_delta(self):\n return int((time.time() - self.startup_timestamp) * 1000.0)", "def start_time(self) -> float:\n return self._start_time", "def __get_times(self):\n data = self.simulate_file.readlines()\n data = list(map(str.strip, data))\n data = list(map(float, data))\n start = data[0]\n times = data[1:]\n return (start, times)", "def start_time(self):\n return RPR.GetAudioAccessorStartTime(self.id)", "def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time" ]
[ "0.7487089", "0.7112895", "0.70894873", "0.68668205", "0.6815268", "0.6798686", "0.6756169", "0.67303437", "0.6724895", "0.66210204", "0.661483", "0.6612274", "0.65881646", "0.65733826", "0.6572052", "0.6566302", "0.6539889", "0.65108466", "0.65106434", "0.6473942", "0.64628613", "0.64597726", "0.6423651", "0.6419243", "0.6410766", "0.6379895", "0.63644385", "0.6355014", "0.6355014", "0.6330584" ]
0.76229405
0
Return the time in seconds of the previous solve shifted by the simulation start time. That is, this is the time from 12 AM on the first day to the time at the previous hydraulic timestep.
def _prev_shifted_time(self): return self._prev_sim_time + self.options.time.start_clocktime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _shifted_time(self):\n return self.sim_time + self.options.time.start_clocktime", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def get_simulation_time(self):\n if not self.simulation_init_time:\n return 0\n if self.simulation_running:\n return time.time() - self.simulation_init_time\n return self.simulation_time", "def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt", "def dt(self):\n return self._data_writer.get_simulation_time_step_ms()", "def initialTime(self):\n return self.params['t0']", "def getSimulationTime(self):\r\n raise NotImplementedError()", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def calculate_time(start_time):\r\n return round(time() - start_time, 2)", "def _get_delta_time(r0):\n\n s1 = random() # To pick time\n epsilon = 0.001 # To avoid division by zero\n lam = (1 / (r0 + epsilon))\n return lam * pow(e, -lam * s1)", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p", "def get_time_step(self):\n for body in self.bodies:\n # If body is a Satelite\n if body.name == \"Satelite\":\n # Assuming that acceleration for a small times step is constant\n t = 0.01 * norm(body.velocity) / norm(body.acc)\n if t < self.t:\n return t\n return self.t", "def get_time_step(self):\n return self._time_step", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def estimated_time(self):\n self._update()\n if not self.running_mode:\n return 0 if self._is_finished() else float(\"nan\")\n elif self.running_mode == \"local\":\n start = self.processes[0].create_time()\n elif self.running_mode == \"grid\":\n start = self.job[\"start_time\"]\n if start == 0:\n # Queued, but not started\n return float(\"nan\")\n else:\n logger.warning(\"Invalid running_mode attribute\")\n return float(\"nan\")\n current = self.current_step()\n if current <= 0: # If not dumped yet or error\n return float('nan')\n else:\n elapsed = time() - start\n return elapsed * (self.total_steps / current - 1)", "def timeStep(self):\n return self.params['h']", "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def time_step(self):\n return self._time_step", "def time_step(self) -> float:\n return self._timestep", "def getHeadingTime(self) -> float:\n return self.timestep_cached_heading_tm", "def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - 
self._start_step)", "def _clock_time(self):\n return self._shifted_time % (24*3600)", "def getCurrentSimulationTime(self):\r\n raise NotImplementedError()", "def solver_time(self):\n return self._stub.List(self._message).solver_time", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def get_elapsed_seconds():\n\tutcnow = datetime.utcnow()\n\tmidnight_utc = datetime.combine(utcnow.date(), time(0))\n\tdelta = utcnow - midnight_utc\n\treturn delta.total_seconds()", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time" ]
[ "0.7158862", "0.67761683", "0.66555315", "0.66283196", "0.64012426", "0.64009786", "0.6364507", "0.63604295", "0.6347445", "0.6292934", "0.6258326", "0.625451", "0.61905634", "0.6159625", "0.6156674", "0.6154028", "0.61382556", "0.6125578", "0.6115283", "0.6097853", "0.6081917", "0.6077499", "0.6050499", "0.60371614", "0.6033363", "0.6025369", "0.5969101", "0.5963708", "0.5941809", "0.5926989" ]
0.75587815
0
Return the clocktime day of the simulation
def _clock_day(self): return int(self._shifted_time / 86400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_of_day(self):\n return self.time_of_day_value", "def day(self):\n return 0", "def day(self):\n return 0", "def unit_day(self):\n return (self.time_base * 60.0) * 24.0", "def time_of_the_day(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"time_of_the_day\")", "def _unit_day(self):\n return (self.time_base * 60.0) * 24.0", "def get_clock(self):\n return self.clock", "def _clock_time(self):\n return self._shifted_time % (24*3600)", "def day(self) -> int:\n return pulumi.get(self, \"day\")", "def what_night_is_it():\n d = datetime.datetime.utcnow() - datetime.timedelta(7 / 24 + 0.5)\n tonight = int(d.strftime('%Y%m%d'))\n return tonight", "def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim", "def getSimulationTime(self):\r\n raise NotImplementedError()", "def day(self) -> int:\r\n return self._day", "def _get_timebase(self):\n return clock()", "def _get_timebase(self):\n return clock()", "def day(self):\n return self._day", "def day(self):\n return self._day", "def day(self):\n return self._day", "def day(self):\n data = await self.get_data(LIGHT)\n return data['day']", "def clock(self):\n return self._clock", "def get_current_day() -> int:\n return datetime.now().day", "def day(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"day\")", "def day_ts(self):\n return self.raw() // (60 * 24)", "def day(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"day\")", "def day(self):\n try:\n return self.schedule.day\n except Schedule.DoesNotExist:\n return None", "def get_time():\n # Use this one for production:\n now_time = pendulum.now(tz=pendulum.timezone(\"America/New_York\"))\n # Use this one for testing and modify as needed:\n # now_time = pendulum.datetime(2019, 7, 21, 20, 00, tz='America/New_York')\n\n return now_time", "def clock(self):\n # use the monotonic system clock to get a time stamp\n return time.clock_gettime(time.CLOCK_MONOTONIC)", "def clock(self):\r\n return self.__clock", "async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()", "def get_time_of_day(self, time_stamp):\n hour = datetime.strptime(time_stamp, self.fmt).hour\n \n if hour < 12:\n return 'morning'\n elif hour > 18:\n return 'evening'\n else:\n return 'afternoon'" ]
[ "0.72421193", "0.7015691", "0.7015691", "0.6804973", "0.67424273", "0.6686264", "0.66749734", "0.65851897", "0.6573514", "0.6541982", "0.65127045", "0.6437197", "0.6330646", "0.63277256", "0.63277256", "0.62924594", "0.62924594", "0.62924594", "0.62901354", "0.6270266", "0.6253312", "0.62400377", "0.6199154", "0.61972666", "0.6189188", "0.6185759", "0.6174166", "0.61173457", "0.60959166", "0.6094181" ]
0.75825566
0
The link registry (as property) or a generator for iteration (as function call) Returns LinkRegistry
def links(self): return self._link_reg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def registry(self):\n return self.__registry", "def registry(self):\n return self._registry", "def view_registry(self) -> None:\n\n arr = self.load_links()[0]\n for i,v in enumerate(arr):\n print(f\"<{i}: {v}>\\n\")\n pass", "def internal_registry(self) -> Dict[str, Resource]:\n return self.manager.registry", "def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n context = ssl._create_unverified_context()\n if self.login and self.password:\n page = urlopen_with_auth(self.url, self.login, self.password, context)\n else:\n page = urlopen(self.url, context=context).read() # may throw HTTPError, URLError\n self.registry = []\n for _, elem in etree.iterparse(BytesIO(page), html=True):\n if elem.tag in self.tags:\n anchor = elem.attrib.get('id', None)\n if anchor:\n insort(self.registry, normalize_content(anchor))\n logger.debug(f'Generated registry:\\n{self.registry}')", "def hookregistry():\n registry = HookRegistry()\n yield registry", "def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n context = ssl._create_unverified_context()\n if self.login and self.password:\n page = urlopen_with_auth(self.url, self.login, self.password, context)\n else:\n page = urlopen(self.url, context=context).read() # may throw HTTPError, URLError\n self.registry = {}\n for _, elem in etree.iterparse(BytesIO(page), html=True):\n if elem.tag in self.tags:\n anchor = elem.attrib.get('id', None)\n if anchor:\n content = normalize_content(elem.text)\n self.registry[content] = normalize_content(anchor)\n logger.debug(f'Generated registry:\\n{self.registry}')", "def extensionregistry():\n registry = ExtensionRegistry()\n yield registry", "def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n if 'paths' not in self.spec:\n raise RuntimeError(f'{self.spec_path} is not a valid OpenAPI spec.')\n for path_, path_info in self.spec['paths'].items():\n for verb, method_info in path_info.items():\n if verb.upper() not in HTTP_VERBS:\n continue\n ref_ext = {}\n ref_ext['tag'] = method_info['tags'][0]\n ref_ext['operation_id'] = method_info['operationId']\n key = self.REGISTRY_KEY_TEMPLATE.format(verb=verb.upper(),\n command=path_)\n self.registry[key] = ref_ext\n logger.debug(f'Generated registry:\\n{self.registry}')", "def iter_links(self):", "def stepregistry():\n registry = StepRegistry()\n yield registry", "def get_links(self):\r\n return self.__links", "def get_registries(self):\n raise NotImplementedError(\"get_registries method is not implemented.\")", "def get_links(self):\r\n return self.links", "def links(self):\r\n return links.RepoLinks(self)", "def get_links(self):\n return (link for link in self.links)", "def _link(self):\n return self._interface(self.fspath)", "def links(self) -> Sequence[Link]:\n return self._links", "def links(self):\n return self.container['links']", "def __graph__(self):\n\n graph = rdflib.Graph()\n for prefix, name in self.rml.namespaces():\n graph.namespace_manager.bind(prefix, name)\n return graph", "def link_name_list(self):\n return list(self._link_reg.keys())", "def __iter__(\n self,\n ) -> Generator[dict[str, str | int | bool | list[dict[str, str]]], None, None]:\n url = API_PATH[\"link_flair\"].format(subreddit=self.subreddit)\n yield from self.subreddit._reddit.get(url)", "def link(self, link):\r\n return links.Link(self, link)", "def links(self):\n\t\treturn self.list_of_links", "def link(self):\n return self.container['link']", "def fetch_registry_content(self):\n for registry_name, registry in 
self.registries.items():\n if not registry.source:\n continue\n registry.get_repositories()", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links" ]
[ "0.65705675", "0.64430135", "0.6358416", "0.63083994", "0.6102733", "0.604913", "0.5927782", "0.5793796", "0.5768209", "0.5747865", "0.570079", "0.5636082", "0.5598207", "0.5594115", "0.5587638", "0.55498993", "0.5464956", "0.5436829", "0.5404893", "0.5389639", "0.5378682", "0.53663427", "0.536518", "0.53599477", "0.5339666", "0.53363895", "0.5328057", "0.5328057", "0.5328057", "0.5328057" ]
0.6896771
0
The pattern registry (as property) or a generator for iteration (as function call) Returns PatternRegistry
def patterns(self): return self._pattern_reg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def get_pattern(self):\n if self.pattern is None:\n pattern_str = self.blueprint.pattern()\n pattern_file = self.remgr.lookup_pattern_file(self.blueprint, self.provider)\n self.pattern = pattern.Pattern(pattern_str, pattern_file)\n self.pattern.set_provider(self)\n return self.pattern", "def lookup_pattern(name):\n\treturn _registered_patterns[name]", "def get_pattern(self, name):\n return self.__patterns[name]", "def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern", "def registry(self):\n return self.__registry", "def iter_recipes(self, pattern):\n raise NotImplementedError()", "def getPattern(self):\n return self.pattern", "def pattern_factory(self):\n\t\treturn self.args[1]", "def extensionregistry():\n registry = ExtensionRegistry()\n yield registry", "def get_pattern(self, name):\n return self._pattern_reg[name]", "def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n if 'paths' not in self.spec:\n raise RuntimeError(f'{self.spec_path} is not a valid OpenAPI spec.')\n for path_, path_info in self.spec['paths'].items():\n for verb, method_info in path_info.items():\n if verb.upper() not in HTTP_VERBS:\n continue\n ref_ext = {}\n ref_ext['tag'] = method_info['tags'][0]\n ref_ext['operation_id'] = method_info['operationId']\n key = self.REGISTRY_KEY_TEMPLATE.format(verb=verb.upper(),\n command=path_)\n self.registry[key] = ref_ext\n logger.debug(f'Generated registry:\\n{self.registry}')", "def pattern_gen():\n pattern = \"\"\n\n return pattern", "def registry(self):\n return self._registry", "def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns", "def glob(self, pattern, lazy=False):\n def iterator():\n for filename in self.walk(lazy=lazy):\n if fnmatch(filename, pattern):\n yield self.__class__(filename)\n\n return lazy and iterator() or list(iterator())", "def Pattern(self):\r\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.statrequest.pattern.pattern import Pattern\r\n\t\treturn Pattern(self)", "def __init__(self, pattern):\r\n self.pattern = pattern", "def patterns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"patterns\")", "def pattern(self):\n return self[\"pattern\"]", "def pattern(self):\n return self[\"pattern\"]", "def lookup(self, pattern):\n with self.connect() as c:\n # so we can access results via dictionary\n c.row_factory = sqlite3.Row\n cur = c.cursor()\n for res in cur.execute(self.create_query(\"SELECT *\", pattern)).fetchall():\n yield res", "def get_patterns(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-patterns\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"get_patterns\",\n keywords=kwargs,\n params=parameters\n )", "def stepregistry():\n registry = StepRegistry()\n yield registry", "def get_patterns(\n self, pipeline: str, label: str, key: str\n ) -> List[Pattern]:", "def pattern(self):\n return self.get_data(\"pattern\")", "def listPatterns(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return 
TreeLikelihoodBase.listPatterns(self, False)", "def _generators(self):\n return self.free_group.generators", "def register( self, pattern, callback ):\n self.patterns.append((pattern, callback))", "def grep(self, pattern, flags=0, lazy=False):\n\n def iterator():\n for filename in self.walk(lazy=lazy):\n if re.search(pattern, filename, flags):\n yield self.__class__(filename)\n\n return lazy and iterator() or list(iterator())" ]
[ "0.65146554", "0.64546156", "0.633061", "0.6302383", "0.62347597", "0.62250704", "0.6207318", "0.61989623", "0.619185", "0.61466134", "0.60196435", "0.60170513", "0.59838694", "0.59704536", "0.5863253", "0.5800555", "0.57989776", "0.5760038", "0.5692418", "0.56686074", "0.56686074", "0.5622518", "0.5608212", "0.5564709", "0.5547028", "0.55424047", "0.55078954", "0.54650587", "0.5454312", "0.5453847" ]
0.6674461
0
The curve registry (as property) or a generator for iteration (as function call) Returns CurveRegistry
def curves(self): return self._curve_reg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_curve(self, curve):\n key = tuple(curve.points())\n if key not in self.curves:\n # new curve (lock and register)\n curve.is_locked = True # points list must not change, else not valid key\n self.curves[key] = curve\n return self.curves[key]", "def get_curve(self, name):\n return self._curve_reg[name]", "def getCurve(self, *args):\n return _libsbml.SpeciesReferenceGlyph_getCurve(self, *args)", "def registry(self):\n return self.__registry", "def getCurve(self, *args):\n return _libsbml.GeneralGlyph_getCurve(self, *args)", "def getCurve(self, *args):\n return _libsbml.ReferenceGlyph_getCurve(self, *args)", "def curve(self):\n return self.__curve", "def curve_name_list(self):\n return list(self._curve_reg.keys())", "def registry(self):\n return self._registry", "def clone(self):\n return _libsbml.Curve_clone(self)", "def setCurve(self, *args):\n return _libsbml.SpeciesReferenceGlyph_setCurve(self, *args)", "def stepregistry():\n registry = StepRegistry()\n yield registry", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def generate_registry(self):\n\n logger.debug(f'Generating registry for {self}')\n if 'paths' not in self.spec:\n raise RuntimeError(f'{self.spec_path} is not a valid OpenAPI spec.')\n for path_, path_info in self.spec['paths'].items():\n for verb, method_info in path_info.items():\n if verb.upper() not in HTTP_VERBS:\n continue\n ref_ext = {}\n ref_ext['tag'] = method_info['tags'][0]\n ref_ext['operation_id'] = method_info['operationId']\n key = self.REGISTRY_KEY_TEMPLATE.format(verb=verb.upper(),\n command=path_)\n self.registry[key] = ref_ext\n logger.debug(f'Generated registry:\\n{self.registry}')", "def getCurve(self, *args):\n return _libsbml.ReactionGlyph_getCurve(self, *args)", "def extensionregistry():\n registry = ExtensionRegistry()\n yield registry", "def _generate_instance(self):\n instance = {}\n\n for i in range(self._num_curves):\n curve_points = np.array([])\n valid_curve = False\n # The while loop will be executed until a valid curve is found. 
Each iteration of the for loop will produce\n # one valid curve, but it may take a while with large numbers in self._num_curves due to multiple \n # iterations of the while loop \n while not valid_curve:\n controlpoints = np.array(\n [np.array([random.uniform(-.5, 1.5), random.uniform(-.5, 1.5), random.uniform(-.5, 1.5)]) for _ in\n range(self._num_control_points)])\n curve = curve_factory.cubic_curve(controlpoints, 4)\n eval_points = np.linspace(0, 1, self._num_eval_points)\n curve_points = np.array(curve(eval_points))\n\n # get all points outside of the unit cube and delete them\n rows_to_delete = np.append(np.where(curve_points < 0)[0], np.where(curve_points > 1)[0])\n reference_rows = np.array(range(len(curve_points)))\n reference_rows = np.delete(reference_rows, rows_to_delete)\n # checks whether the generated curve is valid or not\n if len(reference_rows) < self._min_length or max(np.diff(reference_rows)) > 1:\n # All points are out of the unit cube or the segment which is outside of the unit cube which will be\n # deleted splits the curve in half, thus creating two seperate curve segments in the unit cube\n continue\n\n curve_points = np.delete(curve_points, rows_to_delete, axis=0)\n\n if len(instance) == 0:\n # First generated curve, therefore it cant be invalid in terms of minimal distance to other curves\n break\n\n for tmp_curve_id, tmp_curve_points in instance.items():\n distances = cdist(curve_points, tmp_curve_points)\n if np.min(distances) <= self._min_distance:\n # Generates new curve, because the current one is too close to at least one other curve\n valid_curve = False\n break\n # valid_curve is only true if the distances to all other curves is at least <_min_distance>\n valid_curve = True\n\n instance[f\"curve_{i}\"] = curve_points\n return instance", "def PricingAddCurves(builder, curves):\n return AddCurves(builder, curves)", "def getCurveExplicitlySet(self):\n return _libsbml.SpeciesReferenceGlyph_getCurveExplicitlySet(self)", "def Curve(self, *args):\n return _Adaptor3d.Adaptor3d_HCurve_Curve(self, *args)", "def GetCurve(self, *args):\n return _Adaptor3d.Adaptor3d_HCurve_GetCurve(self, *args)", "def efficiency_curves(self):\n for key in self._efficiency_curves:\n yield key, self._data[key]", "def ticker_generator():\n return (v for v in load_equities().values)", "def getAnimCurve(self, *args, **kwargs):\n ...", "def setCurve(self, *args):\n return _libsbml.GeneralGlyph_setCurve(self, *args)", "def setCurve(self, *args):\n return _libsbml.ReferenceGlyph_setCurve(self, *args)", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def pdf_curve(self):\n return dict(zip(self.strike_grid, list(map(lambda K: self.pdf(K), self.strike_grid))))", "def curve(self, index):\n if index >= len(self) or len(self) == 0:\n print('ERROR Class Graph method Curve: cannot find Curve (index',\n index, ', max possible', len(self), ')')\n return\n return self.data[index]", "def getCurveExplicitlySet(self):\n return _libsbml.ReferenceGlyph_getCurveExplicitlySet(self)" ]
[ "0.63213986", "0.6279888", "0.6143379", "0.5865785", "0.58409965", "0.57949567", "0.57655513", "0.5734903", "0.56743884", "0.5640423", "0.5621362", "0.5588055", "0.5549895", "0.55485934", "0.5528997", "0.55288696", "0.5470255", "0.5464915", "0.54572403", "0.5440797", "0.5402443", "0.5364114", "0.5290935", "0.528609", "0.5281743", "0.52521944", "0.5246384", "0.51421064", "0.5141707", "0.51328564" ]
0.7048159
0
Returns a generator to iterate over all sources. Returns: A generator in the format (name, object).
def sources(self): for source_name, source in self._sources.items(): yield source_name, source
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n for source in self._sources:\n for row in source.__iter__():\n yield row", "def Sources():\n return _sources", "def sources(source):\n\n source2 = models.Source(name=u\"Bob's Funerals.com\", url=u\"http://www.bobsfunerals.com\")\n source3 = models.Source(name=u\"Jim's Funerals.com\", url=u\"http://www.jimsfunerals.com\")\n return (source, source2, source3)", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def generators(self):\n return self._generators", "def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes", "def sources(self):\n raise NotImplementedError()", "def file_src_dest(self):\n yielded_dests = []\n for mgr_file in reversed(self.manager.contents):\n path = Path(mgr_file)\n for from_path in self.maybe_add_one_path(path):\n stem = from_path.relative_to(path) if path.is_dir() else path.name\n to_path = self.output_files_dir / stem\n resolved = str(to_path.resolve())\n if resolved in yielded_dests: # pragma: no cover\n self.log.debug(\"Already populated\", resolved)\n continue\n yielded_dests += [resolved]\n yield from_path, to_path", "def get_all(self) -> Generator:\n\n for filename in self.list_files():\n yield self.get(filename)", "def __iter__(self):\n yield from self.gen", "def _generators(self):\n return self.free_group.generators", "def virtualsources(self):\n yield from (obj for obj in self._auraliser.virtualsources if obj.subsource.name==self.name)", "def walk(self):\n for project in [self.get_project(name)\n for name in self.project_names]:\n for sample in [project.get_sample(idx)\n for idx in project.sample_ids]:\n yield (project,sample)", "def generate_files(self):\n import re\n for year, url in self.metadata.build.sources.items():\n zf = self.filesystem.download(url)\n for fn in self.filesystem.unzip_dir(zf, re.compile(r'.*all.*', re.IGNORECASE)):\n yield year, fn", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def XsamsSources(Sources):\n\n if not Sources:\n return\n yield '<Sources>'\n for Source in Sources:\n cont, ret = checkXML(Source)\n if cont:\n yield ret\n continue\n G = lambda name: GetValue(name, Source=Source)\n yield '<Source sourceID=\"B%s-%s\"><Authors>\\n' % (NODEID, G('SourceID'))\n authornames = G('SourceAuthorName')\n try:\n authornames = eval(authornames)\n except:\n pass\n if not isiterable(authornames):\n authornames = [authornames]\n for author in authornames:\n yield '<Author><Name>%s</Name></Author>\\n' % author\n\n yield \"\"\"</Authors>\n<Title>%s</Title>\n<Category>%s</Category>\n<Year>%s</Year>\n<SourceName>%s</SourceName>\"\"\" % ( G('SourceTitle'), G('SourceCategory'),\n G('SourceYear'), G('SourceName') )\n\n yield makeOptionalTag('Volume','SourceVolume',G)\n yield makeOptionalTag('PageBegin','SourcePageBegin',G)\n yield makeOptionalTag('PageEnd','SourcePageEnd',G)\n yield makeOptionalTag('UniformResourceIdentifier','SourceURI',G)\n yield makeOptionalTag('DigitalObjectIdentifier','SourceDOI',G)\n yield makeOptionalTag('Comments','SourceComments',G)\n yield '</Source>\\n'\n yield '</Sources>\\n'", "def sources(obj, reftype):", "def __iter__(self):\n yield from self.url.generator", "def __iter__(self) :\n for s in self._samples_to_cache :\n yield s", "def filelist_generator(self):\n for filename in self.filenames:\n yield filename", "def __iter__(self):\n return 
self.new_generator()", "def __iter__(self):\n\n if self.output_mode:\n process_atom = self._process_atom_output\n self.output_names = self.names[:]\n else:\n process_atom = self._process_atom\n\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\nProcessed Atoms:\")\n for clause in self.source:\n if isinstance(clause, Clause):\n if clause.head.functor == \"query\" and clause.head.arity == 1:\n continue\n extra_clauses = process_atom(clause.head, clause.body)\n for extra in extra_clauses:\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\t\" + str(extra))\n yield extra\n elif isinstance(clause, AnnotatedDisjunction):\n extra_clauses = process_atom(Or.from_list(clause.heads), clause.body)\n for extra in extra_clauses:\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\t\" + str(extra))\n yield extra\n else:\n if clause.functor == \"query\" and clause.arity == 1:\n continue\n # Fact\n extra_clauses = process_atom(clause, None)\n for extra in extra_clauses:\n if self.output_mode is False:\n getLogger(\"problog_lfi\").debug(\"\\t\" + str(extra))\n yield extra\n\n if self.leakprob is not None:\n leakprob_atoms = self._get_leakprobatoms()\n for example_atom in leakprob_atoms:\n yield example_atom.with_probability(Constant(self.leakprob))", "def generators(self) -> List[Generator]:\n return self._generators", "def readSources(self):\n for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):\n # shall we just read the UFO here?\n filename = sourceElement.attrib.get('filename')\n # filename is a path relaive to the documentpath. resolve first.\n sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))\n sourceName = sourceElement.attrib.get('name')\n if sourceName is None:\n # if the source element has no name attribute\n # (some authoring tools do not need them)\n # then we should make a temporary one. 
We still need it for reference.\n sourceName = \"temp_master.%d\"%(sourceCount)\n self.reportProgress(\"prep\", 'load', sourcePath)\n if not os.path.exists(sourcePath):\n raise MutatorError(\"Source not found at %s\"%sourcePath)\n sourceObject = self._instantiateFont(sourcePath)\n # read the locations\n sourceLocationObject = None\n sourceLocationObject = self.locationFromElement(sourceElement)\n\n if sourceLocationObject is None:\n raise MutatorError(\"No location defined for source %s\"%sourceName)\n\n # read lib flag\n for libElement in sourceElement.findall('.lib'):\n if libElement.attrib.get('copy') == '1':\n self.libSource = sourceName\n\n # read the groups flag\n for groupsElement in sourceElement.findall('.groups'):\n if groupsElement.attrib.get('copy') == '1':\n self.groupsSource = sourceName\n\n # read the info flag\n for infoElement in sourceElement.findall(\".info\"):\n if infoElement.attrib.get('copy') == '1':\n self.infoSource = sourceName\n if infoElement.attrib.get('mute') == '1':\n self.muted['info'].append(sourceName)\n\n # read the features flag\n for featuresElement in sourceElement.findall(\".features\"):\n if featuresElement.attrib.get('copy') == '1':\n if self.featuresSource is not None:\n self.featuresSource = None\n else:\n self.featuresSource = sourceName\n\n mutedGlyphs = []\n for glyphElement in sourceElement.findall(\".glyph\"):\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n continue\n if glyphElement.attrib.get('mute') == '1':\n if not sourceName in self.muted['glyphs']:\n self.muted['glyphs'][sourceName] = []\n self.muted['glyphs'][sourceName].append(glyphName)\n\n for kerningElement in sourceElement.findall(\".kerning\"):\n if kerningElement.attrib.get('mute') == '1':\n self.muted['kerning'].append(sourceName)\n\n # store\n self.sources[sourceName] = sourceObject, sourceLocationObject\n self.reportProgress(\"prep\", 'done')", "def inference_generator(self):\r\n self.initialize_if_not(training=False)\r\n self.checkpoint.load_all() # Load available weights\r\n\r\n # TODO: Make more generic by not picking first source\r\n data_source = next(iter(self._train_data.values()))\r\n while True:\r\n fetches = dict(self.output_tensors['train'], **data_source.output_tensors)\r\n start_time = time.time()\r\n outputs = self._tensorflow_session.run(\r\n fetches=fetches,\r\n feed_dict={\r\n self.is_training: False,\r\n self.use_batch_statistics: True,\r\n },\r\n )\r\n outputs['inference_time'] = 1e3*(time.time() - start_time)\r\n yield outputs", "def __iter__(self):\n for p in self.paths:\n yield Document.load(os.path.join(self.dirpath, p), fmt=self.fmt)", "def examples(self):\n for obj_ind in range(len(self.objects)):\n yield self.get_object_intent_by_index(obj_ind)", "def sources(self):\n return self._sources", "def setup(self):\n for gen in self._generators:\n gen.setup()", "def yield_sources_and_targets(\n input_file,\n input_format):\n if input_format == 'wikisplit':\n yield_example_fn = _yield_wikisplit_examples\n elif input_format == 'discofuse':\n yield_example_fn = _yield_discofuse_examples\n elif input_format == 'rewrite':\n yield_example_fn = _yield_rewrite_examples\n else:\n raise ValueError('Unsupported input_format: {}'.format(input_format))\n\n for sources, target in yield_example_fn(input_file):\n yield sources, target" ]
[ "0.6933318", "0.6585145", "0.63167036", "0.62444586", "0.6230182", "0.62263227", "0.6188514", "0.6182752", "0.6164078", "0.6110404", "0.6073849", "0.6025064", "0.5983164", "0.5980554", "0.59729177", "0.5920594", "0.5912745", "0.5887372", "0.5886389", "0.5871467", "0.5869842", "0.5840708", "0.582812", "0.5823887", "0.57947445", "0.57800394", "0.57687294", "0.5766515", "0.57632256", "0.5760951" ]
0.7837107
0
Returns a generator to iterate over all controls. Returns: A generator in the format (name, object).
def controls(self): for control_name, control in self._controls.items(): yield control_name, control
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_controls(self, recurse: bool) -> Iterator[cat.Control]:\n if self._catalog.groups:\n for group in self._catalog.groups:\n controls = self._get_all_controls_in_group(group, recurse)\n for control in controls:\n yield control\n if self._catalog.controls:\n cat_controls = self._get_all_controls_in_list(self._catalog.controls, recurse)\n for control in cat_controls:\n yield control", "def getControls(self):", "def generate_control_list(label: str, count: int) -> List[cat.Control]:\n controls: List[cat.Control] = []\n for ii in range(count):\n control = generators.generate_sample_model(cat.Control, True)\n control.id = f'{label}-{ii + 1}'\n control.params[0].id = f'{control.id}.param'\n controls.append(control)\n return controls", "def get_objects(self):\n return \\\n self,\\\n self.label,\\\n self.frame_controls, \\\n (\n self.button_decrease,\n self.scale_volume,\n self.button_increase\n )", "def __iter__(self):\n return self._visible_setting_names_gen", "def __iter__(self):\n yield from self.gen", "def all_control_names(self):\n return self._get_control_names(\n zope.testbrowser.interfaces.IControl, self.getForm())", "def iterate(cls):\n for name, value in vars(cls).iteritems():\n if name.startswith('__'):\n continue\n yield (name, value)", "def build_controls(self):\n controlSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n btnData = [{'bitmap':'player_pause.png', \n 'handler':self.on_pause, 'name':'pause'},\n {'bitmap':'player_stop.png',\n 'handler':self.on_stop, 'name':'stop'}]\n for btn in btnData:\n self.build_btn(btn, controlSizer)\n \n return controlSizer", "def control_name_list(self):\n return list(self._controls.keys())", "def fieldsIterator(self):\n for name, field in self.fields.items():\n renderer = self.renderers.get(name)\n if renderer:\n value = renderer(self.instance)\n else:\n value = getattr(self.instance, name)\n yield field.verbose_name, value", "def toControls(self,widget):", "def items(self):\n for name in self.fields:\n yield name, getattr(self, name)", "def __iter__(self):\n handle = self.parent.handle\n cur = getattr(gv, \"first%s\" % self.type)(handle)\n nextitem = getattr(gv, \"next%s\" % self.type)\n while gv.ok(cur):\n yield self.get(gv.nameof(cur))\n cur = nextitem(handle, cur)", "def _generators(self):\n return self.free_group.generators", "def iterator(self):\n yield", "def __iter__(self):\n for name, field in self.iterate_over_fields():\n yield name, field", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def __iter__(self):\n return iter(vars(self.obj))", "def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]", "def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s", "def iter_inputs(self) -> Iterable[str]:\n yield from self.static_inputs\n if self.allow_dynamic and self.dynamic_inputs:\n yield from self.dynamic_inputs", "def __iter__(self):\n\n for each in list(self.keys()):\n yield each", "def __call__(self):\n for name in self:\n try:\n yield getattr(self, name)\n except AttributeError:\n raise KeyError(name)", "def __iter__(self):\n for o in self._iter:\n yield o", "def _get_default_controls(self):\n\n pass", "def __next__(self):\n for child in self.children:\n yield child", "def semigroup_generators(self):", "def iterProperties(cls):\n meta = 
cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element" ]
[ "0.6908673", "0.6539071", "0.628452", "0.5979725", "0.58603287", "0.5804641", "0.5767439", "0.5740077", "0.5725284", "0.5711476", "0.5673429", "0.5667829", "0.5641358", "0.56149", "0.5614355", "0.5590635", "0.5561186", "0.55272454", "0.5511409", "0.55027884", "0.55009913", "0.54894876", "0.54664594", "0.5429488", "0.5422977", "0.5416906", "0.5416564", "0.5412063", "0.5407622", "0.5404564" ]
0.81733114
0
Iterator over all junctions
def junctions(self): return self._node_reg.junctions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def junctions(self):\n for node_name in self._junctions:\n yield node_name, self._data[node_name]", "def __iter__(self):\n return iter(self.adjacent)", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def __iter__(self):\n for i in range(len(self.ks)):\n yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\\\n [self.ks[i]], self.iss", "def __iter__(self):\n return iter(self.parents)", "def __iter__(self):\n return iter(self.chain_list)", "def iteridents(self):\n raise NotImplementedError", "def __iter__(self):\n leaf_paths, leaf_vals = self._find_combinatorial_leaves()\n return self._combinations_generator(leaf_paths, leaf_vals)", "def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y", "def __iter__(self):\n\n for i in self._children:\n yield i", "def __iter__(self) -> Iterable[\"AbstractLane\"]:\n for origin in self.graph:\n for destination in self.graph[origin]:\n for index, lane in self.graph[origin][destination].items():\n yield lane", "def iter(self):\n for elem in self:\n if isinstance(elem, Tree):\n for elem2 in elem.iter:\n yield elem2\n else:\n yield elem", "def iter_links(self):", "def __iter__(self):\n for child in self.children:\n yield child", "def __iter__(self):\n return iter(self.chain)", "def iter_chains(self):\n return iter(self.chain_list)", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__(self) -> Iterable[Node]:", "def __iter__(self):\n for x in self.innings:\n yield x", "def junction_names(self):\n return self._junctions", "def __iter__(self):\n for node in self.grammar.walk():\n yield node", "def __iter__(self):\n return iter(self.neighbors.keys())", "def iter_links(self):\n for site in self.iter_sites():\n for u in range(self.dim):\n yield tuple(list(site) + [u])", "def __iter__(self):\n for o in self._iter:\n yield o", "def iter(self):\n\n current = self.head\n while current:\n yield current\n current = current.next", "def __iter__(self):\n return iter(self.__children)", "def __iter__(self):\n for id in self.order():\n inputs = [w for w in self.wires if w['target'][0] == id]\n yield id, inputs" ]
[ "0.8278087", "0.6693829", "0.66877705", "0.64899516", "0.6466883", "0.6348325", "0.6335292", "0.6325255", "0.6298924", "0.62037104", "0.6182745", "0.6135457", "0.611039", "0.61055434", "0.6095036", "0.604867", "0.6037925", "0.6037925", "0.6037925", "0.6037925", "0.6032833", "0.60228103", "0.602279", "0.60131454", "0.60101753", "0.5996973", "0.599056", "0.598508", "0.59645975", "0.5962997" ]
0.6854161
1
Iterator over all tanks
def tanks(self): return self._node_reg.tanks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tanks(self):\n for node_name in self._tanks:\n yield node_name, self._data[node_name]", "def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key", "def IterRetainedRanks(\n self: \"HereditaryStratumOrderedStoreList\",\n ) -> typing.Iterator[int]:\n # must make copy to prevent invalidation when strata are deleted\n # note, however, that copy is made lazily\n # (only when first item requested)\n ranks = [stratum.GetDepositionRank() for stratum in self._data]\n for rank in ranks:\n assert rank is not None\n yield rank", "def tank_names(self):\n return self._tanks", "def __iter__(self):\n for benchclass in sorted(self.classes.values()):\n yield benchclass", "def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node", "def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s", "def __iter__(self):\n n = self.head\n for _ in range(len(self)):\n if n == self.capacity:\n n = 0\n yield self.lst[n]\n n += 1", "def __iter__(self):\n for x in self.innings:\n yield x", "def __iter__(self):\n i = self.head\n while True:\n if not i:\n break\n yield i\n i = i.next\n if not i:\n break", "def iter(self):\n\n current = self.head\n while current:\n yield current\n current = current.next", "def iter_all(self):\n for i in range(self.num_nodes):\n self.iter_node(i)", "def __iter__(self) -> Iterable[Node]:", "def __iter__(self):\r\n \r\n return iter(self._by_number)", "def __iter__(self):\n cur = self.head\n while cur:\n yield cur.item\n cur = cur.next", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__(self):\n return iter(self[h.shards.whole])", "def iterator(self):\n yield", "def __iter__(self):\n for x in self._order:\n yield x", "def iter_nodes(self):", "def __iter__(self):\n\t\treturn self.keys()", "def iter(self):\n for elem in self:\n if isinstance(elem, Tree):\n for elem2 in elem.iter:\n yield elem2\n else:\n yield elem", "def __iter__(self):\n for value in dict.__iter__(self):\n for count in range(self[value]):\n yield value", "def __iter__(self):\n return iterkeys(self._ngrams)", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def get_ties(self,rank):\n self._check_ties(rank)\n for k,v in self._ties.iteritems():\n print (self._rid+\",\"+str(k)+\",\"+str(v)).strip()" ]
[ "0.80246234", "0.61414534", "0.6067089", "0.60653913", "0.60528237", "0.59743404", "0.59675145", "0.59599507", "0.59582263", "0.5940111", "0.5920663", "0.59127057", "0.58748466", "0.5863908", "0.5848202", "0.581613", "0.581613", "0.581613", "0.581613", "0.5812649", "0.58057857", "0.5778304", "0.57433736", "0.5727427", "0.57246983", "0.57226753", "0.5722346", "0.57194453", "0.57194453", "0.5714289" ]
0.6538602
1
Adds a tank to the water network model
def add_tank(self, name, elevation=0.0, init_level=3.048, min_level=0.0, max_level=6.096, diameter=15.24, min_vol=0.0, vol_curve=None, overflow=False, coordinates=None): self._node_reg.add_tank(name, elevation, init_level, min_level, max_level, diameter, min_vol, vol_curve, overflow, coordinates)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tank(self, name, elevation=0.0, init_level=3.048,\n min_level=0.0, max_level=6.096, diameter=15.24,\n min_vol=0.0, vol_curve=None, overflow=False, \n coordinates=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(elevation, (int, float)), \"elevation must be a float\"\n assert isinstance(init_level, (int, float)), \"init_level must be a float\"\n assert isinstance(min_level, (int, float)), \"min_level must be a float\"\n assert isinstance(max_level, (int, float)), \"max_level must be a float\"\n assert isinstance(diameter, (int, float)), \"diameter must be a float\"\n assert isinstance(min_vol, (int, float)), \"min_vol must be a float\"\n assert isinstance(vol_curve, (type(None), str)), \"vol_curve must be a string\"\n assert isinstance(overflow, (type(None), str, bool, int)), \"overflow must be a bool, 'YES' or 'NO, or 0 or 1\"\n assert isinstance(coordinates, (type(None), (tuple,list,))), \"coordinates must be a tuple\"\n \n elevation = float(elevation)\n init_level = float(init_level)\n min_level = float(min_level)\n max_level = float(max_level)\n diameter = float(diameter)\n min_vol = float(min_vol)\n if init_level < min_level:\n raise ValueError(\"Initial tank level must be greater than or equal to the tank minimum level.\")\n if init_level > max_level:\n raise ValueError(\"Initial tank level must be less than or equal to the tank maximum level.\")\n if vol_curve is not None and vol_curve != '*':\n if not isinstance(vol_curve, six.string_types):\n raise ValueError('Volume curve name must be a string')\n elif not vol_curve in self._curve_reg.volume_curve_names:\n raise ValueError('The volume curve ' + vol_curve + ' is not one of the curves in the ' +\n 'list of volume curves. 
Valid volume curves are:' + \n str(self._curve_reg.volume_curve_names))\n vcurve = np.array(self._curve_reg[vol_curve].points)\n if min_level < vcurve[0,0]:\n raise ValueError(('The volume curve ' + vol_curve + ' has a minimum value ({0:5.2f}) \\n' +\n 'greater than the minimum level for tank \"' + name + '\" ({1:5.2f})\\n' +\n 'please correct the user input.').format(vcurve[0,0],min_level))\n elif max_level > vcurve[-1,0]:\n raise ValueError(('The volume curve ' + vol_curve + ' has a maximum value ({0:5.2f}) \\n' +\n 'less than the maximum level for tank \"' + name + '\" ({1:5.2f})\\n' +\n 'please correct the user input.').format(vcurve[-1,0],max_level))\n\n tank = Tank(name, self)\n tank.elevation = elevation\n tank.init_level = init_level\n tank.min_level = min_level\n tank.max_level = max_level\n tank.diameter = diameter\n tank.min_vol = min_vol\n tank.vol_curve_name = vol_curve\n tank.overflow = overflow\n self[name] = tank\n if coordinates is not None:\n tank.coordinates = coordinates", "def add_to_water_level(self, amount):\n LandCell.add_to_water_level(self, amount)\n if self.water_level > 0:\n self.reset_food_level()", "def add_to_water_level(self, amount):\n self.water_level += amount\n if self.water_level < 0:\n self.water_level = 0.0", "def add_inputt(self, name='T', control=False):\n inpt = InputT(name=name)\n self.nodes[name] = inpt\n self.rc.add_node(inpt)\n if control: # control input\n if name in self.inp.keys():\n raise Exception('Input temperature already defined')\n self.inp[name] = inpt\n else: # disturbance\n if name in self.dist.keys():\n raise Exception('Input temperature already defined')\n self.dist[name] = inpt", "def _addTurtle(self,turt):\n assert (type(turt) == Turtle), \"Parameter %s is not a valid Turtle object\" % `turt`\n self._turtles.append(turt)", "def add_fuel(self, amount):\n if (self.fuel_level + amount\n <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel.\")\n else:\n print(\"The tank won't hold that much.\")", "def add_road(ccTremb):\n pass", "def fill_tank(self):\n print(\"This car has no fuel tank!\")", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def add_weight(self):\r\n\r\n # Get the csrf token\r\n csrf = self.extract_csrf('https://wger.de/en/weight/add/')\r\n # Adding referer to the headers\r\n self.headers['Referer'] = API.url_weight\r\n\r\n # Take the weight entires from TOML file\r\n entries = self.cfg.get('payload', {}).get('weight')\r\n # Check for valid entires\r\n if entries:\r\n for payload in entries:\r\n # Add csrf token to payload\r\n payload['csrfmiddlewaretoken'] = csrf\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/weight.json', test=payload)\r\n # Post request\r\n self.add_post(payload, API.url_weight, self.weights)\r\n \r\n # Eliminates the referer from the headers\r\n self.headers.pop('Referer')", "def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)", "def add_pagerank(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n pg = ig.pagerank()\n pgvs = []\n for p in zip(ig.vs, pg):\n print(p)\n pgvs.append({\"name\": p[0][\"name\"], \"pg\": p[1]})\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.pagerank = n.pg\n '''\n\n self.graph.run(write_clusters_query, nodes=pgvs)", "def add_fuel(self, amount):\n if (self.fuel_level + amount <= 
self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def fill_gas_tank(self):\n print(\"Filling the tank for\", self.get_descriptive_name())", "def addTN(self, num=1):\n self.tn += num", "def addTP(self, num=1):\n self.tp += num", "def add(self, rank, birth_year, enlisting_year, shirt_color, name):\n # Your implementation here", "def GachaCraftNodeExcelAddTier(builder, Tier):\n return AddTier(builder, Tier)", "def __add_boundary_contrib_prediction(self, bc, b_idx):\n if bc is not None:\n if bc.boundary_condition_type is configuration.BoundaryConditionType.DIRICHLET:\n self.ustar[b_idx] = bc.value(self.time)\n else:\n self.ustar[b_idx] += self.timestep * self.timestep * bc.value(self.time)", "def add(self, obs_t, action, reward, obs_tp1, done):\n if random.uniform(0,1) < self.fifo_frac:\n self.fifo_buffer.add(obs_t, action, reward, obs_tp1, done)\n else:\n self.reservoir_buffer.add(obs_t, action, reward, obs_tp1, done)", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)", "def add_layer(self, num_nodes, transfer_function=\"Linear\"):\r\n self.weights.append(np.random.randn(self.input_dimension, num_nodes))\r\n self.biases.append(np.random.randn(num_nodes))\r\n self.transferfunction.append(transfer_function)\r\n self.input_dimension = num_nodes", "def add_trees(t1, t2):\n \"*** YOUR CODE HERE ***\"", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "def fill_gas_tank(self):\n print(\"\\nThis car need a gas tank!\")", "def addNeighbor(self, neighbor):", "def update(self):\n\n # check to see how much time has passed\n new_time = time.time()\n dt = new_time - self.last_time_stamp\n self.last_time_stamp = new_time\n\n if dt > MAX_DT_PER_UPDATE or self.fast_forward:\n dt = MAX_DT_PER_UPDATE\n\n\n # if we haven't reached the next turn, just update everything\n if dt < self.t_minus:\n self.real_time_update(dt)\n\n # if we HAVE reached the next turn, run up until the turn\n # then do the turn\n # then run the remaining time\n else:\n\n self.real_time_update(self.t_minus)\n\n # add new tanks, if necessary\n for newid in self.pending_tank_ids:\n for t in self.tanks.itervalues():\n if t.ID == newid:\n t.reload_ai()\n break\n else:\n # Tank doesn't already exist! \n # Add it if there is an AI and there is a color left to assign to\n if os.path.isfile(\"../data/\"+newid+\".py\"):\n if len(self.color_queue) > 0:\n try:\n (x,y) = get_rand_spawn_space()\n newtank = Tank(newid,\n \"../data/\"+newid+\".py\",\n copy.deepcopy(self.perma_board),\n x,y)\n except SandboxCodeExecutionFailed:\n # Couldn't create tank. 
Skip to next tank\n pass\n else:\n self.assign_color(newtank)\n self.tanks[newid] = newtank\n self.scores[newid] = 0\n # Move on to next tank\n self.pending_tank_ids = []\n\n # take the turns!\n tank_coords = {}\n # record positions so that we can give info to the AIs\n for t in self.tanks.itervalues():\n tank_coords[t.ID] = [t.x_pos,t.y_pos]\n # run each individual AI in a random order\n random_tanks = self.tanks.values()\n random.shuffle(random_tanks)\n for t in random_tanks:\n bullet = t.take_turn(tank_coords)\n if bullet:\n self.bullets += [bullet]\n # update all the appropriate stats\n for t in self.tanks.itervalues():\n t.update_stat_file()\n\n self.real_time_update(dt - self.t_minus)\n self.t_minus = TURN_RATE", "def add_to_simulation(self, tick):\n self.current_route_begin_tick = tick\n try:\n traci.vehicle.add(self.id, self._create_new_route(tick), tick, -4, -3)\n traci.vehicle.subscribe(self.id, (tc.VAR_ROAD_ID,))\n # ! currently disabled for performance reasons\n # traci.vehicle.setAccel(self.id, self.acceleration)\n # traci.vehicle.setDecel(self.id, self.deceleration)\n # traci.vehicle.setImperfection(self.id, self.imperfection)\n\n # dump car is using SUMO default routing, so we reroute using the same target\n # putting the next line left == ALL SUMO ROUTING\n traci.vehicle.changeTarget(self.id, self.current_router_result.edges[-1])\n except Exception as e:\n print(\"error adding --> \" + str(e))\n # try recursion, as this should normally work\n # self.addToSimulation(tick)", "def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer", "async def addTier(self, ctx, tier):\n server_dict = self.get_server_dict(ctx)\n tierList = server_dict.setdefault(\"Tiers\", [])\n \n try:\n tierList.append(tier)\n self.save_data()\n await self.bot.say(\":white_check_mark: {0} added to tier list\".format(tier))\n except:\n await self.bot.say(\":x: Error adding {0} to the tier list\".format(tier))" ]
[ "0.59383786", "0.5583475", "0.55089575", "0.55066186", "0.54286695", "0.53669107", "0.535145", "0.5351197", "0.5335581", "0.53225297", "0.53063965", "0.5265405", "0.5245383", "0.5238348", "0.51864153", "0.5177752", "0.51733613", "0.51558095", "0.5112751", "0.5104057", "0.5101336", "0.50612885", "0.5055933", "0.5026148", "0.5020735", "0.5000034", "0.4995747", "0.49950835", "0.49750608", "0.4971957" ]
0.6289887
0
Adds a reservoir to the water network model
def add_reservoir(self, name, base_head=0.0, head_pattern=None, coordinates=None): self._node_reg.add_reservoir(name, base_head, head_pattern, coordinates)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_reservoir(self, name, base_head=0.0, head_pattern=None, coordinates=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(base_head, (int, float)), \"base_head must be float\"\n assert isinstance(head_pattern, (type(None), str)), \"head_pattern must be a string\"\n assert isinstance(coordinates, (type(None), (tuple, list))), \"coordinates must be a tuple\"\n \n base_head = float(base_head)\n\n reservoir = Reservoir(name, self)\n reservoir.base_head = base_head\n reservoir.head_pattern_name = head_pattern\n self[name] = reservoir\n if coordinates is not None:\n reservoir.coordinates = coordinates", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def add_new_arrival(self):\n pass", "def AddNeuron(self, neur):\n self.neurons.append(neur)", "def add_reservation(self, src, dst,duration, bandwidth):\n \n # locks the self.current_reservations data structure. This is done\n # because there is a thread that could access it concurrently.\n with self.update_lock:\n\n # PART 1, TASK 3.4 check if there is an existing reservation for (src,dst). \n # you can use the self.current_reservations dictionary to check it.\n # If the reservation exists get the path and bw and update the links capacity \n # data structure using `self.add_link_capacity(path, bw)`\n \n # PART 1, TASK 3.1. Once get_available_path is implemented call it to get a path.\n path = self.get_available_path(src, dst, bandwidth)\n\n # PART 1, TASK 3.2 If there is an available path \n if path: \n pass\n # PART 1, TASK 3.2 Get mpls stack of labels\n\n # PART 1, TASK 3.3 get:\n # 1) ingress switch name\n # 2) action name using `mpls_ingress_x_hop` set x as number of labels\n # 3) src and dst ips (your match)\n # 4) make sure all your labels are strings and use them as action parameters\n\n # PART 1, TASK 3.4\n\n # check if its a new or an existing reservation (to update)\n\n # add entry or modify\n # PART 2 TASK 1.4 Configure the associated meter properly.\n\n # update controllers data structures: self.current_reservation & self.links_capacity\n \n\n # PART 1, TASK 3.2 otherwise we print no path available\n else:\n # PART 1, task 4.3 if we dont find a path but the reservation existed\n # you have to erase it while making sure you update links_capacity accordingly \n print(\"\\033[91mRESERVATION FAILURE: no bandwidth available!\\033[0m\")", "def __add__(self, region):\n return Sequence(\n self.weight + region.weight,\n region.finish,\n (*self.regs, region)\n )", "def with_water(self, water):\n self.ingredients.append(water)\n return self", "def add_guest(self, src: int, weight: float):\r\n if not self.has_guest(src):\r\n self.guests[src] = weight", "def add(self, p, s, node) -> None:\n self.place.append(p)\n self.station.append(s)\n self.pi.append(node.pi[p, s] if p != float('inf') else float('inf'))\n self.noncoverage.append(node.noncoverage.left + node.noncoverage.right)\n self.cost.append(node.cost)\n self.delay.append(node.delay)\n self.step.append(node.key)", "def addToReservation():\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, 
x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1", "def __init__(self, reservoir, input_weights=None, neuron_type=\"tanh\", \n output_type=\"sigmoid\", init_state=\"zeros\", neuron_pars={}, output_neuron_pars={}):\n\n # Weights\n self.reservoir = reservoir\n self.num_neurons = self.reservoir.shape[0]\n self.input_weights = input_weights\n\n # Set neuron types (reservoir)\n self.neuron_pars = neuron_pars\n self.neuron_type = neuron_type\n if self.neuron_type == \"tanh\":\n self.activation_function = self.tanh\n elif self.neuron_type == \"sigmoid\":\n self.activation_function = partial(self.sigmoid, **neuron_pars)\n elif self.neuron_type == \"RLU\":\n self.activation_function = partial(self.rectified_linear_unit, **neuron_pars)\n elif self.neuron_type == \"heaviside\":\n self.activation_function = partial(self.heaviside, **neuron_pars)\n # Set neuron types (output neuron)\n self.output_type = output_type\n if self.output_type == \"tanh\":\n self.output_function = partial(self.tanh, **output_neuron_pars)\n elif self.output_type == \"sigmoid\":\n self.output_function = partial(self.sigmoid, **output_neuron_pars)\n elif self.output_type == \"identity\":\n self.output_function = partial(self.identity, **output_neuron_pars)\n elif self.output_type == \"heaviside\":\n self.output_function = partial(self.heaviside, **output_neuron_pars)\n\n # Generate initial system state\n self.init_state = init_state\n self.current_state = self.GenerateInitialState(self.init_state)\n self.network_history = [ ]", "def reservoir_generator(\n number_of_reservoirs=None, sparsity_level=None, reservoir_dim=None\n):\n reservoir_computing = ReservoirComputing(\n reservoir_dim=reservoir_dim, sparsity_level=sparsity_level\n )\n\n return [reservoir_computing.create_reservoir() for n in range(number_of_reservoirs)]", "def add(self, obs_t, action, reward, obs_tp1, done):\n if 
random.uniform(0,1) < self.fifo_frac:\n self.fifo_buffer.add(obs_t, action, reward, obs_tp1, done)\n else:\n self.reservoir_buffer.add(obs_t, action, reward, obs_tp1, done)", "def add(self, residential_unit):\n self._residential_units.add(residential_unit)", "def add_entry(source,lbs):\n\tnow = datetime.now()\n\tdate = now.strftime('%m-%d-%Y')\n\tdata = {date: {'Date': date, 'Weight': lbs}}\n\tsource.inject(data)", "def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))", "def insert_from(self, granary):\n self.gold += granary.gold\n self.wood += granary.wood\n self.stone += granary.stone\n self.food += granary.food\n granary.empty_granary()", "def _addInlet(self, inlet, other): \n self._inlets.append(inlet)\n if self._type == 2 and other._type == 1:\n self._reservoirs.append(other)", "def save (self) :\n\n try:\n # This opens and closes the connections to the database.\n # Since writings occur only every 10 minutes it wouldn't be efficient to\n # let the connection open.\n db = MySQLdb.connect(\n host=config.db_host, db=config.db_name, user=config.db_user, passwd=config.db_password)\n cur = db.cursor()\n\n # get the reservoir height to be able to measure the water level\n sqlQuery = \"SELECT heigth FROM main_reservoir WHERE res_id = {}\".format(self.reservoir)\n cur.execute(sqlQuery)\n # self.waterGap is the height of the part of the reservoir\n # that's out of water. reservoir height minus that value will give the actual water level\n reservoirHeight = cur.fetchone()[0]\n waterLevel = reservoirHeight - self.waterGap\n\n sqlQuery = \"\"\"\n INSERT INTO main_measurement (packetNr, waterLevel, pH, conductivity, reservoir_id, dateTime, salinity, tds)\n VALUES ({}, {}, {}, {}, {}, now(), {}, {})\n \"\"\".format(\n self.packetNr,\n waterLevel,\n self.pH,\n self.conductivity,\n self.reservoir,\n self.salinity,\n self.tds)\n cur.execute(sqlQuery)\n\n db.commit()\n db.close()\n except Exception as e:\n print('[Measurement#save] failed to save instance: {}'.format(e))", "def add_station(self, station):\n self.__stations.append(station)", "def __init__(self, fluid_reservoir=None, all_tubing=None, onchip_reservoir=None):\n self.fluid_reservoir=fluid_reservoir\n self.all_tubing = all_tubing\n self.onchip_reservoir = onchip_reservoir", "def add_weight(self):\r\n\r\n # Get the csrf token\r\n csrf = self.extract_csrf('https://wger.de/en/weight/add/')\r\n # Adding referer to the headers\r\n self.headers['Referer'] = API.url_weight\r\n\r\n # Take the weight entires from TOML file\r\n entries = self.cfg.get('payload', {}).get('weight')\r\n # Check for valid entires\r\n if entries:\r\n for payload in entries:\r\n # Add csrf token to payload\r\n payload['csrfmiddlewaretoken'] = csrf\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/weight.json', test=payload)\r\n # Post request\r\n self.add_post(payload, API.url_weight, self.weights)\r\n \r\n # Eliminates the referer from the headers\r\n self.headers.pop('Referer')", "def addRover(self, rover):\n self.roverList.append(rover)", "def add_rw(self, rd, wrt, inst):\n re_rd = self._rewrite_rd(rd)\n re_wrt = self._rewrite_wrt(wrt)\n if re_rd != \"\" or re_wrt != \"\":\n self.all_rw_list.insert(0, (re_rd, re_wrt, inst))\n log.debug(\"ADD_RW: [%s] /%s:%s/->/%s:%s/\" \\\n % (inst, re_rd, rd, re_wrt, wrt))", "def add(self, destination: n, weight: w):\n self.connections[destination] = weight", "def add(self):\n pass", "def add(source):\n global ostars_\n ostars_.append(source)", "def feed(self, instruction):\n assert 
len(self.future_buffer) < self.CAPACITY,\\\n 'ReservationStation fed when full'\n self.future_buffer.append(instruction)", "def add(self, item):", "def reserve(self, item, strict=True):\n out.info(\"Trying to reserve {} from pool {}\\n\".format(str(item),\n self.__class__.__name__))\n if item in self.used:\n if strict:\n raise Exception(\"Trying to reserve a used item\")\n else:\n self.used.add(item)" ]
[ "0.61060196", "0.6085037", "0.5708459", "0.57015747", "0.55982244", "0.5592088", "0.5579086", "0.5543703", "0.55212134", "0.5517631", "0.54950887", "0.54280555", "0.5407858", "0.53472775", "0.53240216", "0.531642", "0.5312082", "0.52957845", "0.5283269", "0.5282811", "0.5282764", "0.52399606", "0.5236101", "0.5234876", "0.52063364", "0.5199609", "0.51974326", "0.51913315", "0.5186431", "0.51838624" ]
0.65088904
0
Adds a pipe to the water network model
def add_pipe(self, name, start_node_name, end_node_name, length=304.8, diameter=0.3048, roughness=100, minor_loss=0.0, initial_status='OPEN', check_valve=False): self._link_reg.add_pipe(name, start_node_name, end_node_name, length, diameter, roughness, minor_loss, initial_status, check_valve)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, pipe_element):\n self.__iadd__(pipe_element)", "def __iadd__(self, pipe_element):\n if isinstance(pipe_element, Preprocessing):\n self.preprocessing = pipe_element\n elif isinstance(pipe_element, CallbackElement):\n pipe_element.needs_y = True\n self.elements.append(pipe_element)\n else:\n if isinstance(pipe_element, PipelineElement) or issubclass(\n type(pipe_element), PhotonNative\n ):\n self.elements.append(pipe_element)\n else:\n raise TypeError(\"Element must be of type Pipeline Element\")\n return self", "def add_pipe(self, name, start_node_name, end_node_name, length=304.8,\n diameter=0.3048, roughness=100, minor_loss=0.0, initial_status='OPEN', check_valve=False):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(start_node_name, str) and len(start_node_name) < 32 and start_node_name.find(' ') == -1, \"start_node_name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(end_node_name, str) and len(end_node_name) < 32 and end_node_name.find(' ') == -1, \"end_node_name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(length, (int, float)), \"length must be a float\"\n assert isinstance(diameter, (int, float)), \"diameter must be a float\"\n assert isinstance(roughness, (int, float)), \"roughness must be a float\"\n assert isinstance(minor_loss, (int, float)), \"minor_loss must be a float\"\n assert isinstance(initial_status, (str, LinkStatus)), \"initial_status must be a string or LinkStatus\"\n assert isinstance(check_valve, bool), \"check_valve must be a Boolean\"\n \n length = float(length)\n diameter = float(diameter)\n roughness = float(roughness)\n minor_loss = float(minor_loss)\n if isinstance(initial_status, str):\n initial_status = LinkStatus[initial_status]\n \n pipe = Pipe(name, start_node_name, end_node_name, self)\n pipe.length = length\n pipe.diameter = diameter\n pipe.roughness = roughness\n pipe.minor_loss = minor_loss\n pipe.initial_status = initial_status\n pipe._user_status = initial_status\n pipe.check_valve = check_valve\n self[name] = pipe", "def test_add_pipe(caplog):\n\n testapp = holocron.Application()\n marker = None\n\n def processor(app, items):\n nonlocal marker\n marker = 42\n yield from items\n\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"pipe\", [{\"name\": \"processor\"}])\n\n for _ in testapp.invoke(\"pipe\"):\n pass\n\n assert marker == 42\n assert len(caplog.records) == 0", "def append(self, pipeline):\n for stage in pipeline.pipe:\n self._pipe.append(stage)\n return self", "def build(self, pipe_model, allow_flow_reversal):\n\n self.pipe_model = pipe_model\n\n try:\n cls = pip.str_to_pipe(pipe_model)\n except AttributeError:\n cls = None\n\n if cls:\n obj = cls(name=self.name,\n start_node=self.start_node.name,\n end_node=self.end_node.name, length=self.length,\n allow_flow_reversal=allow_flow_reversal,\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n else:\n obj = None\n\n if obj is None:\n raise ValueError(\"%s is not a valid class name! 
(pipe %s)\" % (\n pipe_model, self.name))\n\n self.logger.info(\n 'Pipe model {} added to {}'.format(pipe_model, self.name))\n\n return obj", "def createPipe(self, transaction):\n pipe = detectPipeClass(transaction.dev, transaction.endpt)(self)\n name = \"Dev %s, %s\" % (transaction.dev, transaction.getTransferString())\n self.appendCanvas(name, pipe.stack)\n return pipe", "def pipette(self, groups):\n if len(self.instructions) > 0 and \\\n self.instructions[-1].op == 'pipette':\n self.instructions[-1].groups += groups\n else:\n self.instructions.append(Pipette(groups))", "def append_pipeline(self, pipeline, proba=None, repeat=None):\n self._action_list.append({'name': PIPELINE_ID, 'pipeline': pipeline,\n 'proba': proba, 'repeat': repeat})", "def Piping(T_in, p_in, m_dot, d_inner, l_pipe, f, epsilon_pipe, T_shield, N):\r\n\r\n ## Estimation of the influence of the arcs\r\n # Calculation according to VDI Heatatlas 2013\r\n # Assumption isoenthalpic flow\r\n state_Arc = FlowRestriction(T_in, p_in, m_dot, d_inner, f)\r\n p_Arc = state_Arc.get(\"p\")\r\n T_Arc = state_Arc.get(\"T\")\r\n\r\n ## Estimation of the influence of thermal radiation on the compressible flow\r\n\r\n # Emission coefficent for an enclosed vessel\r\n # Assuming much bigger hot surface -> emissivity of hot surface doesnt matter anymore, just the cold one\r\n # Thus the simple equation can be used\r\n q_pipe = epsilon_pipe * sp.constants.Stefan_Boltzmann * (T_shield**4 - T_Arc**4) #W\r\n\r\n # Calling of the function SimplePipe\r\n state_out = SimplePipe(T_Arc, p_Arc, m_dot, d_inner, l_pipe, N, 0, q_pipe)\r\n #Transfer results\r\n p_out = state_out.get(\"p\")\r\n T_out = state_out.get(\"T\")\r\n h_out = state_out.get(\"h\")\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n\r\n return state_out", "def inner_pipe (linkp, pn, dt, links1, links2, utype, dtype, p,\n H0, V0, H, V, H10, V10, H20, V20, pump, valve,\n friction, dVdt, dVdx,\n dVdt10, dVdx10, dVdt20, dVdx20):\n\n # Properties of current pipe\n g = 9.8 # m/s^2\n link1 = [p[abs(i)-1] for i in links1]\n link2 = [p[abs(i)-1] for i in links2]\n n = linkp.number_of_segments # spatial discretization\n\n # inner nodes\n if friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start\n V1 = V10; H1 = H10 #list\n V2 = V0[1]; H2 = H0[1]\n dVdx1 = dVdx10 ; dVdt1 = dVdt10\n dVdx2 = dVdx[0]; dVdt2 = dVdt[1]\n\n if utype[0] == 'Pipe':\n if linkp.start_node.transient_node_type == 'SurgeTank':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs = surge_tank(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = H[0]\n linkp.start_node.tank_flow = Qs\n elif linkp.start_node.transient_node_type == 'Chamber':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs, zp = air_chamber(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = zp\n linkp.start_node.tank_flow = Qs\n else:\n elev = linkp.start_node.elevation\n emitter_coeff = linkp.start_node.emitter_coeff + linkp.start_node.demand_coeff\n block_per = linkp.start_node.block_per\n H[0], V[0] = add_leakage(emitter_coeff, block_per, link1, linkp, elev,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, 
dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Pump':\n pumpc = pump[0]\n H[0], V[0] = pump_node(pumpc, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Valve':\n valvec = valve[0]\n H[0], V[0] = valve_node(valvec, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n # Pipe end\n V1 = V0[n-1]; H1 = H0[n-1]\n V2 = V20; H2 = H20\n dVdx1 = dVdx[n-1] ; dVdt1 = dVdt[n-1]\n dVdx2 = dVdx20; dVdt2 = dVdt20\n if dtype[0] == 'Pipe':\n if linkp.end_node.transient_node_type == 'SurgeTank':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs = surge_tank(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = H[n]\n linkp.end_node.tank_flow = Qs\n elif linkp.end_node.transient_node_type == 'Chamber':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs,zp = air_chamber(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = zp\n linkp.end_node.tank_flow = Qs\n else:\n elev = linkp.end_node.elevation\n emitter_coeff = linkp.end_node.emitter_coeff + linkp.end_node.demand_coeff\n block_per = linkp.end_node.block_per\n H[n], V[n] = add_leakage(emitter_coeff, block_per,linkp, link2, elev,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif dtype[0] == 'Pump':\n pumpc = pump[1]\n H[n], V[n] = pump_node(pumpc, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Valve':\n valvec = valve[1]\n H[n], V[n] = valve_node(valvec, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n return H, V", "def pipe_id(self):\n pass", "def set_pipeline(self):\n dist_pipe = Pipeline([\n ('dist_trans', DistanceTransformer()),\n ('stdscaler', StandardScaler())\n ])\n\n time_pipe = Pipeline([\n ('time_enc', TimeFeaturesEncoder('pickup_datetime')),\n ('ohe', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n preproc_pipe = ColumnTransformer([\n ('distance', dist_pipe, [\"pickup_latitude\", \"pickup_longitude\", 'dropoff_latitude', 'dropoff_longitude']),\n ('time', time_pipe, ['pickup_datetime'])\n ], remainder=\"drop\")\n\n pipe = Pipeline([\n ('preproc', preproc_pipe),\n ('linear_model', LinearRegression())\n ])\n return pipe", "def load_nlu_pipe_from_hdd(pipe_path, request) -> NLUPipeline:\n pipe = NLUPipeline()\n # if env_utils.is_running_in_databricks() :\n # if pipe_path.startswith('/dbfs/') or pipe_path.startswith('dbfs/'):\n # nlu_path = pipe_path\n # if pipe_path.startswith('/dbfs/'):\n # nlp_path = pipe_path.replace('/dbfs','')\n # else :\n # nlp_path = pipe_path.replace('dbfs','')\n # else :\n # nlu_path = 'dbfs/' + pipe_path\n # if pipe_path.startswith('/') : nlp_path = pipe_path\n # else : nlp_path = '/' + pipe_path\n nlu_ref = request # pipe_path\n if os.path.exists(pipe_path):\n if offline_utils.is_pipe(pipe_path):\n # language, nlp_ref, nlu_ref,path=None, is_licensed=False\n # todo deduct lang and if Licensed or not\n\n pipe_components = construct_component_from_pipe_identifier('en', nlu_ref, nlu_ref, pipe_path, False)\n elif offline_utils.is_model(pipe_path):\n c = offline_utils.verify_and_create_model(pipe_path)\n c.info.nlu_ref = nlu_ref\n pipe.add(c, nlu_ref, pretrained_pipe_component=True)\n return 
PipelineQueryVerifier.check_and_fix_nlu_pipeline(pipe)\n\n else:\n print(\n f\"Could not load model in path {pipe_path}. Make sure the folder contains either a stages subfolder or a metadata subfolder.\")\n raise ValueError\n for c in pipe_components: pipe.add(c, nlu_ref, pretrained_pipe_component=True)\n return pipe\n\n else:\n print(\n f\"Could not load model in path {pipe_path}. Make sure the folder contains either a stages subfolder or a metadata subfolder.\")\n raise ValueError", "def live_network_input_to_pipe(iface=None, p=None):\n\n global g_pipein\n\n print(\"Named Pipe '{0}' has been opened for writing. Waiting for Pipe Reader to connect.\".format(p))\n g_pipein = open(p, 'wb')\n print(\"Connected to Named Pipe '{0}'. Writing binary TDMs into pipe.\".format(p))\n\n if iface is None:\n print(\"Listening on default interface.\")\n try:\n sniff(prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")\n else:\n print(\"Listening on interface: {0}\".format(iface))\n try:\n sniff(iface=iface, prn=write_tdm_to_pipe)\n except IOError as e:\n if e.errno == errno.EPIPE:\n print(\"Broken Pipe: EPIPE\")", "def concat(cls, pipe1, pipe2):\n # pylint: disable=protected-access\n if pipe1.dataset != pipe2.dataset and pipe1.dataset is not None and pipe2.dataset is not None:\n raise ValueError(\"Cannot add pipelines with different datasets\")\n\n new_p1 = cls.from_pipeline(pipe1)\n new_p2 = cls.from_pipeline(pipe2)\n new_p1._action_list += new_p2._action_list[:]\n new_p1._variables = {**pipe1._variables, **pipe2._variables}\n new_p1.dataset = pipe1.dataset or pipe2.dataset\n return new_p1", "def add(self):\n self.inp.inputs.add(self)\n self.out.outputs.add(self)", "def set_pipeline(self):\n pipe_distance = make_pipeline(DistanceTransformer(), RobustScaler())\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder(handle_unknown='ignore'))\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n feat_eng_bloc = ColumnTransformer([('time', pipe_time, time_cols),\n ('distance', pipe_distance, dist_cols)]\n )\n self.pipeline = Pipeline(steps=[('feat_eng_bloc', feat_eng_bloc),\n ('regressor', RandomForestRegressor())])\n return self.pipeline", "def make_pipes(self, pipe_set):\n # Every 60 frames, we draw a new pipe\n if self.frame_number % 60 == 0:\n pipe_set.append(PipeSet())\n self.frame_number = 0 # The frame counter is reset to prevent it from becoming too large", "def __spawn_pipe(self) -> None:\n hole_center = random.randrange(0 + BIRD_HEIGHT, HEIGHT - BIRD_HEIGHT)\n self.pipes.add(Pipe.Pipe(PIPE_LOWER, hole_center))\n self.pipes.add(Pipe.Pipe(PIPE_UPPER, hole_center))", "def putpipe(self):\n task = None\n try:\n task = self._queuepop()\n except:\n task = Task({'empty': True})\n self.wpipe.send(task)", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def __add_pipeline__(self,pipeline):\n if not re.search('Pipeline',pipeline.obj_type):\n raise Exception(\"Trying to add non-pipeline key to flowcell statistics reports\")\n if not self.pipelines is None:\n self.pipelines += ';'\n self.pipelines += str(pipeline.key) + \":\" + pipeline.obj_type\n else:\n self.pipelines = str(pipeline.key) + \":\" + pipeline.obj_type", "def test_pipe():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog\")\n assert isinstance(out[0], ProgramNode)\n assert out[0].program_desc == 
posandtwo\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert out[2].program_desc == valprog\n assert isinstance(out[3], EndOfCommandNode)", "def _make_fifo(self):\n if os.path.exists(self.fifo_path):\n os.remove(self.fifo_path)\n os.mkfifo(self.fifo_path)", "def __init__(self, teeth = 10, length = 0.3):\n self.teeth = teeth\n self.length = length\n # teeth are every other face\n spans = teeth * 2\n \n pipeObj = cmds.polyPipe(sa = spans)\n self.transform = pipeObj[0] + \"_gear\"\n self.constructor = pipeObj[1]\n \n # rename object\n cmds.rename(pipeObj[0], self.transform)\n\n # this is because the faces we want in Maya are numbered from [spans * 2, spans * 3)\n # *** if you run ls -sl in MEL, Maya gives you all the face names\n sideFaces = range(spans * 2, spans * 3, 2)\n\n # clear any selection you have\n cmds.select(clear = True)\n\n # iterate through every other side face\n for face in sideFaces:\n cmds.select(\"%s.f[%s]\" % (self.transform, face), add = True)\n\n # get the poly extrude face\n self.extrude = cmds.polyExtrudeFacet(ltz = length)[0]\n\n #clean up and return\n cmds.select(clear = True)", "def test_add_pipe_override(caplog):\n\n testapp = holocron.Application()\n marker = None\n\n def processor_a(app, items):\n nonlocal marker\n marker = 42\n yield from items\n\n def processor_b(app, items):\n nonlocal marker\n marker = 13\n yield from items\n\n testapp.add_processor(\"processor_a\", processor_a)\n testapp.add_processor(\"processor_b\", processor_b)\n\n testapp.add_pipe(\"pipe\", [{\"name\": \"processor_a\"}])\n testapp.add_pipe(\"pipe\", [{\"name\": \"processor_b\"}])\n\n for _ in testapp.invoke(\"pipe\"):\n pass\n\n assert marker == 13\n\n assert len(caplog.records) == 1\n assert caplog.records[0].message == \"pipe override: 'pipe'\"", "def save_optimum_pipe(optimum_pipe, zip_file, password=None):\n folder = os.path.splitext(zip_file)[0]\n zip_file = folder + \".photon\"\n\n if os.path.exists(folder):\n logger.warn(\"The file you specified already exists as a folder.\")\n else:\n os.makedirs(folder)\n\n # only save elements without name. Structure of optimum_pipe.elements: [('name', element),...]\n PhotonModelPersistor.save_elements(\n [val[1] for val in optimum_pipe.elements], folder\n )\n\n # write meta infos from pipeline\n with open(os.path.join(folder, \"_optimum_pipe_meta.pkl\"), \"wb\") as f:\n meta_infos = {\"photon_version\": __version__}\n pickle.dump(meta_infos, f)\n\n # get all files\n files = list()\n for root, directories, filenames in os.walk(folder):\n for filename in filenames:\n files.append(os.path.join(root, filename))\n\n if password is not None:\n import pyminizip\n\n pyminizip.compress(files, zip_file, password)\n else:\n with zipfile.ZipFile(zip_file, \"w\") as myzip:\n root_len = len(os.path.dirname(zip_file)) + 1\n for f in files:\n # in order to work even with subdirectories, we need to substract the dirname from our file\n # this is why I'm saving the root_len first\n myzip.write(f, f[root_len:])\n os.remove(f)\n shutil.rmtree(folder)", "def __init__(self):\n self._pipe = []\n self._group = None\n stages = ['on', 'off', 'color', 'transition', 'flash', 'callback',\n 'repeat', 'brightness', 'wait', 'temperature', 'white',\n 'white_up', 'white_down', 'red_up', 'red_down',\n 'green_up', 'green_down', 'blue_up', 'blue_down',\n 'night_light', 'link', 'unlink']\n for name in stages:\n self._add_stage(name)", "def pipes(self): \n return self._link_reg.pipes" ]
[ "0.70902383", "0.639606", "0.60840946", "0.58341885", "0.583066", "0.57448953", "0.5718142", "0.5681793", "0.5593915", "0.5527294", "0.5476688", "0.5435575", "0.5421555", "0.53581893", "0.5324393", "0.5309517", "0.5289012", "0.527887", "0.5258997", "0.52400005", "0.5194908", "0.5142083", "0.51410234", "0.51139665", "0.50811476", "0.50622267", "0.5016996", "0.4999459", "0.49907798", "0.49809843" ]
0.66412914
1
Adds a pump to the water network model
def add_pump(self, name, start_node_name, end_node_name, pump_type='POWER',
             pump_parameter=50.0, speed=1.0, pattern=None, initial_status='OPEN'):
    self._link_reg.add_pump(name, start_node_name, end_node_name, pump_type,
                            pump_parameter, speed, pattern, initial_status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_pump(self, name, start_node_name, end_node_name, pump_type='POWER',\n pump_parameter=50.0, speed=1.0, pattern=None, initial_status='OPEN'):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(start_node_name, str) and len(start_node_name) < 32 and start_node_name.find(' ') == -1, \"start_node_name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(end_node_name, str) and len(end_node_name) < 32 and end_node_name.find(' ') == -1, \"end_node_name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pump_type, str), \"pump_type must be a string\"\n assert isinstance(pump_parameter, (int, float, str)), \"pump_parameter must be a float or string\"\n assert isinstance(speed, (int, float)), \"speed must be a float\"\n assert isinstance(pattern, (type(None), str)), \"pattern must be a string\"\n assert isinstance(initial_status, (str, LinkStatus)), \"initial_status must be a string or LinkStatus\"\n \n if isinstance(initial_status, str):\n initial_status = LinkStatus[initial_status]\n if pump_type.upper() == 'POWER':\n pump = PowerPump(name, start_node_name, end_node_name, self)\n pump.power = pump_parameter\n elif pump_type.upper() == 'HEAD':\n pump = HeadPump(name, start_node_name, end_node_name, self)\n pump.pump_curve_name = pump_parameter\n else:\n raise ValueError('pump_type must be \"POWER\" or \"HEAD\"')\n pump.base_speed = speed\n pump.initial_status = initial_status\n pump.speed_pattern_name = pattern\n self[name] = pump", "def add(self, p):\n self._pumps.add(p)", "def __manage_pump(self):\r\n with self.config_lock:\r\n if self.config['pump_auto_control'] == False:\r\n # Controller doesn't need to do anything about the pump as it is in manual control mode\r\n pass\r\n else:\r\n # Pump is in automatic mode\r\n if self.config['\"pump_auto_control_mode'] == 'normally_off':\r\n # For current functionality there is nothing that can force the pump to turn on (e.g.\r\n # fire extinguishing).\r\n pass\r\n else:\r\n # Pump is normally on.\r\n pump_parameters = self.well_tank_dev.parameters\r\n if self.config['pump_auto_control_turn_off_when_well_empty']:\r\n if pump_parameters ['well_water_presence'] == 'not_present':\r\n # No water in the well\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n # Water in the well is present\r\n if self.config['pump_auto_control_turn_off_when_tank_full']:\r\n if pump_parameters['tank'] == 'full':\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n self.well_tank_dev.send_command('pump', 'turn_on')\r\n else:\r\n # Do not turn off the pump if the well is empty\r\n if self.config['pump_auto_control_turn_off_when_tank_full']:\r\n if pump_parameters ['tank'] == 'full':\r\n self.well_tank_dev.send_command('pump', 'turn_off')\r\n else:\r\n self.well_tank_dev.send_command('pump', 'turn_on')\r\n else:\r\n # Do not trun off the pump when the tank is full\r\n self.well_tank_dev.send_command('pump', 'turn_on')", "def pump_water(pump_pin, delay=1):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(pump_pin, GPIO.OUT)\n timeout = time.time() + 1.5*60 # 1.5 minutes\n\n try:\n print \"Watering plant...\"\n GPIO.output(pump_pin, GPIO.HIGH)\n\n while get_percent_wet() < 75:\n time.sleep(delay)\n if time.time() > timeout:\n break\n\n GPIO.output(pump_pin, GPIO.LOW)\n GPIO.cleanup(pump_pin)\n return\n\n except:\n GPIO.cleanup(pump_pin)\n\n return", 
"def __init__(self, initScript=None):\n super(Pump, self).__init__(initScript)\n \n # the isentropic compressor\n self.ideal = IdealPump()\n self.AddUnitOperation(self.ideal, 'Ideal')\n \n # a heater to add the waste heat to the outlet\n self.waste = Heater.Heater()\n self.AddUnitOperation(self.waste, 'Waste')\n self.waste.GetPort(DELTAP_PORT).SetValue(0.0, FIXED_V)\n \n # connect them\n self.ConnectPorts('Ideal', OUT_PORT, 'Waste', IN_PORT)\n\n # energy sensors (needed for signals)\n self.idealQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.idealQ, 'IdealQ')\n self.ConnectPorts('Ideal', IN_PORT + 'Q', 'IdealQ', OUT_PORT)\n \n self.wasteQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.wasteQ, 'WasteQ')\n self.ConnectPorts('Waste', IN_PORT + 'Q', 'WasteQ', OUT_PORT)\n\n self.totalQ = Sensor.EnergySensor()\n self.AddUnitOperation(self.totalQ, 'TotalQ')\n \n # create a signal stream for the efficiency\n self.effStream = Stream.Stream_Signal()\n self.effStream.SetParameterValue(SIGTYPE_PAR, GENERIC_VAR)\n self.AddUnitOperation(self.effStream, 'EfficiencySig')\n \n #set relation between ideal and total Q\n self.set = Set.Set()\n self.AddUnitOperation(self.set, 'Set')\n self.set.SetParameterValue(SIGTYPE_PAR, ENERGY_VAR)\n self.set.GetPort(Set.ADD_PORT).SetValue(0.0, FIXED_V)\n self.ConnectPorts('TotalQ',SIG_PORT, 'Set', SIG_PORT + '0')\n self.ConnectPorts('IdealQ',SIG_PORT, 'Set', SIG_PORT + '1')\n self.ConnectPorts('EfficiencySig', OUT_PORT, 'Set', Set.MULT_PORT)\n \n # energy stream balance\n self.mix = Balance.BalanceOp()\n self.AddUnitOperation(self.mix, 'Mix')\n self.mix.SetParameterValue(NUSTIN_PAR + Balance.S_ENE, 1)\n self.mix.SetParameterValue(NUSTOUT_PAR + Balance.S_ENE, 2)\n self.mix.SetParameterValue(Balance.BALANCETYPE_PAR, Balance.ENERGY_BALANCE)\n \n # connect the mixer ports\n self.ConnectPorts('IdealQ',IN_PORT,'Mix',OUT_PORT + 'Q0')\n self.ConnectPorts('WasteQ',IN_PORT,'Mix',OUT_PORT + 'Q1')\n self.ConnectPorts('TotalQ',OUT_PORT,'Mix', IN_PORT + 'Q0')\n \n # export the flow ports\n self.BorrowChildPort(self.ideal.GetPort(IN_PORT), IN_PORT)\n self.BorrowChildPort(self.waste.GetPort(OUT_PORT), OUT_PORT)\n self.BorrowChildPort(self.totalQ.GetPort(IN_PORT), IN_PORT + 'Q')\n self.BorrowChildPort(self.effStream.GetPort(IN_PORT), EFFICIENCY_PORT)\n self.BorrowChildPort(self.ideal.GetPort(DELTAP_PORT), DELTAP_PORT)\n \n #Change the type of the energy port such that it is in Work units and scaling\n self.totalQ.GetPort(IN_PORT).GetProperty().SetTypeByName(WORK_VAR)", "def set_pump(self, pump: str, state: bool):\r\n if pump == \"NF\":\r\n if state:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x01]\r\n else:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x02]\r\n elif pump == \"NT\":\r\n if state:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x03]\r\n else:\r\n self.msg_send_upr.data[:3] = [0x11, 0x00, 0x04]\r\n self.send_and_flush(self.msg_send_upr)", "def add_lump(self, lumpname, lump):\n assert self.mode == 'W', \"Cannot write a WAD opened in read mode. 
\" \\\n \"Please consider copying your WAD() into a new one \" \\\n \"using to_bytes and from_bytes methods\"\n if lump is None:\n lump_bytes = bytes()\n else:\n lump_bytes = lump.to_bytes()\n size = len(lump_bytes)\n self['directory'].append(LumpInfo(filepos=self.current_lump_offset, size=size, name=lumpname))\n self['lumps'].append(lump_bytes)\n # Updating directory and header information\n self.current_lump_offset += size\n self['header']['numlumps'] += 1\n # The infotableoffset is always kept at the end of the file\n self['header']['infotableofs'] = self.current_lump_offset", "def add_wolf_to_pack(self, wolf):\n logging.debug(\"Adding wolf {} to pack\".format(wolf.unique_id))\n # When a Wolf is part of a pack\n if (not wolf.pack):\n self.model.schedule.remove(wolf)\n self.model.grid.remove_agent(wolf)\n wolf.pack = True\n self.wolves.append(wolf)", "def _add_bal(self):\n\n c = self.components\n p = self.pipes\n\n # TODO No mass flow reversal yet\n if self.temperature_driven:\n\n lines = self.params['lines'].v()\n\n self.block.mix_temp = Var(self.TIME, lines)\n\n def _temp_bal_incoming(b, t, l):\n\n incoming_comps = collections.defaultdict(list)\n incoming_pipes = collections.defaultdict(list)\n\n for name, comp in c.items():\n if value(comp.get_mflo(t)) >= 0:\n incoming_comps['supply'].append(name)\n else:\n incoming_comps['return'].append(name)\n\n for name, pipe in p.items():\n if value(pipe.get_edge_mflo(self.name, t)) >= 0:\n incoming_pipes['supply'].append(name)\n else:\n incoming_pipes['return'].append(name)\n # Zero mass flow rate:\n if value(\n sum(c[comp].get_mflo(t) for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) == 0:\n # mixed temperature is average of all joined pipes, actual value should not matter,\n # because packages in pipes of this time step will have zero size and components do not take over\n # mixed temperature in case there is no mass flow\n\n return b.mix_temp[t, l] == (\n sum(c[comp].get_temperature(t, l) for comp in c) +\n sum(p[pipe].get_temperature(self.name, t, l) for\n pipe in p)) / (\n len(p) + len(c))\n\n\n else: # mass flow rate through the node\n return (sum(\n c[comp].get_mflo(t) for comp in incoming_comps[l]) +\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) * b.mix_temp[t, l] == \\\n sum(c[comp].get_mflo(t) * c[comp].get_temperature(t,\n l)\n for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) * p[\n pipe].get_edge_temperature(self.name, t, l)\n for pipe in incoming_pipes[l])\n\n self.block.def_mixed_temp = Constraint(self.TIME,\n lines,\n rule=_temp_bal_incoming)\n\n def _temp_bal_outgoing(b, t, l, comp):\n\n outgoing_comps = collections.defaultdict(list)\n outgoing_pipes = collections.defaultdict(list)\n\n for name, comp_obj in c.items():\n if comp_obj.get_mflo(t) >= 0:\n outgoing_comps['return'].append(name)\n else:\n outgoing_comps['supply'].append(name)\n\n for name, pipe_obj in p.items():\n if pipe_obj.get_edge_mflo(self.name, t) >= 0:\n outgoing_pipes['return'].append(name)\n else:\n outgoing_pipes['supply'].append(name)\n\n if t == 0:\n return Constraint.Skip\n if comp in outgoing_pipes[l]:\n return p[comp].get_edge_temperature(self.name, t, l) == \\\n b.mix_temp[t, l]\n elif comp in outgoing_comps[l]:\n return c[comp].get_temperature(t, l) == b.mix_temp[t, l]\n else:\n return Constraint.Skip\n\n self.block.outgoing_temp_comps = Constraint(self.TIME,\n lines,\n c.keys(),\n rule=_temp_bal_outgoing)\n 
self.block.outgoing_temp_pipes = Constraint(self.TIME,\n lines,\n p.keys(),\n rule=_temp_bal_outgoing)\n\n elif self.repr_days is None:\n\n def _heat_bal(b, t):\n return 0 == sum(\n self.components[i].get_heat(t) for i in self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME,\n rule=_heat_bal)\n\n def _mass_bal(b, t):\n return 0 == sum(\n self.components[i].get_mflo(t) for i in self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME,\n rule=_mass_bal)\n\n else:\n def _heat_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_heat(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_heat_bal)\n\n def _mass_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_mflo(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_mass_bal)", "def __init__(\n self,\n pump_io: HamiltonPumpIO,\n syringe_volume: str,\n name: str,\n address: int = 1,\n **config,\n ):\n super().__init__(name)\n # HamiltonPumpIO\n self.pump_io = pump_io\n ML600._io_instances.add(self.pump_io) # See above for details.\n\n # Pump address is the pump sequence number if in chain. Count starts at 1, default.\n self.address = int(address)\n\n # Syringe pumps only perform linear movement, and the volume displaced is function of the syringe loaded.\n try:\n self.syringe_volume = ureg.Quantity(syringe_volume)\n except AttributeError as attribute_error:\n logger.error(f\"Invalid syringe volume {syringe_volume}!\")\n raise InvalidConfiguration(\n \"Invalid syringe volume provided.\"\n \"The syringe volume is a string with units! e.g. 
'5 ml'\"\n ) from attribute_error\n\n if self.syringe_volume.m_as(\"ml\") not in ML600.VALID_SYRINGE_VOLUME:\n raise InvalidConfiguration(\n f\"The specified syringe volume ({syringe_volume}) is invalid!\\n\"\n f\"The volume (in ml) has to be one of {ML600.VALID_SYRINGE_VOLUME}\"\n )\n\n self._steps_per_ml = ureg.Quantity(f\"{48000 / self.syringe_volume} step/ml\")\n self._offset_steps = 100 # Steps added to each absolute move command, to decrease wear and tear at volume = 0\n self._max_vol = (48000 - self._offset_steps) * ureg.step / self._steps_per_ml\n\n # This enables to configure on per-pump basis uncommon parameters\n self.config = ML600.DEFAULT_CONFIG | config", "def addpool(miner: Miner, pool):\n api = MinerApi(host=miner.ipaddress, port=int(miner.port))\n jaddpool = api.addpool(\"{0},{1},{2}\".format(pool.url, pool.user, \"x\"))\n return jaddpool[\"STATUS\"][0][\"Msg\"]", "def setPump(self, time, wait):\n c =\"/cli:python /app:matrix /cmd:pump /time:\"+str(time)+ \" /value:\"+ str(value)\n self.addtoCMDlist(c)\n self.sendCMDlist()", "def create_pumper():\n return _Kalamazoo()", "def water_uptake_apsim(self, soil):\r\n soil_wat_avail = np.zeros(soil.total_layers)\r\n soil_wat_supply = np.zeros(soil.total_layers)\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n transp_pot = daily_ref_evap_transp * self.light_intercpt\r\n # Water available in each layer [mm]\r\n for lyr in soil.layers:\r\n soil_wat_avail[lyr] = ((soil.water_content[lyr] -\r\n soil.perm_wilt_point[lyr]) *\r\n soil.layer_thickness[lyr] *\r\n soil.WATER_DENSITY)\r\n # Water supply\r\n for lyr in soil.layers:\r\n soil_wat_supply[lyr] = (soil_wat_avail[lyr] * soil.kl[lyr])\r\n\r\n # Water uptake (no supply or demand)\r\n if (soil_wat_supply.sum() <= 0) or (transp_pot <= 0):\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = 0\r\n else:\r\n # Water uptake (water is not limiting)\r\n if transp_pot < soil_wat_supply.sum():\r\n # distribute demand proportionately to the water supply\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (soil_wat_supply[lyr] /\r\n soil_wat_supply.sum() *\r\n transp_pot)\r\n else:\r\n # Water uptake (water is limiting)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = soil_wat_supply[lyr]\r\n\r\n self.att_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.att_transp # mm\r\n self.transp_ratio = self.att_transp / transp_pot\r\n self.expect_transp = transp_pot\r\n self.cum_pot_transp += transp_pot", "def pumps(self): \n return self._link_reg.pumps", "def add(self, p, s, node) -> None:\n self.place.append(p)\n self.station.append(s)\n self.pi.append(node.pi[p, s] if p != float('inf') else float('inf'))\n self.noncoverage.append(node.noncoverage.left + node.noncoverage.right)\n self.cost.append(node.cost)\n self.delay.append(node.delay)\n self.step.append(node.key)", "def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)", "def pump_view(request):\n form = BuildingForm(retrofit_type='Pump')\n return render(request, 'pump.html', context={'form':form})", "def add_pack_to_pack(self, pack):\n logging.debug(\"Merging packs\")\n for wolf in 
pack.wolves:\n self.add_wolf_to_pack(wolf)\n logging.debug(\"Pack is now {} wolves\".format(len(self.wolves)))\n self.model.schedule.remove(pack)\n self.model.grid.remove_agent(pack)", "def publish_watering_message(uid):\n d = dict()\n d['watering'] = dict()\n d['watering']['timestamp'] = time.time()\n d['watering']['uid'] = uid\n\n message = json.dumps(d)\n logging.info('Publish watering request: %s', message)\n paho.mqtt.publish.single('planteur/watering', message)", "def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))", "def producer(w):\n assert isinstance(w, WireVector)\n for net in block.logic:\n for dest in net.dests:\n if dest is w:\n return net\n add_node(w, '???')\n return w", "def _add_pool ( self, pool ):\n self._pool_id += 1\n try:\n self._poolstack.append ( pool )\n except:\n self._pool_id -= 1\n raise\n\n self._update_resolver()", "def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\",\n pump_units=\"m3/s\"):\n\n # Set general info\n self._type = 1 # pumping well id\n self.parameters = {'full': True,\n 'rw': 1.,\n 'd': 0.,\n 'l': 1.}\n self.time_units = time_units\n self.len_units = len_units\n self.pump_units = pump_units\n\n # Create pumping well data\n self.pumprate = _Data(dtype=0, name=name, description=description)\n self.pumprate.set_units(self.time_units, self.pump_units)\n\n # Set observation wells and piezometers\n self.wells = []", "def push_model(config):\n util_logger.info('Backing up the model files to wandb')\n martifact = wandb.Artifact('%s_model' % config.wandb_name, type='model')\n martifact.add_dir(os.path.join(config.output_dir,\"best_model\"))\n #matrifact.add_file(os.path.join(config.output_dir,\"trainer_config.json\"))\n wandb.log_artifact(martifact)", "async def change_pump(self, pump, newstate):\n if not self.connected:\n return\n\n # we don't have 7 pumps!\n if pump > MAX_PUMPS:\n return\n\n # we don't have THIS pump\n if not self.pump_array[pump]:\n return\n\n # this is a toggle switch, not on/off\n if self.pump_status[pump] == newstate:\n return\n\n # what we know:\n data = bytearray(9)\n data[0] = M_START\n data[1] = 7\n data[2] = mtypes[BMTS_CONTROL_REQ][0]\n data[3] = mtypes[BMTS_CONTROL_REQ][1]\n data[4] = mtypes[BMTS_CONTROL_REQ][2]\n data[6] = 0x00 # who knows?\n data[8] = M_END\n\n # calculate how many times to push the button\n if self.pump_array[pump] == 2:\n for iter in range(1, 2+1):\n if newstate == ((self.pump_status[pump] + iter) % 3):\n break\n else:\n iter = 1\n\n # now push the button until we hit desired state\n for pushes in range(1, iter+1):\n # 4 is 0, 5 is 2, presume 6 is 3?\n data[5] = C_PUMP1 + pump\n data[7] = messages.Message.crc(data[1:7])\n self.writer.write(data)\n await self.writer.drain()\n await asyncio.sleep(1.0)", "def add(self, mp):\n \n self.tile_contents.append(mp)\n if(self.tile_contents[-1].raised == False):\n self.paint_blocks += 1.00", "def plant(self):\n\t\ttic=time.clock()\n\t\tcommands=[]\n\t\tt=self.m.times\n\t\tauto=self.m.automatic\n\t\tpHeads=self.plantHeads\n\t\t#gather information about the soil at site\n\t\tdeepest=0\n\t\tdeepestPos=None\n\t\tfor h in pHeads:\n\t\t\tdepth=self.G.terrain.humusLayer.getDepth(h.getPos())\n\t\t\tassert depth>=0\n\t\t\tif depth>deepest:\n\t\t\t\tdeepest=depth\n\t\t\t\tdeepestPos=h.getPos()\n\t\tdepth=deepest\n\t\tdigTime=self.m.getDigTime(deepestPos)\n\t\tself.sim.stats['humus depths'].append(depth)\n\t\tif self.m.inverting: #determine the time. 
Dependent on digTime\n\t\t\tif self.m.invertingMethod=='KO':\n\t\t\t\tinvertTime=self.G.simParam['tCWhenInvKO']\n\t\t\telif self.m.invertingMethod=='Excavator':\n\t\t\t\tinvertTime=self.G.simParam['tInvExcavator']-digTime\n\t\t\telse:\n\t\t\t\traise Exception('cannot identify inverting method %s'%self.m.invertingMethod)\n\t\tfor pH in pHeads:\n\t\t\tpH.reset()\n\t\t\tmoundBould=[]\n\t\t\torig=pH.getPos()#middle of plantinghead\n\t\t\tboul=self.G.terrain.GetBoulders(orig, R=pH.radius)\n\t\t\troots=self.G.terrain.GetRoots(orig,R=pH.radius)\n\t\t\tdirect=self.m.direction-pi/2.+self.posCyl[1] #same dir as from machine to point\n\t\t\tsumA=0\n\t\t\timmobile=self.G.simParam['critStoneSize']\n\t\t\tdibbleDisturb=0.001\n\t\t\tself.m.stopControl()\n\t\t\tself.sim.stats['mound attempts']+=1\n\t\t\tfor r in roots: #determine if a root is hit in the critical area.\n\t\t\t\tif pH.rootCollide(r): #root is within area..\n\t\t\t\t\tprint \"striked a root..\"\n\t\t\t\t\tangle=abs(r.direction-direct)\n\t\t\t\t\tray1=[orig,fun.getCartesian([0,1],fromLocalCart=True, origin=orig, direction=r.direction)]\n\t\t\t\t\tray2=[orig,fun.getCartesian([0,1],fromLocalCart=True, origin=orig, direction=direct)]\n\t\t\t\t\tangle=fun.getAngle(ray1, ray2) #angle between root and planting head\n\t\t\t\t\tpH.strikedImmobile=True\n\t\t\t\t\tself.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\tfor head in pHeads: head.timeConsumption['halting']+=t['haltTime']\n\t\t\t\t\tif self.G.simParam['noRemound'] or angle>self.m.rootDegreesOK:\n\t\t\t\t\t\tself.debugPrint('pos: %s collided with root. angle was too much %s'%(str(orig), str(angle*180.0/pi)))\n\t\t\t\t\t\tpH.abort=True\n\t\t\t\t\t\tpH.done=True\n\t\t\t\t\telse: #remound\n\t\t\t\t\t\tprint \"remounds\"\n\t\t\t\t\t\tself.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\t\t\t\tself.cmnd(commands, timeTmp, auto['mound'])\n\t\t\t\t\t\tfor pH in pHeads:\n\t\t\t\t\t\t\tpH.timeConsumption['halting']+=t['haltTime'] #that's for both, if 2h\n\t\t\t\t\t\t\tpH.remounded=True\n\t\t\t\t\t\t\tpH.timeConsumption['mounding']+=timeTmp\n\t\t\t\t\t\t\n\n\t\t\t\t\t\n\t\t\tif not (pH.abort or pH.strikedImmobile):\n\t\t\t\tfor b in boul:\n\t\t\t\t\t#check if we are inside the scoop. It's the middle of the stone that matters\n\t\t\t\t\t#get local xy-coordinates\n\t\t\t\t\tcylPos=self.m.getCylindrical(b.pos,origin=orig, direction=direct)\n\t\t\t\t\ttwoDdist=self.m.getCartesian(cylPos, origin=orig, direction=direct, local=True)#not really optimal, could be improved\n\t\t\t\t\tinside=False #just to skip a really long if-statement\n\t\t\t\t\tif self.G.simParam['rectangular']:\n\t\t\t\t\t\tif b.radius+b.z>-pH.depth and collide(pH, b, o1pos=orig):\n\t\t\t\t\t\t\tinside=True\n\t\t\t\t\telif b.z**2+twoDdist[1]**2<(b.radius+pH.depth)**2 and collide(pH, b, o1pos=orig): #the first check is for the cylinder, through pythagoras with 2D[1] since cylinder and not sphere\n\t\t\t\t\t\tinside=True\n\t\t\t\t\tif inside: \n \t\t\t\t\t\t#old one: abs(bpos[0])<pH.width/2. 
and abs(bpos[1])<pH.length/2.:\n\t\t\t\t\t\tmoundBould.append(b)\n\t\t\t\t\t\tsumA+=b.area\n\t\t\t\t\t\tlocalPos=-twoDdist[1], b.z #2D position with z as y-axis\n\t\t\t\t\t\t#now, look how much it occuppies vertically.\n\t\t\t\t\t\ttwoDdist=self.m.getCartesian(cylPos, origin=orig, direction=direct, local=True)#not really optimal, could be improved\n\t\t\t\t\t\tif self.G.simParam['rectangular']:\n\t\t\t\t\t\t\tnodes=[(-pH.length*0.5,0), (-pH.length*0.5, -pH.depth), (pH.length*0.5, -pH.depth), (pH.length*0.5, 0)]\n\t\t\t\t\t\t\tlast=None\n\t\t\t\t\t\t\tpoints=[]\n\t\t\t\t\t\t\tfor node in nodes:#loop over the rectangle edges.\n\t\t\t\t\t\t\t\tif last:\n\t\t\t\t\t\t\t\t\tray=(last,node)\n\t\t\t\t\t\t\t\t\ttmp=col.intersectRaySphere(np.array(ray),b.radius,localPos, additionalInfo=True)\n\t\t\t\t\t\t\t\t\tif type(tmp)!=bool:\n\t\t\t\t\t\t\t\t\t\tfor point in tmp[1:]:\n\t\t\t\t\t\t\t\t\t\t\tpoints.append(list(point))\n\t\t\t\t\t\t\t\tlast=node\n\t\t\t\t\t\t\tassert len(points)!=1 #would be tangent but..\n\t\t\t\t\t\t\tupper=(-twoDdist[1], b.z+b.radius)\n\t\t\t\t\t\t\tlower=(-twoDdist[1], b.z-b.radius)\n\t\t\t\t\t\t\tif not col.pointInPolygon(upper, nodes):\n\t\t\t\t\t\t\t\tif len(points)==0: #it passed through the easy check above...\n\t\t\t\t\t\t\t\t\tupper=-pH.depth\n\t\t\t\t\t\t\t\t\tmoundBould.remove(b)\n\t\t\t\t\t\t\t\t\tsumA-=b.area\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tupper=max([p[1] for p in points])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tupper=upper[1]\n\t\t\t\t\t\t\tif not col.pointInPolygon(lower, nodes):\n\t\t\t\t\t\t\t\tif len(points)==0:\n\t\t\t\t\t\t\t\t\tlower=-pH.depth\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tlower=min([p[1] for p in points])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlower=lower[1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr=b.radius\n\t\t\t\t\t\t\t#look how much of the stone that is within the scoop.\n\t\t\n\t\t\t\t\t\t\tpoints=col.circlesIntersectPoints((0,0), localPos, pH.depth, b.radius)\n\t\t\t\t\t\t\tassert points != False # we know that these circles collide.\n\t\t\t\t\t\t\tif points== True: #all of the stone inside or huge stone\n\t\t\t\t\t\t\t\tupper=b.z+b.radius\n\t\t\t\t\t\t\t\tlower=b.z-b.radius\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tupper=max(points[0][1], points[1][1])\n\t\t\t\t\t\t\t\tif col.pointInCircle((-twoDdist[1], b.z+b.radius), (0,0), pH.depth):\n\t\t\t\t\t\t\t\t\tassert b.z+b.radius>=upper\n\t\t\t\t\t\t\t\t\tupper=b.z+b.radius\n\t\t\t\t\t\t\t\tlower=min(points[0][1], points[1][1])\n\t\t\t\t\t\t\t\tif col.pointInCircle((-twoDdist[1], b.z-b.radius), (0,0), pH.depth):\n\t\t\t\t\t\t\t\t\tassert b.z-b.radius<=lower\n\t\t\t\t\t\t\t\t\tlower=b.z-b.radius\n\t\t\t\t\t\thInside=upper-lower\n\t\t\t\t\t\tassert hInside>=0\n\t\t\t\t\t\tratio=hInside/float(pH.depth)\n\t\t\t\t\t\tpH.strikedImmobile=True\n\t\t\t\t\t\tself.sim.stats['immobile boulder struck']+=1\n\t\t\t\t\t\tself.sim.stats['immobile vol sum']+=b.volume\n\t\t\t\t\t\tif ratio>self.m.immobilePercent:\n\t\t\t\t\t\t\tself.debugPrint(\"ABORTS %s percent is vertically occupided by an imobile boulder\"%str(ratio))\n\t\t\t\t\t\t\tpH.abort=True\n\t\t\t\t\t\t\tpH.done=True\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['haltTime'],auto['haltMound'])\n\t\t\t\t\t\t\tfor head in pHeads:\n\t\t\t\t\t\t\t\thead.timeConsumption['halting']+=t['haltTime'] #that's for both, if 2h\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b.radius>pH.biggestBlock:\n\t\t\t\t\t\t\tpH.biggestBlock=b.radius*2\n\t\t\t\tpH.moundSumA=sumA\t\t\n\t\t\tpH.moundObst=moundBould\n\t\t\th=Hole(orig,terrain=self.G.terrain,z=pH.depth, 
nodes=pH.getNodes(orig) , isSpherical=False)\n\t\t#time to mound and heap. With the Excavator inverting method, we don't take time for heaping now.\n\t\tif not self.m.inverting:\n\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telif self.m.inverting and self.m.invertingMethod=='KO': #heap first..\n\t\t\ttimeTmp=digTime+t['heapTime']\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telif self.m.inverting and self.m.invertingMethod=='Excavator': #don't heap..\n\t\t\ttimeTmp=digTime\n\t\t\tcommands=self.cmnd(commands, timeTmp,auto['mound'])\n\t\telse:\n\t\t\traise Exception('Logical error. If we are inverting, we need to use methods KO or Excavator, not %s'%self.invertingMethod)\n\t\tfor pH in pHeads:\n\t\t\tpH.timeConsumption['mounding']+=timeTmp\n\t\t#mounding failures\n\t\tfor h in self.plantHeads:\n\t\t\tif random.uniform(0,1)<self.m.moundingFailureProb and not h.remounded: #failure..\n\t\t\t\t\t\n\t\t\t\tif self.G.simParam['noRemound']:\n\t\t\t\t\th.debugPrint('failed mounding')\n\t\t\t\t\th.abort=True\n\t\t\t\telse:\n\t\t\t\t\th.debugPrint('Failed mounding.. the other heads have to wait')\n\t\t\t\t\tcommands=self.cmnd(commands, digTime+t['heapTime'],auto['mound'])\n\t\t\t\t\tfor pH in self.plantHeads:\n\t\t\t\t\t\tself.sim.stats['remound attempts']+=1\n\t\t\t\t\t\tpH.timeConsumption['mounding']+=digTime+t['heapTime']\n\t\t\t\t\t\tpH.remounded=True\n\t\t#it's time to invert\n\t\tif self.m.inverting:\n\t\t\tcommands=self.cmnd([], invertTime, auto=False)\n\t\t\treinverted=False\n\t\t\treinvertTime=digTime+t['heapTime'] #same for KO and Excv\n\t\t\tfor h in self.plantHeads:\n\t\t\t\tif pH.abort: continue\n\t\t\t\tself.sim.stats['inverting attempts']+=1\n\t\t\t\th.timeConsumption['inverting']+=invertTime\n\t\t\t\tif random.uniform(0,1)<self.m.invertFailureProb: #failure..\n\t\t\t\t\tself.debugPrint('reinverts')\n\t\t\t\t\tif self.G.simParam['noRemound']:\n\t\t\t\t\t\th.debugPrint('failed inverting')\n\t\t\t\t\t\th.abort=True\n\t\t\t\t\telif not reinverted:\n\t\t\t\t\t\treinverted=True\n\t\t\t\t\t\th.debugPrint('Failed mounding.. the other heads have to wait')\n\t\t\t\t\t\tcommands=self.cmnd(commands,reinvertTime,auto['mound'])\n\t\t\t\t\t\tfor pH in self.plantHeads:\n\t\t\t\t\t\t\tself.sim.stats['reinverting attempts']+=1\n\t\t\t\t\t\t\th.timeConsumption['inverting']+=reinvertTime\n\t\tself.plantSignals=0\n\t\tself.pHeadsUsed=0\n\t\tev=[]\n\t\tfor pH in pHeads:\n\t\t\tif not pH.abort: \n\t\t\t\tself.pHeadsUsed+=1\n\t\t\t\tpH.cause=\"plant\"\n\t\t\t\tpH.debugPrint(\"instructs pH to plant %s\")\n\t\t\t\tev.append(pH)\n\t\tif self.pHeadsUsed>0:\n\t\t\tcommands.append((\"interrupt\", ev)) #will later be recognized in run and self.interupt(pH) will be invoked. 
\n\t\t\tcommands.append((waituntil, self, self.plantingFinished)) #waits for one or both events.\n\t\tPlantingDevice.timesProf[1]+=time.clock()-tic\n\t\treturn commands", "def add_connection(self, house):\n if house not in self.connections and house.max_output < self.capacity:\n self.connections.append(house)\n self.capacity = self.capacity - house.max_output\n house.connected_battery = self.id\n # else:\n # print(\"Battery does not have enough capacity to connect this house\")\n # self.reach_capacity = True", "def add_weight(self):\r\n\r\n # Get the csrf token\r\n csrf = self.extract_csrf('https://wger.de/en/weight/add/')\r\n # Adding referer to the headers\r\n self.headers['Referer'] = API.url_weight\r\n\r\n # Take the weight entires from TOML file\r\n entries = self.cfg.get('payload', {}).get('weight')\r\n # Check for valid entires\r\n if entries:\r\n for payload in entries:\r\n # Add csrf token to payload\r\n payload['csrfmiddlewaretoken'] = csrf\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/weight.json', test=payload)\r\n # Post request\r\n self.add_post(payload, API.url_weight, self.weights)\r\n \r\n # Eliminates the referer from the headers\r\n self.headers.pop('Referer')" ]
[ "0.6452546", "0.5804844", "0.53684664", "0.5346167", "0.5280994", "0.5154602", "0.5111471", "0.5105995", "0.50667065", "0.50452876", "0.50355077", "0.50345284", "0.50326157", "0.4998017", "0.495867", "0.49376225", "0.49181387", "0.4901064", "0.49007452", "0.48770928", "0.48591042", "0.48008117", "0.47802794", "0.47753054", "0.47661108", "0.47656316", "0.4746474", "0.47223857", "0.47197622", "0.47187358" ]
0.66224295
0
Adds a valve to the water network model
def add_valve(self, name, start_node_name, end_node_name,
              diameter=0.3048, valve_type='PRV', minor_loss=0.0,
              initial_setting=0.0, initial_status='ACTIVE'):
    self._link_reg.add_valve(name, start_node_name, end_node_name, diameter,
                             valve_type, minor_loss, initial_setting, initial_status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_valve(self, name, start_node_name, end_node_name,\n diameter=0.3048, valve_type='PRV', minor_loss=0.0, \n initial_setting=0.0, initial_status='ACTIVE'):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(start_node_name, str) and len(start_node_name) < 32 and start_node_name.find(' ') == -1, \"start_node_name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(end_node_name, str) and len(end_node_name) < 32 and end_node_name.find(' ') == -1, \"end_node_name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(diameter, (int, float)), \"diameter must be a float\"\n assert isinstance(valve_type, str), \"valve_type must be a string\"\n assert isinstance(minor_loss, (int, float)), \"minor_loss must be a float\"\n assert isinstance(initial_setting, (int, float, str)), \"initial_setting must be a float or string\"\n assert isinstance(initial_status, (str, LinkStatus)), \"initial_status must be a string or LinkStatus\"\n \n if isinstance(initial_status, str):\n initial_status = LinkStatus[initial_status]\n start_node = self._node_reg[start_node_name]\n end_node = self._node_reg[end_node_name]\n \n valve_type = valve_type.upper()\n \n # A PRV, PSV or FCV cannot be directly connected to a reservoir or tank (use a length of pipe to separate the two)\n if valve_type in ['PRV', 'PSV', 'FCV']:\n if type(start_node)==Tank or type(end_node)==Tank or type(start_node)==Reservoir or type(end_node)==Reservoir:\n msg = '%ss cannot be directly connected to a tank. Add a pipe to separate the valve from the tank.' % valve_type\n logger.error(msg) \n raise RuntimeError(msg)\n if type(start_node)==Reservoir or type(end_node)==Reservoir:\n msg = '%ss cannot be directly connected to a reservoir. Add a pipe to separate the valve from the reservoir.' 
% valve_type\n logger.error(msg) \n raise RuntimeError(msg)\n \n # TODO check the following: PRVs cannot share the same downstream node or be linked in series\n \n # TODO check the following: Two PSVs cannot share the same upstream node or be linked in series\n \n # TODO check the following: A PSV cannot be connected to the downstream node of a PRV\n \n if valve_type == 'PRV':\n valve = PRValve(name, start_node_name, end_node_name, self)\n valve.initial_setting = initial_setting\n valve._setting = initial_setting\n elif valve_type == 'PSV':\n valve = PSValve(name, start_node_name, end_node_name, self)\n valve.initial_setting = initial_setting\n valve._setting = initial_setting\n elif valve_type == 'PBV':\n valve = PBValve(name, start_node_name, end_node_name, self)\n valve.initial_setting = initial_setting\n valve._setting = initial_setting\n elif valve_type == 'FCV':\n valve = FCValve(name, start_node_name, end_node_name, self)\n valve.initial_setting = initial_setting\n valve._setting = initial_setting\n elif valve_type == 'TCV':\n valve = TCValve(name, start_node_name, end_node_name, self)\n valve.initial_setting = initial_setting\n valve._setting = initial_setting\n elif valve_type == 'GPV':\n valve = GPValve(name, start_node_name, end_node_name, self)\n valve.headloss_curve_name = initial_setting\n valve.initial_status = initial_status\n valve.diameter = diameter\n valve.minor_loss = minor_loss\n self[name] = valve", "def add_edge(self, u, v, val):\n raise NotImplementedError()", "def set_is_watering(valve: Valve, value: bool) -> None:\n valve.is_watering = value", "def add(self, value):", "def addEdge(self,u,v):\r\n self.graph[u].append(v)", "def append(self, v):\n self.data.append(v)", "def AddEarthVelocity(self, ds):\n self.IsEarthVelocity = True\n self.EarthVelocity = ds", "def addEdge(self,u,v,w):\r\n self.graph.append([u,v,w])", "def add_data_single(self, pt, val):\n # It doesn't look like GPytorch has a way to add on data,\n # so we just have to create a new object.\n old_x = self.gp_core.train_inputs[0]\n old_y = self.gp_core.train_targets\n tensor_pt = torch.from_numpy(pt).reshape(1, len(pt))\n new_x = torch.cat((old_x, tensor_pt)).float()\n new_y = torch.cat((old_y, torch.tensor([val]).float())).float()\n self.gp_core = ExactGPModel(new_x, new_y, self.gp_core.covar_module,\n self.gp_core.likelihood)", "def add_electrode(self, e, name, kind, volt):\r\n\t\te.volt = volt\r\n\t\tself.electrode_dict[name] = (kind, e)\r\n\t\tif kind=='dc':\r\n\t\t\tself.dc_electrode_list.append((name,e))\r\n\t\tif kind=='rf':\r\n\t\t\tself.rf_electrode_list.append((name,e))", "def add(self, value):\n pass", "def add_edge(self, u, v):\n self.graph[u].append(v)", "def __init__(self, upstream=None, downstream=None,\n name='', Kv = 0.0, mdot0 = 0.0, verbose=0): \n global _valvecount\n if name == '':\n name = 'Valve_'+`_valvecount`\n _valvecount += 1\n FlowDevice.__init__(self,3,name,verbose)\n if upstream and downstream:\n self.install(upstream, downstream)\n self.setValveCoeff(Kv)", "def add(self, name, value) -> None:\n ...", "def append(self, val):\n self.val.append(val)", "def add_ens(self, ens):\n self.plotly_bt_range.add_ens(ens)", "def add_vertex(self, u, val):\n raise NotImplementedError()", "def add_data_single(self, pt, val):\n self.gp_core.add_data_single(pt, val)", "def add_weakmode(gearfile=None, parametername=None, value=None): \n\n tree = xml.etree.ElementTree.parse(gearfile)\n root = tree.getroot() \n\n maxvalue=value\n\n # List of z positions\n zPositions=[]\n IDs=[]\n # loop over 
ladders to find the z positions\n for detectors in root.findall('detectors'): \n for detector in detectors.findall('detector'):\n for layers in detector.findall('layers'):\n for layer in layers.findall('layer'):\n\n for ladder in layer.findall('ladder'):\n laddervalue=float(value)+float(ladder.get(parametername))\n zPositions.append(float(ladder.get('positionZ')))\n IDs.append(ladder.get('ID'))\n\n # Get the max z value in the z position list\n maxzpos=max(zPositions)\n\n # Calculate the parameter change per mm in z direction\n slope=float(value)/maxzpos\n\n values=[]\n\n # Calculate the values for the other planes\n for zPosition in zPositions:\n values.append(float(zPosition*slope))\n\n planenumber=0\n for sensID in IDs:\n set_parameter(gearfile=gearfile, sensorID=sensID, parametername=parametername, value=values[planenumber])\n planenumber=planenumber+1", "def add_value(trajectories, val_func):\n for trajectory in trajectories:\n observes = trajectory['observes']\n values = val_func.predict(observes)\n trajectory['values'] = values", "def add(self, p, s, node) -> None:\n self.place.append(p)\n self.station.append(s)\n self.pi.append(node.pi[p, s] if p != float('inf') else float('inf'))\n self.noncoverage.append(node.noncoverage.left + node.noncoverage.right)\n self.cost.append(node.cost)\n self.delay.append(node.delay)\n self.step.append(node.key)", "def add_value(self, value):\n self.value = value", "def add_cost_value(self, var_name, val):\n self.add_other_value(self.cost, var_name, val)\n\n # add the change of cost automatically\n if len(self.cost[var_name]) > 1: # if the cost is not empty\n last_val = self.get_last_last_cost_val(var_name)\n cost_change = abs(val - last_val) / last_val\n self.add_other_value(self.cost_change, var_name + '_change', cost_change)", "def add(self, val):\n key = self.get_key(val)\n self.store.add(key)\n\n # Keep track of summary stats\n self._count += 1\n self._sum += val\n if val < self._min:\n self._min = val\n if val > self._max:\n self._max = val", "def addAresta(self,u,v,peso):\n self.grafo.append([u,v,peso])", "def add_vovnet_config(cfg):\n _C = cfg\n\n _C.MODEL.VOVNET = CN()\n\n _C.MODEL.VOVNET.CONV_BODY = \"V-39-eSE\"\n _C.MODEL.VOVNET.OUT_FEATURES = [\"stage2\", \"stage3\", \"stage4\", \"stage5\"]\n\n # Options: FrozenBN, GN, \"SyncBN\", \"BN\"\n _C.MODEL.VOVNET.NORM = \"FrozenBN\"\n\n _C.MODEL.VOVNET.OUT_CHANNELS = 256\n\n _C.MODEL.VOVNET.BACKBONE_OUT_CHANNELS = 256", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)", "def add_data(g):\n param = Parameters(g)\n\n # Permeability\n param.set_tensor(\"flow\", tensor.SecondOrderTensor(g.dim, np.ones(g.num_cells)))\n\n # Source term\n source = np.array([rhs(*pt) for pt in g.cell_centers.T])\n param.set_source(\"flow\", g.cell_volumes * source)\n\n # Boundaries\n bound_faces = g.tags[\"domain_boundary_faces\"].nonzero()[0]\n bound_face_centers = g.face_centers[:, bound_faces]\n\n labels = np.array([\"dir\"] * bound_faces.size)\n\n bc_val = np.zeros(g.num_faces)\n bc_val[bound_faces] = np.array([solution(*pt) for pt in bound_face_centers.T])\n\n param.set_bc(\"flow\", BoundaryCondition(g, bound_faces, labels))\n param.set_bc_val(\"flow\", bc_val)\n\n return {\"param\": param}", "def add(self, value):\n 
self._resolve_copies()\n self.data.append(value)", "def add_var(self, name, comp):\n self._main_model.add_var(name, comp)" ]
[ "0.643315", "0.5798691", "0.5605155", "0.5582534", "0.55659705", "0.55612314", "0.5479006", "0.54574764", "0.54148215", "0.5413817", "0.53985167", "0.5379142", "0.53286594", "0.53122795", "0.5277096", "0.5270513", "0.5226005", "0.5203529", "0.51670414", "0.51572764", "0.5147884", "0.5125684", "0.51157576", "0.5111813", "0.51063114", "0.51056445", "0.51037645", "0.5095665", "0.50879675", "0.5082214" ]
0.6613797
0
Adds a curve to the water network model
def add_curve(self, name, curve_type, xy_tuples_list):
    self._curve_reg.add_curve(name, curve_type, xy_tuples_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_curve(self):\n pv_name = self._get_full_pv_name(self.pv_name_line_edt.text())\n color = random_color()\n for k, v in self.channel_map.items():\n if color == v.color:\n color = random_color()\n\n self.add_y_channel(pv_name=pv_name, curve_name=pv_name, color=color)", "def AddCurve(self, func: object, leftdomain: int = 1, rightdomain: int = 0, bc: object = <netgen.libngpy._meshing.NGDummyArgument object at 0x00000166FFF8E9F0>, maxh: float = 1e+99) -> None:", "def add_curve(self, **args):\n if \"name\" not in args:\n raise KeyError(\"No curve name given.\")\n if \"coords\" not in args:\n raise KeyError(\"No coordinates given.\")\n if \"values\" not in args:\n raise KeyError(\"No values given.\")\n if len(args[\"coords\"]) != len(args[\"values\"]):\n raise ValueError(\"Number of time coordinate points differs from number of values\")\n entries = len(self.tree['curves']['children'])\n self.tree['curves']['children']['curve' + str(entries)] = self.populate_tree('curve',\n children={})\n parameter = self.tree['curves']['children']['curve' + str(entries)]\n parameter['children']['name'] = self.populate_tree('name', text=args['name'], children={})\n coord_str = \"\"\n value_str = \"\"\n for i, coord in enumerate(args[\"coords\"]):\n if i < (len(args[\"coords\"])-1):\n coord_str = coord_str + str(coord) + \" \"\n value_str = value_str + str(args[\"values\"][i]) + \" \"\n if i == (len(args[\"coords\"])-1):\n coord_str = coord_str + str(coord)\n value_str = value_str + str(args[\"values\"][i])\n parameter['children']['coords'] = self.populate_tree('coords', text=coord_str, children={})\n parameter['children']['values'] = self.populate_tree('values', text=value_str, children={})", "def curve(*args, append: bool=True, bezier: bool=True, degree: float=3, editPoint:\n Union[List[float, float, float], List[List[float, float, float]]]=None, knot:\n Union[float, List[float]]=0.0, name: AnyStr=\"\", objectSpace: bool=True, periodic:\n bool=True, point: Union[List[float, float, float], List[List[float, float,\n float]]]=None, pointWeight: Union[List[float, float, float, float], List[List[float,\n float, float, float]]]=None, replace: bool=True, worldSpace: bool=True,\n **kwargs)->AnyStr:\n pass", "def setCurve(self, index, curve) -> None:\n ...", "def add_curve(self, name, curve_type, xy_tuples_list):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(curve_type, (type(None), str)), \"curve_type must be a string\"\n assert isinstance(xy_tuples_list, (list, np.ndarray)), \"xy_tuples_list must be a list of (x,y) tuples\"\n \n curve = Curve(name, curve_type, xy_tuples_list)\n self[name] = curve", "def PricingAddCurves(builder, curves):\n return AddCurves(builder, curves)", "def append(self, curve, idx=None):\n insert = False if idx is None else True\n if isinstance(curve, Graph):\n for c in curve.iterCurves():\n if insert:\n self.data.insert(idx, c)\n idx += 1\n else:\n self.data.append(c) # c are Curves, we can do like that\n elif isinstance(curve, list):\n for c in curve:\n if insert:\n self.data.insert(idx, c)\n idx += 1\n else:\n self.append(c) # call itself, must check if c is a Curve\n elif isinstance(curve, Curve):\n if insert:\n self.data.insert(idx, curve)\n else:\n self.data.append(curve)\n elif isinstance(curve, str) and curve == 'empty':\n curve = Curve([[np.inf], [np.inf]], {})\n if insert:\n self.data.insert(idx, curve)\n else:\n self.data.append(curve)\n else:\n 
print('Graph.append: failed (type:', type(curve), ')')", "def test_add_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 + 100\n assert (c2.df.iloc[0][0] - 101) < 0.0001", "def learning_curve(self, features, labels):\n return learning_curve(self._model, features, labels)", "def add(self, y):\n return circuit.add(self, y)", "def addCurveSegment(self, *args):\n return _libsbml.Curve_addCurveSegment(self, *args)", "def fillCurveLE(self):\n\t\tsel = mn.ls( sl = True, dag = True, ni = True, typ = 'nurbsCurve' )\n\t\tself.curve_le.setText( sel[0].name )", "def curveTo(self, *points: Tuple[float, float]) -> None:\n raise NotImplementedError", "def as_curve(self, context):\n curve = bpy.data.curves.new('LINE', type='CURVE')\n curve.dimensions = '2D'\n spline = curve.splines.new('POLY')\n spline.use_endpoint_u = False\n spline.use_cyclic_u = False\n pts = self.pts\n spline.points.add(len(pts) - 1)\n for i, p in enumerate(pts):\n x, y, z = p\n spline.points[i].co = (x, y, 0, 1)\n curve_obj = bpy.data.objects.new('LINE', curve)\n context.scene.collection.objects.link(curve_obj)\n curve_obj.select_set(state=True)", "def oncurve(self, P):\n\t\traise Exception(NotImplemented)", "def add(self, y):\n if self.isGood:\n self.yValues.append(y)\n else:\n self.yValues.append(0.)\n self.lineplot.set_data(np.arange(0, len(self.yValues)), self.yValues)", "def __add__(self, other, sub=False, interpolate=-1, offsets=False, operator='add'):\n def op(x, y, operator):\n if operator == 'sub':\n return x - y\n if operator == 'mul':\n return x * y\n if operator == 'div':\n return x / y\n if operator == 'pow':\n return x ** y\n if operator != 'add':\n print('WARNING Curve.__add__: unexpected operator argument('\n + operator + ').')\n return x + y\n\n if sub == True:\n operator = 'sub'\n selfx = self.x_offsets if offsets else self.x\n selfy = self.y_offsets if offsets else self.y\n if not isinstance(other, Curve): # add someting/number to a Curve\n out = Curve([selfx(), op(selfy(), other, operator)],\n self.getAttributes())\n # remove offset information if use it during calculation\n if offsets:\n out.update({'offset': '', 'muloffset': ''})\n out = out.castCurve(self.classNameGUI())\n return out # cast type\n # curve1 is a Curve\n otherx = other.x_offsets if offsets else other.x\n othery = other.y_offsets if offsets else other.y\n # default mode -1: check if can gain time (avoid interpolating)\n r = range(0, min(len(selfy()), len(othery())))\n if interpolate == -1:\n interpolate = 0 if np.array_equal(selfx(index=r), otherx(index=r)) else 1\n # avoiding interpolation\n if not interpolate:\n le = min(len(selfy()), len(othery()))\n r = range(0, le)\n if le < len(selfy()):\n print('WARNING Curve __add__: Curves not same lengths, clipped',\n 'result to shortest (', len(selfy()), ',', len(othery()),\n ')')\n if not np.array_equal(selfx(index=r), otherx(index=r)):\n print('WARNING Curve __add__ ('+operator+'): Curves not same x',\n 'axis values. 
Consider interpolation (interpolate=1).')\n out = Curve([selfx(index=r), op(selfy(index=r), othery(index=r), operator)], other.getAttributes())\n out.update(self.getAttributes())\n if offsets: # remove offset information if use during calculation\n out.update({'offset': '', 'muloffset': ''})\n out = out.castCurve(self.classNameGUI())\n return out\n else: # not elementwise : interpolate\n from scipy.interpolate import interp1d\n # construct new x -> all x which are in the range of the other curv\n datax = list(selfx())\n if interpolate == 1: # x from both self and other\n xmin = max(min(selfx()), min(otherx()))\n xmax = min(max(selfx()), max(otherx()))\n # no duplicates\n datax += [x for x in otherx() if x not in datax]\n else:\n # interpolate 2: copy x from self, restrict to min&max of other\n xmin, xmax = min(otherx()), max(otherx())\n datax = [x for x in datax if x <= xmax and x >= xmin]\n reverse = (selfx(index=0) > selfx(index=1)) if len(selfx()) > 1 else False\n datax.sort(reverse=reverse)\n f0 = interp1d(selfx(), selfy(), kind=1)\n f1 = interp1d(otherx(), othery(), kind=1)\n datay = [op(f0(x), f1(x), operator) for x in datax]\n out = Curve([datax, datay], other.getAttributes())\n out.update(self.getAttributes())\n if offsets: # remove offset information if use during calculation\n out.update({'offset': '', 'muloffset': ''})\n out = out.castCurve(self.classNameGUI())\n return out", "def curvesAdd(self, idx0, idx1, interpolate=0, **kwargs):\n kwargs.update({'interpolate': interpolate, 'sub': False})\n return self.curve(idx0).__add__(self.curve(idx1), **kwargs)", "def register_curve(self, curve):\n key = tuple(curve.points())\n if key not in self.curves:\n # new curve (lock and register)\n curve.is_locked = True # points list must not change, else not valid key\n self.curves[key] = curve\n return self.curves[key]", "def set_curve(self, key_curve):\n self.curve = key_curve", "def curve_number(self):", "def fit_curve(x,y,p0,func):\n ifixx = np.zeros(np.array(x).shape)\n data = sodr.Data(x,y)\n model = sodr.Model(func)\n worker = sodr.ODR(data,model,p0,ifixx=ifixx,maxit=500)\n out = worker.run()\n out = worker.restart()\n return out", "def attachCurve(*args, blendBias: Union[float, bool]=0.5, blendKnotInsertion: bool=False,\n caching: bool=True, keepMultipleKnots: bool=True, method: Union[int, bool]=0,\n nodeState: Union[int, bool]=0, parameter: Union[float, bool]=0.1, reverse1:\n bool=False, reverse2: bool=False, constructionHistory: bool=True, name:\n AnyStr=\"\", object: bool=True, replaceOriginal: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def _add_final_linear_layer(model_1, model_2):\n data_1 = model_1.weight.data\n data_2 = model_2.weight.data\n\n new_weight = torch.cat((data_1, data_2), dim=1)\n new_bias = model_1.bias + model_2.bias\n\n result_model = torch.nn.Linear(\n model_1.in_features + model_2.in_features, model_1.out_features\n )\n result_model.weight = torch.nn.Parameter(new_weight)\n result_model.bias = torch.nn.Parameter(new_bias)\n\n return result_model", "def add_datum(self, x, fields):\n\t\n\t\tfor name, value in fields.iteritems():\n\t\t\tif name not in self.curves:\n\t\t\t\tcurve = QwtPlotCurve()\n\t\t\t\tcurve.attach(self)\n\t\t\t\tself.curves[name] = [curve, [], []]\n\t\t\t\n\t\t\tstuff = self.curves[name]\n\t\t\tstuff[1].append(x)\n\t\t\tstuff[2].append(value)", "def curve(self):\n return self.__curve", "def setCurve(self, *args):\n return _libsbml.ReferenceGlyph_setCurve(self, *args)", "def 
add_segment(self, curve, start_y=0, end_y=0):\n palette = \"dark\" if (len(self.segments) / s.RUMBLE_LENGTH) % 2 == 0 else \"light\"\n segment = seg.Segment(palette, len(self.segments), curve, start_y, end_y)\n\n self.segments.append(segment)", "def __iadd__(self, func):\n self.append_plot(func)\n return self" ]
[ "0.6272838", "0.6218104", "0.6078385", "0.5991374", "0.58583546", "0.5828755", "0.5766712", "0.57254916", "0.5713772", "0.56235594", "0.5622298", "0.55829", "0.5549861", "0.55252457", "0.54938287", "0.5488295", "0.54653907", "0.5412902", "0.5401338", "0.53959215", "0.5330481", "0.5283222", "0.52727467", "0.5263855", "0.5252827", "0.52296406", "0.52184033", "0.5205586", "0.5202736", "0.51856214" ]
0.6482353
0
Removes a pattern from the water network model
def remove_pattern(self, name): self._pattern_reg.__delitem__(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_pattern(input_txt,pattern):\r\n r = re.findall(pattern,input_txt)\r\n\r\n for i in r:\r\n input_txt = re.sub(i,'',input_txt)\r\n return input_txt", "def delete(self, pattern, pattern_type=None):\n\t\tpattern = convert_pattern(pattern, pattern_type)\n\t\twith self.AutoSplitlines():\n\t\t\tself.lines = [line for line in self.lines if not pattern.search(line)]", "def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)", "def _clear_pattern(self):\n # pattern group\n self.__interval = None\n self.__days_of_week = set()\n self.__first_day_of_week = None\n self.__day_of_month = None\n self.__month = None\n self.__index = 'first'\n # range group\n self.__start_date = None\n self.__end_date = None\n self.__occurrences = None", "def cleanGraph(self,graph):\n i=0\n while i+1<len(graph):\n if self.getDistance(graph[i],graph[i+1])==0:\n del graph[i+1]\n else:\n i+=1\n return graph", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def deleteTempModels(self):\r\n # research\r\n # remove old control pts and debug annotations from scene\r\n while slicer.util.getNodes('.*') != {}:\r\n nodes = slicer.util.getNodes('.*')\r\n for key,value in zip(nodes.keys(),nodes.values()):\r\n if key != \".temp\": slicer.mrmlScene.RemoveNode(value)\r\n while slicer.util.getNodes('_*') != {}:\r\n nodes = slicer.util.getNodes('_*')\r\n for key,value in zip(nodes.keys(),nodes.values()):\r\n slicer.mrmlScene.RemoveNode(value)\r\n # ruler measurements\r\n while 0 and slicer.util.getNodes('M*') != {}:\r\n nodes = slicer.util.getNodes('M*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)\r\n # ROI annotations\r\n while 0 and slicer.util.getNodes('R*') != {}:\r\n nodes = slicer.util.getNodes('R*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)\r\n # fiducial annotations\r\n while 0 and slicer.util.getNodes('F*') != {}:\r\n nodes = slicer.util.getNodes('F*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)", "def applyMorphologicalCleaning(self, image):", "def train_single_pattern(self, pattern):\n bmu = self.bmu_util.calculate_bmu(pattern)\n self._train(bmu, self.network.weights, pattern)\n self._apply_correction()", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def remove_pattern(file_contents, pattern):\n sub_pattern, replacement = re.compile(pattern), 'CRO_'\n for index, row in enumerate(file_contents):\n file_contents[index][0] = sub_pattern.sub(replacement, row[0])\n return file_contents", "def trim_dag(G):\n SINKS = G.graph['SINKS']\n SOURCES = G.graph['SOURCES']\n\n for node in G.nodes():\n node_attr = G.node[node]\n children = G.successors(node)\n\n if check_key(node_attr, 'is_const', True):\n if len(children) == 0:\n print \"!!!!! 
\", node\n\n SOURCES.remove(node)\n if node in SINKS:\n SINKS.remove(node)\n for c in children:\n if 'immediates' in G.node[c].keys():\n G.node[c]['immediates'].append(node_attr['value'])\n\n if check_key(node_attr, 'op', \"mv\") or check_key(node_attr, 'is_const', True) :\n parents = G.predecessors(node)\n \n for p in parents:\n for c in children:\n G.add_edge(p, c)\n #print G.predecessors(node), \" -> \", node, \" -> \", G.successors(node)\n G.remove_node(node)\n\n return", "def remove_cycle(self):\n if self.is_cyclic() is True:\n\n while self.is_cyclic():\n min_edge = self.remove_cycle_recur()\n self.remove_edge(min_edge[0], min_edge[1])\n\n print(\"All the cycles are removed from the graph.\")\n else:\n print(\"The graph has no cycle.\")", "def clear_exclude_bits(self):\n self.bitcell_array.init_graph_params()", "def remove_background(frame, bgModel):\n \n global learningRate\n fgmask = bgModel.apply(frame, learningRate=learningRate)\n kernel = np.ones((3, 3), np.uint8)\n fgmask = cv2.erode(fgmask, kernel, iterations=1)\n res = cv2.bitwise_and(frame, frame, mask=fgmask)\n\n return res", "def remove_edge(self, edge: Edge) -> Edge:", "def set_full(self):\n pattern = [[1,1,1,1],\n [1,1,1,1],\n [1,1,1,1],\n [1,1,1,1]]\n self.set_pattern(pattern)", "def deleteAttrPattern(*args, allPatterns: bool=True, patternName: AnyStr=\"\", patternType:\n AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def delete_matches(self, pattern):\n with self.connect() as c:\n cur = c.cursor()\n cur.execute(self.create_query(\"DELETE\", pattern))", "def remove_nodes(model: onnx.ModelProto,\n predicate: Callable) -> onnx.ModelProto:\n # ! this doesn't handle inputs/outputs\n logger = get_root_logger()\n while True:\n connect = None\n for i, node in enumerate(model.graph.node):\n if predicate(node):\n assert len(node.input) == 1\n assert len(node.output) == 1\n connect = (node.input[0], node.output[0])\n logger.info(f'remove node {node.name}')\n del model.graph.node[i]\n break\n if not connect:\n break\n src, dst = connect\n for node in model.graph.node:\n for i, input in enumerate(node.input):\n if input == dst:\n node.input[i] = src\n return model", "def suppress(self):\n self.pattern = hre.begins_not_silently_grouped.sub(\"(?:\", self.pattern)\n self._compiled = None\n self.structure.clear()\n return self", "def reset(self):\n self.patterns = []\n self.number = -1", "def remove_neighbor(self):\n self.fono -= 1", "def remove_line(self, origin):\n current_tile = self.board[origin[0]][origin[1]]\n\n if current_tile.is_dot:\n temp = current_tile.next\n current_tile.next = None\n current_tile = temp\n\n # Remove color of all non dot tiles in line.\n while current_tile and current_tile.color and not current_tile.is_dot:\n temp = current_tile.next\n current_tile.color = None\n current_tile.next = None\n current_tile = temp", "def set_empty(self):\n pattern = [[0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0]]\n self.set_pattern(pattern)", "def remove_constant_points(path):\n z = path\n while \"UD\" in z or \"DU\" in z or \"LR\" in z or \"RL\" in z:\n z = z.replace(\"UD\", \"\")\n z = z.replace(\"DU\", \"\")\n z = z.replace(\"LR\", \"\")\n z = z.replace(\"RL\", \"\")\n return z", "def _tf_remove_noise_op(self):\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()", "def _prune(self, idx):\n idx = list(idx)\n neurons = []\n for nold in 
self.neurons:\n k = nold[1] # number of neurons\n ix1 = [i for i in idx if i < k] # index for current neuron type\n idx = [i-k for i in idx if i >= k]\n func = nold[0]\n number = len(ix1)\n W = nold[2][:, ix1]\n bias = nold[3][ix1]\n neurons.append((func, number, W, bias))\n self.neurons = neurons", "def del_field_pattern(self):\n self.ui.tableFields.removeRow(self.ui.tableFields.currentRow())", "def noiseRemoval(array, minSize, classes):\n img=array.astype('int')\n for i in range(classes):\n B=(img!=i) # return a bool array\n B = morphology.remove_small_objects(B, min_size=minSize, connectivity=1) \n img[B==False]=i\n \n return img" ]
[ "0.6203475", "0.5598266", "0.55046695", "0.5405062", "0.539219", "0.5379158", "0.53727037", "0.53561664", "0.53032506", "0.52915484", "0.5265123", "0.52626526", "0.52523774", "0.5230492", "0.52108836", "0.51926154", "0.5167957", "0.5167639", "0.5162403", "0.5161164", "0.51583785", "0.51461387", "0.51450723", "0.5144631", "0.51405317", "0.5139", "0.5129752", "0.51262957", "0.51132315", "0.5100903" ]
0.61529595
1
Removes a curve from the water network model
def remove_curve(self, name): self._curve_reg.__delitem__(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_curve(self, pv_name):\n curve = self.chart.findCurve(pv_name)\n if curve:\n self.chart.removeYChannel(curve)\n del self.channel_map[pv_name]\n self.chart.removeLegendItem(pv_name)\n\n widgets = self.findChildren((QCheckBox, QLabel, QPushButton, QGroupBox), pv_name)\n for w in widgets:\n w.deleteLater()\n\n if len(self.chart.getCurves()) < 1:\n self.enable_chart_control_buttons(False)\n self.show_legend_chk.setChecked(False)", "def RemovePCurve(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_RemovePCurve(self, *args)", "def del_curve(self, key):\n del self[key]\n del self._labels[key]", "def RemoveCurve3d(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_RemoveCurve3d(self, *args)", "def rm_calibration(self):\n\n self.bin_edges_kev = None", "def removeInsignificant(self):\n #TODO make sure this method now works AFTER meanCurves and analyseCures have been run\n \n # Searching for curves that are in the noise\n if len(self.plate.noProtein) > 0:\n thresholdm, i = rh.meanSd([self.originalPlate.wells[x].monoThresh for x in self.plate.noProtein])\n for well in self.originalPlate.wells:\n if not self.originalPlate.wells[well].contents.isControl and well not in self.delCurves:\n if self.originalPlate.wells[well].monoThresh > thresholdm/1.15:\n #self.wells[well].fluorescence = None\n self.delCurves.append(well)\n\n # Searching for curves that have overloaded the sensor\n for well in self.wells:\n if well not in self.delCurves:\n mini = self.wells[well].fluorescence[0]\n maxi = self.wells[well].fluorescence[0]\n\n maxInd = 0\n for i in range(len(self.wells[well].fluorescence)):\n if self.wells[well].fluorescence[i] > maxi:\n maxi = self.wells[well].fluorescence[i]\n maxInd = i\n if self.wells[well].fluorescence[i] < mini:\n mini = self.wells[well].fluorescence[i]\n\n diff = maxi - mini\n\n # A boundry defining how much the points can fluctuate and still be considered flat\n lowFlatBoundry = maxi - 0.005*diff\n\n # Look each way to see how many temperature steps the curve stays flat for\n count = 0\n ind = maxInd - 1\n while ind>=0:\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1\n ind -= 1\n else:\n break\n ind = maxInd+1\n while ind<len(self.wells[well].fluorescence):\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1 \n ind += 1\n else:\n break\n if well not in self.delCurves and count >= 10:\n self.delCurves.append(well) \n return", "def cut_ppl_off(self, G):\r\n for pre, node in list(G.edges):\r\n ew = G.edges[pre, node]['weight']\r\n if ew <= -.95:\r\n G.remove_edge(pre, node)\r\n elif ew >= 1:\r\n G.edges[pre, node]['weight'] = 1.0\r\n else:\r\n continue\r\n return G", "def backward_elimination(X, y, sig_level=0.05):\n regressor_OLS = sm.OLS(y, X).fit()\n max_pvalue = max(regressor_OLS.pvalues).astype(float)\n if max_pvalue < sig_level:\n print(regressor_OLS.summary())\n return X\n else:\n max_pvalue_index = np.argmax(regressor_OLS.pvalues)\n X = np.delete(X, max_pvalue_index, axis=1)\n backward_elimination(X, y, sig_level)", "def unfreeze(net):\n for p in net.parameters():\n p.requires_grad_(True)\n return net", "def remove(self, i):\n assert self.apply_remove_point_rules((self._ys[i], self._xs[i])), 'Removal rules are not satisfied'\n\n if len(self.get_raw_xs()) > 5:\n if self.is_settable:\n self._remove_xs(i)\n self._remove_ys(i)\n self.is_changed = True\n else:\n raise ValueError('graph '+str(self.name)+' is not is_settable')\n elif not self.is_raw_data:\n raise ValueError('Must be at least 5 points for interpolation.')", "def 
remove_dark(self):\r\n self.decimate(numpy.isfinite(self.z))", "def delY(self):\n del self.components[1]", "def delY(self):\n del self.components[1]", "def clearAnimCurve(animationCurve):\n fnAnimcurve = mUtils.getFn((animationCurve))\n [fnAnimcurve.remove(0) for a in range(int(fnAnimcurve.numKeys))]", "def Trim(self, *args):\n return _Adaptor3d.Adaptor3d_Curve_Trim(self, *args)", "def remove_drawing_poly(self):\n\n self.drawing_poly = QPolygonF()\n self.drawing_points_coords = []\n\n for p in self.drawing_points:\n p.setVisible(False)\n\n for line in self.connecting_line_list:\n line.setVisible(False)\n if self.connecting_line:\n self.connecting_line.setVisible(False)\n self.connecting_line = None\n self.first_draw = True\n if self.set_tooltip:\n self.set_tooltip(\"\")", "def neutral(self):\n\t\treturn AffineCurvePoint(None, None, self)", "def remove_line(self, origin):\n current_tile = self.board[origin[0]][origin[1]]\n\n if current_tile.is_dot:\n temp = current_tile.next\n current_tile.next = None\n current_tile = temp\n\n # Remove color of all non dot tiles in line.\n while current_tile and current_tile.color and not current_tile.is_dot:\n temp = current_tile.next\n current_tile.color = None\n current_tile.next = None\n current_tile = temp", "def unsetCoefficient(self):\n return _libsbml.FluxObjective_unsetCoefficient(self)", "def remove_chain(self, chain, color, current_state):\r\n for position in self.chains[(chain, color)]:\r\n current_state[position[0]][position[1]] = 0\r\n return current_state", "def RemoveZeroVar(chain):\n\treturn chain[:, np.invert((np.sum(np.var(chain, axis=0), axis=1)<1e-10)), :]", "def curves_callback(ds, data_element):\n if data_element.tag.group & 0xFF00 == 0x5000:\n del ds[data_element.tag]", "def anti_deriv(self):\n poly_anti_deriv = [0]\n for i, val in enumerate(self.coeff):\n poly_anti_deriv.append(round(val/(i+1.0), 2))\n return Poly(poly_anti_deriv)", "def deleteNeedleValidationModelsFromScene(self):\r\n # producitve #onbutton\r\n profprint()\r\n while slicer.util.getNodes('manual-seg_*') != {}:\r\n nodes = slicer.util.getNodes('manual-seg_*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)\r\n while slicer.util.getNodes('obturator-seg_*') != {}:\r\n nodes = slicer.util.getNodes('obturator-seg_*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)\r\n # bezier control points\r\n self.deleteTempModels()\r\n while slicer.util.getNodes('template slice position*') != {}:\r\n nodes = slicer.util.getNodes('template slice position*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)", "def remove_edge(self, edge: Edge) -> Edge:", "def removeOutliers(self):\n #With the DSFPlate object, we can just use self.wells.pop() to remove outliers\n visited = []\n discard = []\n for well in self.wells:\n if well not in visited:\n reps = []\n reps += self.originalPlate.repDict[well]\n pairs = combinations(reps,2)\n distMatrix = [[0 for x in range(len(reps))] for y in range(len(reps))]\n for pair in pairs:\n dist = sqrDiffWellFluoro(self.wells[pair[0]].fluorescence,self.wells[pair[1]].fluorescence)\n distMatrix[reps.index(pair[0])][reps.index(pair[1])] = dist\n distMatrix[reps.index(pair[1])][reps.index(pair[0])] = dist\n keep = rh.discardBad(reps,distMatrix,SIMILARITY_THRESHOLD)\n for rep in reps:\n visited.append(rep)\n if rep not in keep:\n discard.append(rep)\n for well in discard:\n self.wells[well].fluorescence = None\n self.delCurves.append(well)\n return", "def free_curvature(self) -> None:\n 
self.n1.free = True\n self.n2.free = True", "def remove_obstacle(self, x, y):\n self.BOARD[y][x].traversable = True\n self.board_array[y][x] = 0", "def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)", "def cancel(self) -> None:\n if self.current is not None and self.current.size:\n # reset path\n self.labels.flat[self.current] = False\n self.costs.flat[self.current] = np.finfo('d').max\n # reset path end\n self.labels.flat[self.destiny] = False\n self.costs.flat[self.destiny] = np.finfo('d').max" ]
[ "0.6330724", "0.62075675", "0.56893647", "0.56485957", "0.5623504", "0.56010044", "0.5583296", "0.55401576", "0.5525893", "0.54791206", "0.544778", "0.5441997", "0.5441997", "0.54373914", "0.53785205", "0.5366104", "0.5363655", "0.5363136", "0.5342044", "0.5332126", "0.52993965", "0.52938515", "0.52855366", "0.52565753", "0.5255141", "0.52456", "0.5220163", "0.5218291", "0.521813", "0.5212898" ]
0.6684406
0
Removes a source from the water network model
def remove_source(self, name):
    logger.warning('You are deleting a source. This could have unintended side effects. If you are replacing values, use get_source(name) and modify it instead.')
    source = self._sources[name]
    self._pattern_reg.remove_usage(source.strength_timeseries.pattern_name, (source.name, 'Source'))
    self._node_reg.remove_usage(source.node_name, (source.name, 'Source'))
    del self._sources[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RemoveSource(self,source):\n self._sources.RemoveSource(source)", "def RemoveSource(self, source):\n self._sources.remove(source)", "def removeModelSource(self, modelSource):\n self._modelSources.remove(modelSource)\n if modelSource.isLoaded():\n self._reload()", "def unsetSource(self):\n return _libsbml.ExternalModelDefinition_unsetSource(self)", "def remove(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['remove', source])\n self.m.path.mock_remove_paths(source)", "def delete(self, source):\n _source = self._source_prefix+source\n assert _source in self.cache.keys()\n del self.cache[_source]", "def remove_virtualsource(self, name):\n self._auraliser.remove_object(name)", "def delete_source(self, src_name: SourceName) -> None:\n while True:\n try:\n response = self.genes.query(\n IndexName=\"src_index\",\n KeyConditionExpression=Key(\"src_name\").eq(src_name.value),\n )\n except ClientError as e:\n raise DatabaseReadException(e)\n records = response[\"Items\"]\n if not records:\n break\n with self.genes.batch_writer(\n overwrite_by_pkeys=[\"label_and_type\", \"concept_id\"]\n ) as batch:\n for record in records:\n try:\n batch.delete_item(\n Key={\n \"label_and_type\": record[\"label_and_type\"],\n \"concept_id\": record[\"concept_id\"],\n }\n )\n except ClientError as e:\n raise DatabaseWriteException(e)\n\n try:\n self.metadata.delete_item(Key={\"src_name\": src_name.value})\n except ClientError as e:\n raise DatabaseWriteException(e)", "def remove_connection(self, source, target):\r\n\r\n connection = (self.coalesce_node(source), self.coalesce_node(target))\r\n self.connections.discard(connection)", "def remove_empty_sources(self):\n for source in [\"dxf\", \"edilizia\", \"easyroom\", \"merged\"]:\n if source in self and not self[source]:\n del self[source]", "def src_delete(state):\n _lib.src_delete(state)", "def remove(self, source, graph, dest):\n return self.server.execute(self._execute_operation(\n source, graph, dest,\n ttypes.ExecuteOperationType.Remove))", "def delSource(A,bSize,comp):\n sA,comp = delSink(A.T,bSize,comp)\n return sA.T,comp", "def delete_source(username, id, force, token=None):\n if not force:\n click.confirm(\n \"Are you sure you want to delete {0} {1}?\".format(username, id), abort=True\n )\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.delete(url)\n if r.status_code == 204:\n click.echo(\"Source deleted.\")\n else:\n raise errors.TilesetsError(r.text)", "def keep_potential_source(self):\n self.source = self.potential_source", "def remove_edge(self, source: n, destination: n):\n self._graph[source].remove(destination)\n if not self._directed:\n self._graph[destination].remove(source)", "def deleteRig(self):\n\n allNodes = cmds.ls(\"*\")\n for node in allNodes:\n if cmds.objExists(node + \".sourceModule\"):\n cmds.lockNode(node, lock=False)\n source = cmds.getAttr(node + \".sourceModule\")\n if source == self.name:\n try:\n cmds.delete(node)\n except:\n pass", "def unblock(self, source):\n raise NotImplementedError", "def remove(self, s):\n if s in self.outputs:\n self.outputs.remove(s)\n self.inputs.remove(s)\n del self.conns[s]\n s.close()", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def clear_mappings(g, source):\n\n if \"m\" in g.node[source]:\n del g.node[source][\"m\"]\n\n for n in g.neighbors_iter(source):\n if \"m\" in g.node[n]:\n 
del g.node[n][\"m\"]", "def remove(self):\n self.__source_gate._unregister_outgoing(self)\n self.__target_slot._unregister_incoming(self)", "def delete_source_from_s3(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_source_from_s3\")", "def delete_source_from_s3(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_source_from_s3\")", "def remove(self, source, destination, port):\n logger.info('Removing path from %s to %s on port %s',\n source, destination, port)\n\n firewall_name = \"bu-%s-%s-%s\" % (destination.network.name, destination.name, port)\n\n def remove_from_ranges(to_remove, address_ranges):\n logger.info(\"Removing %s from %s\", to_remove, address_ranges)\n resulting_ranges = []\n if not address_ranges:\n return None\n for address_range in address_ranges:\n remove_net = ipaddress.IPv4Network(to_remove)\n address_range_network = ipaddress.IPv4Network(address_range)\n if remove_net.overlaps(address_range_network):\n if remove_net.prefixlen > address_range_network.prefixlen:\n new_range_networks = address_range_network.address_exclude(remove_net)\n resulting_ranges.extend([str(new_range_network) for new_range_network\n in new_range_networks])\n else:\n resulting_ranges.extend([str(address_range_network)])\n logger.info(\"New ranges: %s\", resulting_ranges)\n return resulting_ranges\n\n try:\n firewall = self.driver.ex_get_firewall(firewall_name)\n if isinstance(source, CidrBlock):\n firewall.source_ranges = remove_from_ranges(source.cidr_block,\n firewall.source_ranges)\n else:\n source_tag = \"%s-%s\" % (source.network.name, source.name)\n if firewall.source_tags:\n firewall.source_tags = [tag for tag in firewall.source_tags\n if tag != source_tag]\n except ResourceNotFoundError:\n logger.info(\"Firewall %s doesn't exist\", firewall_name)\n return None\n\n # We need this because the default is to add \"0.0.0.0/0\" if these aren't set, which is bad.\n if not firewall.source_tags and not firewall.source_ranges:\n return self.driver.ex_destroy_firewall(firewall)\n return self.driver.ex_update_firewall(firewall)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n entry = wn.entry_by_id(source_entry)\n if change_list:\n change_list.change_entry(wn, entry)\n sense = [sense for sense in entry.senses if sense.id == source][0]\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]", "def remove_node(self, name):\n parent_names = self.get_parents(name)\n self.source_net.remove_node(name)\n\n # Remove sole private parents\n for p in parent_names:\n if p[0] == '_' and self.source_net.degree(p) == 0:\n self.remove_node(p)", "def blacklistSource(self, source):\n log.info(\"blacklisting \" + source)\n if source not in GameConsole.blacklistedSources:\n GameConsole.blacklistedSources.append(source)", "def delete_sources(image_sources):\n index = np.where(image_sources[:, 3] == 0.0)\n active_sources = np.delete(image_sources, index, 0)\n return(active_sources)", "def delete_sense_rel(wn, source, target, change_list=None):\n print(\"Delete %s =*=> %s\" % (source, target))\n (source_synset, source_entry) = decompose_sense_id(source)\n lex_name = wn.synset_by_id(source_synset).lex_name\n wn_source = wn\n entry = wn_source.entry_by_id(source_entry)\n if entry:\n sense = [sense for sense in entry.senses if sense.id == source][0]\n if not any(r for r in 
sense.sense_relations if r.target == target):\n print(\"No sense relations deleted\")\n else:\n sense.sense_relations = [\n r for r in sense.sense_relations if r.target != target]\n if change_list:\n change_list.change_entry(wn, entry)\n else:\n print(\"No entry for \" + source_entry)" ]
[ "0.77114284", "0.74407005", "0.71957576", "0.69854724", "0.64353293", "0.64186805", "0.6345265", "0.63194674", "0.6298371", "0.62230206", "0.6152543", "0.6127164", "0.60954565", "0.60934883", "0.6064978", "0.60586363", "0.6038401", "0.5981567", "0.5934729", "0.5830534", "0.5810968", "0.57898885", "0.5768022", "0.5768022", "0.57627773", "0.57602966", "0.56156504", "0.55676854", "0.5537301", "0.5526762" ]
0.7522891
1
Get a specific curve
def get_curve(self, name): return self._curve_reg[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCurve(self, *args):\n return _libsbml.GeneralGlyph_getCurve(self, *args)", "def getCurve(self, *args):\n return _libsbml.ReferenceGlyph_getCurve(self, *args)", "def curve(self, index):\n if index >= len(self) or len(self) == 0:\n print('ERROR Class Graph method Curve: cannot find Curve (index',\n index, ', max possible', len(self), ')')\n return\n return self.data[index]", "def curve(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"curve\")", "def GetCurve(self, *args):\n return _Adaptor3d.Adaptor3d_HCurve_GetCurve(self, *args)", "def getCurve(self, *args):\n return _libsbml.ReactionGlyph_getCurve(self, *args)", "def curve(self):\n return self.__curve", "def getCurve(self, *args):\n return _libsbml.SpeciesReferenceGlyph_getCurve(self, *args)", "def getCurve(self, attr: Union[int, Str], view=...) -> AnimCurve:\n ...", "def getCurveSegment(self, *args):\n return _libsbml.Curve_getCurveSegment(self, *args)", "def ex_curve(data):\n rv = []\n try:\n ef = autocomplete_curve_function(data[0])\n ed = autocomplete_curve_direction(data[1])\n period = 2\n try:\n period = max(int(data[2]), 2)\n except ValueError:\n pass\n data = data[3:]\n if not data:\n if consts.VERBOSE:\n print('ERROR: No data for curve')\n return []\n f = CURVE_FUNCTIONS[ef][ed]\n maxi = len(data)-1\n for i in range(period):\n v = f(float(i) / float(period-1))\n di = int(round(v*float(maxi)))\n rv.append(data[di])\n\n except Exception as e:\n if consts.VERBOSE:\n print('ERROR: Curve failed [%s]'%e)\n\n return rv", "def Curve(self, *args):\n return _Adaptor3d.Adaptor3d_HCurve_Curve(self, *args)", "def curve_number(self):", "def getAnimCurve(self, *args, **kwargs):\n ...", "def get_curve(curve_path):\n with open(curve_path, \"r\") as f:\n lines = f.read().split(\"\\n\")\n parts = [line.split(\",\")[0:4] for line in lines]\n\n return np.array(parts[1:-1], dtype=\"float64\")", "def pcurve(self, edge):\n crv, umin, umax = BRep_Tool().CurveOnSurface(\n edge.topods_shape(), self.topods_shape()\n )\n return crv, Interval(umin, umax)", "def curve(self, data):\n x, y, y_smoothed = data\n\n curve_keys = ['color', 'linestyle', 'alpha', 'label']\n curve_config = self.config.filter(curve_keys, prefix='curve_')\n\n curves = self.ax.plot(x, y, **curve_config)\n\n if y_smoothed is not None:\n smoothed_color = scale_lightness(curve_config['color'], scale=.5)\n smoothed_label = self.config.get('smoothed_label')\n _ = self.ax.plot(x, y_smoothed, label=smoothed_label, color=smoothed_color, linestyle='--')\n\n return curves", "def curves(self):\n return self._curve_reg", "def as_curve(self, context):\n curve = bpy.data.curves.new('LINE', type='CURVE')\n curve.dimensions = '2D'\n spline = curve.splines.new('POLY')\n spline.use_endpoint_u = False\n spline.use_cyclic_u = False\n pts = self.pts\n spline.points.add(len(pts) - 1)\n for i, p in enumerate(pts):\n x, y, z = p\n spline.points[i].co = (x, y, 0, 1)\n curve_obj = bpy.data.objects.new('LINE', curve)\n context.scene.collection.objects.link(curve_obj)\n curve_obj.select_set(state=True)", "def findCurvePoints(self, x, y, c):\n\t\tyCurve = []\n\t\tfor xi in x:\n\t\t\tyi = self.polynomialFunct(c, xi)\n\t\t\t\n\t\t\tyCurve.append( yi )\n\t\t\n\t\treturn np.asarray(yCurve)", "def getCurveExplicitlySet(self):\n return _libsbml.GeneralGlyph_getCurveExplicitlySet(self)", "def getCurveExplicitlySet(self):\n return _libsbml.ReferenceGlyph_getCurveExplicitlySet(self)", "def setCurve(self, *args):\n return _libsbml.GeneralGlyph_setCurve(self, *args)", "def __draw_curve(self, points):\n 
x_pts = []\n y_pts = []\n curvex = []\n curvey = []\n self.debug += 1\n for point in points:\n x_pts.append(point[0])\n y_pts.append(point[1])\n curve = scipy.interpolate.interp1d(x_pts, y_pts, 'cubic')\n if self.debug == 1 or self.debug == 2:\n for i in np.arange(x_pts[0], x_pts[len(x_pts) - 1] + 1, 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n else:\n for i in np.arange(x_pts[len(x_pts) - 1] + 1, x_pts[0], 1):\n curvex.append(i)\n curvey.append(int(curve(i)))\n return curvex, curvey", "def getCurveExplicitlySet(self):\n return _libsbml.ReactionGlyph_getCurveExplicitlySet(self)", "def FirstParameter(*args):\n return _Geom2dLProp.Geom2dLProp_Curve2dTool_FirstParameter(*args)", "def curveTo(self, *points: Tuple[float, float]) -> None:\n raise NotImplementedError", "def oncurve(self, P):\n\t\traise Exception(NotImplemented)", "def setCurve(self, *args):\n return _libsbml.ReferenceGlyph_setCurve(self, *args)", "def getCurveExplicitlySet(self):\n return _libsbml.SpeciesReferenceGlyph_getCurveExplicitlySet(self)" ]
[ "0.7430819", "0.73042256", "0.7285898", "0.72660524", "0.70774084", "0.7035105", "0.70314497", "0.6966334", "0.6909233", "0.6722947", "0.6711495", "0.6628271", "0.6575409", "0.6557469", "0.6466138", "0.6451115", "0.63523614", "0.62281936", "0.61431664", "0.6096783", "0.60813904", "0.6006927", "0.59401375", "0.5885158", "0.58709985", "0.58688265", "0.5784609", "0.5780496", "0.577959", "0.57512003" ]
0.74706084
0
Get a specific source
def get_source(self, name): return self._sources[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_source(self):\n\t\treturn self.source.get_source()", "def getSource():", "def get_source(self):\n return self.source", "def get_source(self):", "def source():\n\n source = models.Source(name=u\"Joe's Funerals.com\", url=u\"http://www.joesfunerals.com\")\n return source", "def _get_source(self, uri: str) -> Optional[_Source]:\n\n for source in self._sources:\n if uri == source.uri:\n return source\n\n return None", "def find_source(self, name):\n t = filter( lambda x: x.name==name, self.point_sources+self.extended_sources)\n return t[0] if len(t)==1 else None", "def get_src(self):\n return self.isy.prog_get_src(self._mydict['id'])", "def getSource(self):\n return self.source", "def get_source(self) -> Optional[str]:\n return self._source", "def get_source(self, source, driver_name=None):\n if not driver_name:\n driver_name = self.driver_name\n driver = ogr.GetDriverByName(driver_name)\n return driver.Open(source, 0)", "def getSource(self):\n return urllib2.urlopen(Parser.SOURCE_URL)", "def Source(self):\r\n\t\treturn self._get_attribute('source')", "def get_source(source_name):\n if source_name == \"SCHOLAR_CENSUS\":\n from mec_data.source.scholar import ScholarSource\n\n return ScholarSource()\n elif source_name == \"UNIVERSITY_CENSUS\":\n from mec_data.source.university import UniversitySource\n\n return UniversitySource()", "def read(self, source):\n _source = self._source_prefix+source\n return self.cache[_source]", "def source(self) -> Optional[str]:\n return pulumi.get(self, \"source\")", "def get_source(self, format: str) -> Source:\n if format in SOURCE_MAP:\n s = SOURCE_MAP[format]\n return s(self)\n else:\n raise TypeError(f\"{format} in an unrecognized format\")", "def getSource(self):\n return self.__source", "def get_source(self, source_name: str) -> Optional[Tag]:\n # sanitize the name, this will also add index if there isn't one\n source_name, *_ = Tags._sanitize_tag(source_name, 0, 0)\n return next(self.tags.filter(name=source_name, valid=None), None)", "def get_source(self, subset: str) -> Source:\n key, version = self._subsets[subset]\n return self._catalog[key][version]", "def get_source(self, filenum):\n return self.get_s_sect()[filenum] if filenum < len(self.get_s_sect()) else None", "def source(self) -> str:\n return pulumi.get(self, \"source\")", "def source(self) -> str:\n return pulumi.get(self, \"source\")", "def source(self) -> str:\n return pulumi.get(self, \"source\")", "def source(self) -> str:\n return pulumi.get(self, \"source\")", "def source(self) -> str:\n return pulumi.get(self, \"source\")", "def source(self) -> str:\n return pulumi.get(self, \"source\")", "def getSource(self, url):\n try:\n f = urllib2.urlopen(url)\n source = f.read()\n f.close()\n return source\n except urllib2.URLError:\n raise HNException(\"Error getting source from \" + url + \". Your internet connection may have something funny going on, or you could be behind a proxy.\")", "def get_from_sources(self,index,doc_type,document_id):\n return self.sources.get(index, {}).get(doc_type, {}).get(document_id,{})", "def get_source(self, key, files):\n raise NotImplementedError" ]
[ "0.7696782", "0.7522081", "0.7431137", "0.7377622", "0.72502756", "0.7175143", "0.7173024", "0.7112712", "0.7101868", "0.70995736", "0.7044947", "0.7036322", "0.70349556", "0.7008906", "0.6993891", "0.6992051", "0.6958572", "0.6942624", "0.6923416", "0.68866533", "0.6872768", "0.68723065", "0.68723065", "0.68723065", "0.68723065", "0.68723065", "0.68723065", "0.68510884", "0.68299776", "0.6825935" ]
0.76889575
1
Get a list of junction names. Returns list of strings
def junction_name_list(self): return list(self._node_reg.junction_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def junction_names(self):\n return self._junctions", "def junctions(self):\n return self._node_reg.junctions", "def junctions(self):\n for node_name in self._junctions:\n yield node_name, self._data[node_name]", "def get_names(cat):\n res = []\n while cat:\n res.append(cat.name)\n cat = cat.parent_id\n return res", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def junction_char(self):\n ...", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]", "def get_all_jeeps():\n return \", \".join(cars['Jeep'])", "def names(self):\n return list(item.name for item in self.mechanisms)", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]", "def names(cls) -> List[str]:", "def names(self) -> List:\n ...", "def get_motoneurons_names(self):\n\t\treturn self._motoneuronsNames", "def named_entities(self) -> List[str]:", "def name_get(self):\n res = []\n for employee in self:\n name = employee.name\n name = ' '.join([name or '', employee.middle_name or '', employee.last_name or ''])\n res.append((employee.id, name))\n return res", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames", "def _get_kdl_joint_names(self):\n num_links = self._urdf_chain.getNrOfSegments()\n num_joints = self._urdf_chain.getNrOfJoints()\n joint_names = []\n for i in range(num_links):\n link = self._urdf_chain.getSegment(i)\n joint = link.getJoint()\n joint_type = joint.getType()\n # JointType definition: [RotAxis,RotX,RotY,RotZ,TransAxis,\n # TransX,TransY,TransZ,None]\n if joint_type > 1:\n continue\n joint_names.append(joint.getName())\n assert num_joints == len(joint_names)\n return copy.deepcopy(joint_names)", "def make_junctions(RefSeqs,readLength):\r\n\r\n nRefSeqs = len(RefSeqs)\r\n htLength = readLength - 1 #in matlab readLength-1, but index starts from 0 for python \r\n\r\n junctionSeqs = []\r\n heads = []\r\n tails = []\r\n names = []\r\n\r\n for iSeq in range(nRefSeqs):\r\n heads.append(RefSeqs[iSeq].seq[0:htLength])\r\n tails.append(RefSeqs[iSeq].seq[-htLength:])\r\n names.append(list(RefSeqs[iSeq].id))\r\n if '_' in names[iSeq]:\r\n names[iSeq][names[iSeq].index('_'):]='' #Removes everything from the name after the '_'\r\n \r\n for hSeq in range(nRefSeqs): \r\n for tSeq in range(nRefSeqs): \r\n # create the junctions\r\n junctionNm = 'Junction_'+''.join(names[tSeq])+'_'+''.join(names[hSeq])\r\n junctionSeq = tails[tSeq] + heads[hSeq]\r\n junctionSeqs.append(SeqRecord(id=junctionNm,name=junctionNm, description=junctionNm, seq=junctionSeq)) #miss description\r\n\r\n return junctionSeqs", "def tag_list(self, obj): # pylint: disable=no-self-use\n return u\", \".join(o.name for o in obj.tags.all())", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n if isinstance(self.name, string_types):\n return [self.name]\n else:\n return list(self.name)", "def namelist(self):\n return []", "def return_names(self):\n return self.__name_list", "def taglist(self):\n tags = []\n 
for tag in self.tags:\n tags.append(tag.title)\n return ', '.join(map(str, tags))" ]
[ "0.8087006", "0.69687134", "0.68006426", "0.6432575", "0.63967204", "0.6359245", "0.6332422", "0.6287477", "0.6035099", "0.59415925", "0.5880238", "0.5832492", "0.5803015", "0.57876426", "0.57208526", "0.57171524", "0.5715702", "0.568492", "0.5673629", "0.5627065", "0.56265527", "0.5626146", "0.56078017", "0.56078017", "0.559899", "0.559899", "0.559411", "0.5591344", "0.5565948", "0.5561327" ]
0.8256131
0
Get a list of tank names. Returns list of strings
def tank_name_list(self): return list(self._node_reg.tank_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tank_names(self):\n return self._tanks", "def tanks(self):\n for node_name in self._tanks:\n yield node_name, self._data[node_name]", "def rank_names(self):\n return ['Domain', # 0\n 'Phylum', # 1\n 'Class', # 2\n 'Order', # 3\n 'Family', # 4\n 'Genus', # 5\n 'Species'] # 6", "def getTileNames(cls):\n return sorted(TILENAMEMAP.keys())", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def tanks(self):\n return self._node_reg.tanks", "def get_nice_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[1])\n return result", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def names(self) -> List:\n ...", "def makeTableNamesList(n, ):", "def teammates_player_names(self):\n return [p.name for p in self.teammates]", "def names(cls) -> List[str]:", "def extract_names(register):\n names = []\n for i in range(len(register) - 1): # len() -> no of columns\n first_name = str(register.iloc[i][2]).capitalize()\n last_name = str(register.iloc[i][1]).upper()\n name = last_name + ' ' + first_name\n names.append(name)\n names = list(set(names))\n return names", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def get_team_names(driver):\n name_elements = driver.find_elements_by_class_name(\"name\")\n team_names = [name.text for name in name_elements]\n return team_names", "def get_output_node_names(self, node_name):\n # (str) -> list\n node = self.get_node(node_name)\n return node.tops", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def names():\n pass", "def get_all_names(cls, exclude_values: Iterator['CommonBucksType'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def getOthNames( self ):\n\n if self.othNames:\n return self.othNames.keys()\n \n nSets = self.adb.get( \"nOths\" )\n for id1 in range( nSets ):\n name = self.adb.get( \"othName\", id1 )\n self.othNames[ name ] = id1\n\n return self.othNames.keys()", "def get_names(self):\n return self.names", "def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names", "def make_label_names(name_lsit):\n\n hover_label_names = []\n for x in range(len(name_lsit)):\n temp1 = name_lsit[x]\n hover_label_names.append(temp1)\n\n return hover_label_names", "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def speciesNames(self):\n nsp = self.nSpecies()\n return map(self.speciesName,range(nsp))" ]
[ "0.8056458", "0.6646708", "0.6427893", "0.6385724", "0.6325658", "0.62808025", "0.6279592", "0.6128254", "0.60252315", "0.5951407", "0.5919693", "0.59042984", "0.58878714", "0.5863227", "0.585431", "0.584664", "0.58040273", "0.57890725", "0.578157", "0.5747603", "0.57416886", "0.57416886", "0.57316643", "0.57075846", "0.5703405", "0.5687968", "0.56752735", "0.5664993", "0.56612575", "0.56577015" ]
0.75850743
1
Get a list of reservoir names. Returns list of strings
def reservoir_name_list(self): return list(self._node_reg.reservoir_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reservoir_names(self):\n return self._reservoirs", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def donor_names():\n names = list()\n for name in donor_db:\n names = names + [name[0]]\n return names", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def namelist(self):\n return []", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names", "def names(self) -> List:\n ...", "def names(cls) -> List[str]:", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def donor_names():\n return list(donor_db.keys())", "def reservoirs(self): \n return self._node_reg.reservoirs", "def return_names(self):\n return self.__name_list", "def prv_name_list(self):\n return list(self._link_reg.prv_names)", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def names(self):\n if isinstance(self.name, string_types):\n return [self.name]\n else:\n return list(self.name)", "def gpv_name_list(self):\n return list(self._link_reg.gpv_names)", "def reservoirs(self):\n for node_name in self._reservoirs:\n yield node_name, self._data[node_name]", "def get_names(self):\n return self.names", "def name_get(self):\n res = [(r.id, r.name) for r in self]\n return res", "def get_rnames(self):\n for row in self._get_references_node():\n yield row['name']", "def prv_names(self):\n return self._prvs", "def names():\n pass", "def check_all_preservations(cls, bijection: BijectionType) -> Iterator[str]:\n return (stats.name for stats in cls._get_all() if stats.preserved_in(bijection))", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.namelist()" ]
[ "0.84352976", "0.6965079", "0.6796028", "0.671071", "0.6577268", "0.65600306", "0.6528046", "0.6493303", "0.6435559", "0.62036914", "0.61941165", "0.61810017", "0.6166763", "0.6131687", "0.6107919", "0.61021644", "0.60691506", "0.60660154", "0.6060537", "0.6058289", "0.60526514", "0.6037921", "0.60267323", "0.6019673", "0.598837", "0.5983286", "0.5981678", "0.597829", "0.597829", "0.59738475" ]
0.8379338
1
Get a list of link names. Returns list of strings
def link_name_list(self): return list(self._link_reg.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_kdl_link_names(self):\n num_links = self._urdf_chain.getNrOfSegments()\n link_names = []\n for i in range(num_links):\n link_names.append(self._urdf_chain.getSegment(i).getName())\n return copy.deepcopy(link_names)", "def list(self):\n\t\treturn self.link_words", "def list_links(self, node, dd):\n link_list = dd[node]['links']\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names", "def get_links(self):\n msg = self.get_message()\n return msg.split()", "def old_list_links(self, link_list, dd):\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names", "def get_links(self) -> List[str]:\n return self.__links", "def links(self):\n\t\treturn self.list_of_links", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def getLinks():\n curDIR = os.path.dirname(os.path.realpath(__file__))\n inputfile_path = os.path.join(curDIR, 'sitemaplinks.txt')\n links=[]\n with open(inputfile_path,'r') as outputfile:\n for line in outputfile:\n links.append(line.split(',')[0])\n return links", "def getlinklist(self):\n d = []\n try:\n con = hcpsdk.Connection(self.target, debuglevel=self.debuglevel)\n except Exception as e:\n raise hcpsdk.HcpsdkError(str(e))\n else:\n self.connect_time = con.connect_time\n try:\n r = con.GET('/mapi/services/replication/links')\n except Exception as e:\n d.append('Error: {}'.format(str(e)))\n else:\n if r.status == 200:\n # Good status, get and parse the Response\n x = r.read()\n self.service_time = con.service_time2\n root = Et.fromstring(x)\n for child in root:\n if child.tag == 'name':\n d.append(child.text)\n else:\n raise (hcpsdk.HcpsdkError('{} - {}'.format(r.status, r.reason)))\n finally:\n # noinspection PyUnboundLocalVariable\n con.close()\n\n return d", "def get_names(url):\n\t# get html element tree\n\ttree = get_tree(url)\n\t# Names are text within <a> elements in this list\n\t# xpath returns a list with alternating last and first names as elements\n\t# Concatenate each last name and first name pair and put in new list as full name\n\tnames = tree.xpath('//*[@id=\"research-teachinglist\"]/li//a//text()')\n\tfull_names = []\n\tfor i in range(0, len(names)-1, 2):\n\t\tfull_names.append(names[i] + names[i+1])\n\n\treturn full_names", "def list_urls(self, prefix: str = \"\", etl_name: str = None) -> Iterable[str]:", "def get_links(names, html):\n ###TODO\n people = []\n readweb = BeautifulSoup(html, 'html.parser')\n for a in readweb.find_all('a'):\n person = os.path.basename(str(a.get('href')))\n if person in names:\n people.append(person)\n return SortedSet(people)\n pass", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def links(self) -> str:\n return pulumi.get(self, \"links\")", "def get_links(self):\n return (link for link in self.links)", "def getLinks(self):\n\t\threfs = []\n\t\tfor link in self.bsource.find_all('a'):\n\t\t\threfs.append(link.get('href'))\n\t\treturn hrefs", "def getExpandedLinks():", "def gen_links(text):\n return []", "def get_links(self):\r\n return self.links", "def 
reponames(gh, user):\n return [u.split('/')[-1] for u in urls(gh, user)]", "def getLinks(content):\n soup = BeautifulSoup(content, 'lxml')\n links = set([link.get('href') for link in soup.find_all('a')])\n return links", "def link_to_string(link_list):\r\n api = authorise(consumer_key, consumer_secret, access_token, access_token_secret)\r\n\r\n list_of_names = []\r\n\r\n for id_num in link_list:\r\n list_of_names.append(get_user(api, id_num).screen_name)\r\n\r\n # Creating string:\r\n result_str = \"\"\r\n for i in range(len(list_of_names) - 1):\r\n result_str = result_str + list_of_names[i] + \" <-> \"\r\n\r\n result_str = result_str + list_of_names[-1]\r\n\r\n return result_str", "def extract_links(self, source: str) -> List[str]:\n return self.links_compiled_exp.findall(source)", "def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links", "def get_names():\n only_links = SoupStrainer(\"a\")\n names = set()\n doc = requests.get(NAMES_URL).content\n links = BeautifulSoup(doc, \"html.parser\", parse_only=only_links)\n pokemon = links.find_all(title=re.compile(\"(\\w+)(\\s){1}(\\(Pokémon\\))\"))\n for cell in pokemon:\n names.add(str(cell.string))\n \n\n return names", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def names(self) -> List:\n ..." ]
[ "0.7595612", "0.7404677", "0.72829336", "0.7264005", "0.72046065", "0.69500434", "0.69243145", "0.69173944", "0.68098545", "0.6758766", "0.6710052", "0.67067623", "0.65946317", "0.65372044", "0.65343434", "0.65168136", "0.64991564", "0.6481789", "0.64781207", "0.6465065", "0.6443217", "0.6431833", "0.6348655", "0.63451695", "0.6341336", "0.6335799", "0.62900406", "0.6286263", "0.6286263", "0.6278057" ]
0.7673522
0
Get a list of pipe names. Returns list of strings
def pipe_name_list(self): return list(self._link_reg.pipe_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipe_names(self):\n return self._pipes", "def pipes(self): \n return self._link_reg.pipes", "def get_pipeline_names() -> Iterable[str]:\n for item in sorted((SRC / \"pipelines\").iterdir()):\n if not item.name.startswith(\"_\") and not item.is_file():\n yield item.name", "def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]", "def separate_pipe(s):\n return s.split('|')", "def names(self) -> list[str]:", "def get_output_names():\n names = [device.name for device in get_devices() if device.is_output]\n return list(sorted(names))", "def psv_name_list(self):\n return list(self._link_reg.psv_names)", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def get_vsys_fifo_names(backend):\n return (_VSYS_FMT_IN % backend, _VSYS_FMT_OUT % backend)", "def _go_list(self, *args):\n return subprocess.check_output((\"go\", \"list\") + self.tag_args + args).strip().split(\"\\n\")", "def pump_name_list(self):\n return list(self._link_reg.pump_names)", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval", "def pbv_name_list(self):\n return list(self._link_reg.pbv_names)", "def _name_of_all_containers(compose_project: str) -> List[str]:\n run_result = subprocess.run(\n [\n \"docker\",\n \"ps\",\n \"--all\",\n \"--filter\",\n f\"name={compose_project}\",\n \"--format\",\n \"table {{.Names}}\",\n ],\n capture_output=True,\n )\n containers: List[str] = run_result.stdout.decode(\"utf-8\").split(\"\\n\")\n containers = containers[1:] # remove the table column header\n containers = [c for c in containers if c] # filter empty\n if not containers:\n raise ValueError(f\"Couldn't find any containers for '{compose_project}'\")\n return containers", "def get_pipe_ids(url, arg):\n encoded_pipelines = live_url_request(url, arg)\n return encoded_pipelines", "def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END", "def containers():\n # TODO: can there be multiple names?\n cmd = [ 'docker', 'ps', '--format', '{{.Names}}' ]\n with popen_text(cmd) as docker:\n for ln in docker.stdout:\n yield ln[:-1]", "def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)", "def _command_as_list(module_name):\n parts = module_name.split('.')\n for part in COMMANDS_PACKAGE_NAME.split('.'):\n if parts[0] == part:\n parts = parts[1:]\n return [SCRIPT_COMMAND] + parts", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def pump_names(self):\n return self._pumps", "def __current_pipeline_list__(self,mockdb):\n pipelines = []\n if self.pipelines is None:\n return pipelines\n pipelines_dict = self.pipelines.split(';')\n for d in pipelines_dict:\n pipeline_key, obj_type = d.split(':')\n try:\n\t\tpipeline = mockdb[obj_type].objects[int(pipeline_key)]\n except KeyError:\n sys.exit(\"Key error in determining pipeline for report.\\n\")\n pipelines.append(pipeline)\n return pipelines", "def gpv_name_list(self):\n return list(self._link_reg.gpv_names)", "def get_names(dep):\n res = [dep.name]\n return res", "def get_command_names(self):\n return list(self.commands.keys())", "def output_names(self):\n return []" ]
[ "0.82978183", "0.71116775", "0.65844274", "0.6444399", "0.6193895", "0.61290246", "0.6103078", "0.6090233", "0.6083763", "0.6083763", "0.6031202", "0.60280323", "0.60100174", "0.59890234", "0.59890234", "0.5932565", "0.59230214", "0.58982307", "0.5873035", "0.583595", "0.5787462", "0.57771313", "0.5769005", "0.5763446", "0.57551914", "0.57420915", "0.57290477", "0.5716391", "0.5676067", "0.5638418" ]
0.84123486
0
Get a list of pump names (both types included). Returns list of strings
def pump_name_list(self): return list(self._link_reg.pump_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pump_names(self):\n return self._pumps", "def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)", "def power_pump_names(self):\n return self._power_pumps", "def get_pump_stringlist(self):\n return text_pump", "def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)", "def get_pump_list(self):\n return self.pump_array", "def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]", "def head_pump_names(self):\n return self._head_pumps", "def pumps(self): \n return self._link_reg.pumps", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def protein_name_collector():\n resL = []\n with open(\"bob/processed/bobprots_down.csv\", \"r\") as inpF:\n for inpLine in inpF:\n inpLine = inpLine.split(\",\")\n resL.append(inpLine[0].strip(\" \\n\"))\n return resL", "def get_unit_names(status, application_name):\n return [name for name, unit in get_units(status, application_name)]", "def names(cls) -> List[str]:", "def GetNamesOfPieces(self):\n assert self.RecoveredEnoughPieces()\n result = []\n base = self.fileName + dibs_constants.fileSeparator \n for p in self.piecesRecovered.keys():\n result.append(base + p)\n return result", "def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END", "def get_output_names():\n names = [device.name for device in get_devices() if device.is_output]\n return list(sorted(names))", "def get_pokemon_names():\n with open(POKEMON_FILE, 'r') as fh:\n pokemon = json.load(fh)\n return [name.lower() for name in pokemon]", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def names(self) -> List:\n ...", "def get_pinnames(self):\n return self.pnames", "def _get_names(self, item_type):\n data = self.get_json('Get-{} -VMMServer $scvmm_server'.format(item_type))\n if data:\n return [item['Name'] for item in data] if isinstance(data, list) else [data[\"Name\"]]\n else:\n return None", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def readWaveformNames(self):\n self.sendMessage('WLIST:SIZE?')\n ansr=self.readMessage()\n msg=[]\n for i in xrange (1,int(ansr)+1):\n msg.append('WLIST:NAME? '+str(i))\n self.sendMessage(msg)\n wnames = self.readMessage()\n names=re.findall('\".*?\"',wnames)\n strippednames=[]\n for name in names:\n strippednames.append(name.rstrip('\"').lstrip('\"'))\n return strippednames", "def namelist(self):\n return []" ]
[ "0.7911566", "0.75667715", "0.70810467", "0.68184704", "0.67374855", "0.6724712", "0.6678243", "0.6582998", "0.6389015", "0.6045076", "0.5974296", "0.59574044", "0.5891369", "0.5801314", "0.5729742", "0.5713783", "0.56620663", "0.565727", "0.56552035", "0.565081", "0.56356615", "0.56047535", "0.55155385", "0.5505858", "0.54832625", "0.5460994", "0.5451252", "0.54353875", "0.54022557", "0.5394591" ]
0.8058499
0
Get a list of head pump names Returns list of strings
def head_pump_name_list(self): return list(self._link_reg.head_pump_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def head_pump_names(self):\n return self._head_pumps", "def pump_name_list(self):\n return list(self._link_reg.pump_names)", "def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)", "def pump_names(self):\n return self._pumps", "def head_pumps(self):\n for name in self._head_pumps:\n yield name, self._data[name]", "def names(self) -> list[str]:", "def power_pump_names(self):\n return self._power_pumps", "def get_short_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def getNames(self) -> List[unicode]:\n ...", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def getHeadParts(self):\n return self.headParts", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def get_pump_stringlist(self):\n return text_pump", "def protein_name_collector():\n resL = []\n with open(\"bob/processed/bobprots_down.csv\", \"r\") as inpF:\n for inpLine in inpF:\n inpLine = inpLine.split(\",\")\n resL.append(inpLine[0].strip(\" \\n\"))\n return resL", "def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names", "def names(self) -> List:\n ...", "def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names", "def get_names(source):\n names = [row[\"name\"] for row in source]\n return sorted(names)", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))", "def get_pump_list(self):\n return self.pump_array", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_pinnames(self):\n return self.pnames", "def namelist(self):\n return []", "def names(self):\n return [line.split(',')[0] for line in self.lines]", "def get_nice_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[1])\n return result", "def get_patient_names(self):\n\t# use pre-defined patient names\n\tif (self.data_names is not None):\n\t\tassert (os.path.isfile(self.data_names))\n\t\twith open(self.data_names) as f:\n\t\t\tcontent = f.readlines()\n\t\tpatient_names = [x.strip() for x in content]\n\t# use all the patient names in data_root\n\telse:\n\t\tpatient_names = os.listdir(self.data_root[0])\n\t\tpatient_names = [name for name in patient_names if 'brats' in name.lower()]\n\treturn patient_names" ]
[ "0.81699145", "0.74455684", "0.6964368", "0.6947486", "0.63413054", "0.6206856", "0.6171997", "0.6103357", "0.6101508", "0.609973", "0.60207105", "0.6008463", "0.5996784", "0.5958609", "0.59400934", "0.5917989", "0.5891312", "0.5883272", "0.5867631", "0.58626765", "0.58320594", "0.58261925", "0.5820222", "0.5809374", "0.57825357", "0.5768085", "0.57500994", "0.5746659", "0.56886524", "0.56626827" ]
0.8411579
0
Get a list of power pump names Returns list of strings
def power_pump_name_list(self): return list(self._link_reg.power_pump_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def power_pump_names(self):\n return self._power_pumps", "def pump_name_list(self):\n return list(self._link_reg.pump_names)", "def pump_names(self):\n return self._pumps", "def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)", "def get_pump_stringlist(self):\n return text_pump", "def pumps(self): \n return self._link_reg.pumps", "def get_pump_list(self):\n return self.pump_array", "def head_pump_names(self):\n return self._head_pumps", "def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]", "def get_powerups() -> tuple:\n return tuple(PowerUp.powers.keys())", "def power_pumps(self):\n for name in self._power_pumps:\n yield name, self._data[name]", "def list_power_supply_units(self):\n\n doc = self.client.enumerate(uris.CIM_PowerSupply)\n\n psus = doc.find('.//s:Body/wsen:EnumerateResponse/wsman:Items',\n wsman.NS_MAP)\n\n return [self._parse_psus(psu) for psu in psus]", "def names(self) -> list[str]:", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_list_powers(self):\r\n _debug('simq03b_api.get_list_powers')\r\n \r\n s = self.query('SOUR:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def getNames(self) -> List[unicode]:\n ...", "def get_list_powers(self):\r\n return self._api.get_list_powers()", "def pump_curve_names(self):\n return list(self._pump_curves)", "def get_powerpoints(args):\n if 'powlist' in args:\n return args.powlist\n\n minpower = args.envconfig.get('minpow', DEF_MINPOWER)\n maxpower = args.envconfig.get('maxpow', DEF_MAXPOWER)\n powerpoints = []\n if 'powstep' in args: # Initial point: DEF_MINPOWER\n ppoint = minpower\n while ppoint <= maxpower:\n powerpoints.append(ppoint)\n ppoint += args.powstep\n if 'pownum' in args: # Extreme points not used\n pstep = (maxpower - minpower) / (args.pownum + 1)\n ppoint = minpower + pstep\n for _ in range(args.pownum):\n powerpoints.append(ppoint)\n ppoint += pstep\n\n return powerpoints", "def readWaveformNames(self):\n self.sendMessage('WLIST:SIZE?')\n ansr=self.readMessage()\n msg=[]\n for i in xrange (1,int(ansr)+1):\n msg.append('WLIST:NAME? '+str(i))\n self.sendMessage(msg)\n wnames = self.readMessage()\n names=re.findall('\".*?\"',wnames)\n strippednames=[]\n for name in names:\n strippednames.append(name.rstrip('\"').lstrip('\"'))\n return strippednames", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def GetNamesOfPieces(self):\n assert self.RecoveredEnoughPieces()\n result = []\n base = self.fileName + dibs_constants.fileSeparator \n for p in self.piecesRecovered.keys():\n result.append(base + p)\n return result", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_list_powers(self):\r\n return self.ps", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def get_pinnames(self):\n return self.pnames", "def get_output_names():\n names = [device.name for device in get_devices() if device.is_output]\n return list(sorted(names))", "def get_list_powers(self):\r\n s = self.query('LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a" ]
[ "0.8048565", "0.80370945", "0.798992", "0.6704519", "0.66724175", "0.6669718", "0.6619897", "0.66024184", "0.63278764", "0.62715787", "0.6242387", "0.61856943", "0.608296", "0.60596144", "0.5981608", "0.5979851", "0.5920514", "0.58885986", "0.58619136", "0.5823269", "0.5822333", "0.5818427", "0.57879573", "0.5743294", "0.5743294", "0.5738005", "0.57113874", "0.57008106", "0.56952447", "0.5649359" ]
0.8360706
0
Get a list of valve names (all types included) Returns list of strings
def valve_name_list(self): return list(self._link_reg.valve_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valve_names(self):\n return self._valves", "def names(self) -> list[str]:", "def names(self):\n if type(self.name) is types.StringType:\n return [self.name]\n else:\n return list(self.name)", "def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())", "def names(cls) -> List[str]:", "def getNames(self) -> List[unicode]:\n ...", "def values(cls) -> t.List[t.Union[str, NameTitle]]:\n return list(cls.__labels__.values())", "def names(self):\n if isinstance(self.name, string_types):\n return [self.name]\n else:\n return list(self.name)", "def names(self):\r\n return self.get_field(self.name_field)", "def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def get_name_value(self):\n name, value = self.get()\n if not isinstance(name, list):\n name = [name]\n if not isinstance(value, list):\n value = [value]\n return list(zip(name, value))", "def availableValues(self):\n return [x.name for x in self._field.enum_type.values]", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def getValueListName(self):\n assert self.isSearchResponse()\n names = [field.name for field in self.getFields()]\n # We assume that there are exactly two fields in every\n # SearchResponse: nextPageToken and the value list.\n names.remove('nextPageToken')\n assert len(names) == 1\n return names[0]", "def getOptionsNames(self) -> List[unicode]:\n ...", "def names(self) -> List:\n ...", "def name(self) -> List[NameAndValue]:\n return self._name", "def psv_name_list(self):\n return list(self._link_reg.psv_names)", "def get_vlans_list(self):\n return self.vlans.keys()", "def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def get_names(dep):\n res = [dep.name]\n return res", "def valves(self):\n for name in self._valves:\n yield name, self._data[name]", "def get_all_names(cls, exclude_values: Iterator['CommonBucksType'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "def get_all_names(cls, exclude_values: Iterator['CommonBusinessAdvertisingType'] = None) -> Tuple[str]:\n name_list: Tuple[str] = tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "def _getNames(self):\n return self._items.keys()" ]
[ "0.75549895", "0.6897611", "0.68669784", "0.68631047", "0.6844718", "0.67509496", "0.66171193", "0.65831435", "0.65557593", "0.6527351", "0.6493272", "0.64087355", "0.63995713", "0.63643575", "0.6360965", "0.63165", "0.6314579", "0.62659514", "0.6198061", "0.6177901", "0.6140403", "0.6121714", "0.6120538", "0.6120538", "0.6119934", "0.61048293", "0.60876125", "0.6066302", "0.60583043", "0.6054223" ]
0.7783707
0
Get a list of prv names Returns list of strings
def prv_name_list(self): return list(self._link_reg.prv_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prv_names(self):\n return self._prvs", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def names(cls) -> List[str]:", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def names(self) -> List:\n ...", "def namelist(self):\n return []", "def get_names(name):\n pos = name.rfind('.')\n if pos != -1:\n return [name[pos + 1:]]\n return [name]", "def get_names(dep):\n res = [dep.name]\n return res", "def getnames(self) -> List[Dict[str, Any]]:\n # NOTE: warning this does not yet support pagination\n return self.rpc_call(\"getnames\")", "def return_names(self):\n return self.__name_list", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def get_names(self):\n return self.names", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_ordered_adversary_names(self) -> List[str]:\n pass", "def psv_name_list(self):\n return list(self._link_reg.psv_names)", "def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def names(self):\n if isinstance(self.name, string_types):\n return [self.name]\n else:\n return list(self.name)", "def get_subnode_names(self) -> List[str]:\n\t\t# Variables\n\t\tnames: List[str] = []\n\n\t\t# Iterate over nodes\n\t\tfor subnode in self.subnodes:\n\t\t\tnames.append(subnode.name)\n\t\t# Return Names\n\t\treturn sorted(names, key=str.lower)", "def gpv_name_list(self):\n return list(self._link_reg.gpv_names)", "def get_uniprot_names(uniprot_result):\n name_lines = [l for l in uniprot_result.split('\\n') if l.startswith('DE')]\n\n names = []\n\n for nm_line in name_lines:\n if 'Full=' in nm_line:\n names.append(nm_line.split('Full=')[-1][:-1])\n elif 'Short=' in nm_line:\n names.append(nm_line.split('Short=')[-1][:-1])\n\n return names" ]
[ "0.7555528", "0.73201424", "0.70572925", "0.6875848", "0.6829884", "0.6716961", "0.6604352", "0.6572593", "0.6521208", "0.6510447", "0.645756", "0.63750947", "0.63750947", "0.6340543", "0.6340543", "0.6325614", "0.6278456", "0.62449247", "0.61938864", "0.6189884", "0.6185686", "0.6175417", "0.61703956", "0.61333483", "0.61333483", "0.61323756", "0.6110107", "0.6087834", "0.60838145", "0.6072076" ]
0.8030329
0
Get a list of psv names Returns list of strings
def psv_name_list(self): return list(self._link_reg.psv_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def psv_names(self):\n return self._psvs", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def names(cls) -> List[str]:", "def psvs(self):\n for name in self._psvs:\n yield name, self._data[name]", "def names(self) -> List:\n ...", "def protein_name_collector():\n resL = []\n with open(\"bob/processed/bobprots_down.csv\", \"r\") as inpF:\n for inpLine in inpF:\n inpLine = inpLine.split(\",\")\n resL.append(inpLine[0].strip(\" \\n\"))\n return resL", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def get_pokemon_names():\n with open(POKEMON_FILE, 'r') as fh:\n pokemon = json.load(fh)\n return [name.lower() for name in pokemon]", "def names(self):\n return [line.split(',')[0] for line in self.lines]", "def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names", "def gpv_name_list(self):\n return list(self._link_reg.gpv_names)", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def prv_names(self):\n return self._prvs", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def gpv_names(self):\n return self._gpvs", "def return_names(self):\n return self.__name_list", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def get_names(dep):\n res = [dep.name]\n return res", "def output_names(self):\n return []", "def read_names_list(file_path):\r\n\tnames_list = []\r\n\twith open(file_path) as file:\r\n\t for line in file:\r\n\t cline = line.rstrip().split()\r\n\t #row_id = cline[0]\r\n\t row_name = cline[1:]\r\n\t #names_list.append((row_id, \" \".join(row_name)))\r\n\t names_list.append(\" \".join(row_name))\r\n\treturn names_list", "def names(path, filter=None):", "def read_psv(name, first=False):\n res = [i.strip().split('|') for i in open(name, 'r').readlines()]\n return res[1:] if first else res", "def pbv_names(self):\n return self._pbvs", "def get_names(self):\n return self.names", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names", "def getFileNames():\n input_path = \"/Users/tim/OneDrive/Master/Text_Mining/project/texts/glenarvon_html/\"\n temp_list = os.listdir(input_path)\n name_list = [i for i in temp_list if i[-4:] == \"html\"]\n name_list.sort(key=natural_keys) # see http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n return name_list, input_path" ]
[ "0.7399235", "0.7064878", "0.6588478", "0.6548767", "0.6485365", "0.6352624", "0.63394755", "0.6327527", "0.6327527", "0.63156044", "0.6274496", "0.62336254", "0.6201393", "0.61899", "0.61899", "0.6181303", "0.6138216", "0.61024606", "0.6079889", "0.6078942", "0.60608804", "0.6054801", "0.6050636", "0.60302705", "0.60200286", "0.59777236", "0.5955148", "0.59206396", "0.5919628", "0.59172505" ]
0.72743374
1
Get a list of pbv names Returns list of strings
def pbv_name_list(self): return list(self._link_reg.pbv_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pbv_names(self):\n return self._pbvs", "def gpv_name_list(self):\n return list(self._link_reg.gpv_names)", "def psv_name_list(self):\n return list(self._link_reg.psv_names)", "def pbvs(self):\n for name in self._pbvs:\n yield name, self._data[name]", "def gpv_names(self):\n return self._gpvs", "def names(self) -> list[str]:", "def get_vgs() -> List[str]:\n p = subprocess.run(\n [\"vgs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n return [vg[\"vg_name\"] for vg in output[\"report\"][0][\"vg\"]]", "def names(cls) -> List[str]:", "def listPVs(self):\n for pv in self._pvlist:\n print pv", "def prv_names(self):\n return self._prvs", "def prv_name_list(self):\n return list(self._link_reg.prv_names)", "def get_pv_names(k8s_cli, namespace, error_template):\n cmd = \"{} get -n {} PersistentVolumeClaim --selector={} -o=custom-columns=VOLUME:.spec.volumeName --no-headers\" \\\n .format(k8s_cli, namespace, OPERATOR_LABEL)\n missing_resource_template = f\"Namespace '{namespace}': Skip collecting information for PersistentVolumeClaim. \" \\\n f\"Server has no resource of type PersistentVolumeClaim\"\n output = run_shell_command_with_retries(cmd, KUBCTL_GET_YAML_RETRIES, error_template, missing_resource_template)\n return output.split()", "def psv_names(self):\n return self._psvs", "def getNames(self) -> List[unicode]:\n ...", "def get_pokemon_names():\n with open(POKEMON_FILE, 'r') as fh:\n pokemon = json.load(fh)\n return [name.lower() for name in pokemon]", "def names(self) -> List:\n ...", "def name_list(qbo_session):\n\n return qbo_session.name_list()", "def fcv_name_list(self):\n return list(self._link_reg.fcv_names)", "def GetNamesOfPieces(self):\n assert self.RecoveredEnoughPieces()\n result = []\n base = self.fileName + dibs_constants.fileSeparator \n for p in self.piecesRecovered.keys():\n result.append(base + p)\n return result", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def protocol_names(self):\n l = self.protocols()\n retval = [str(k.name) for k in l]\n return retval", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def valve_name_list(self):\n return list(self._link_reg.valve_names)", "def vbd_list(name=None, call=None):\n if call == \"function\":\n raise SaltCloudSystemExit(\n \"This function must be called with -a, --action argument.\"\n )\n if name is None:\n return \"A name kwarg is rquired\"\n ret = {}\n data = {}\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n vm = vms[0]\n vbds = session.xenapi.VM.get_VBDs(vm)\n if vbds is not None:\n x = 0\n for vbd in vbds:\n vbd_record = session.xenapi.VBD.get_record(vbd)\n data[\"vbd-{}\".format(x)] = vbd_record\n x += 1\n ret = data\n return ret", "def getLocalPluginNames():\r\n return [os.path.basename(f) for f in glob(buildPath('*.dll'))]", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def listBuilderNames():", "def listBuilderNames():", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names" ]
[ "0.82071203", "0.71072763", "0.69930744", "0.68813866", "0.66668886", "0.6622456", "0.6417987", "0.63324535", "0.6273439", "0.6254718", "0.6246069", "0.6238829", "0.62047076", "0.61949503", "0.6152581", "0.60600245", "0.59967786", "0.59805226", "0.5975194", "0.5934706", "0.5886293", "0.58850515", "0.58432746", "0.57919914", "0.57849044", "0.57817036", "0.5755129", "0.57515156", "0.57515156", "0.5748328" ]
0.8257615
0
Get a list of tcv names Returns list of strings
def tcv_name_list(self): return list(self._link_reg.tcv_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tcv_names(self):\n return self._tcvs", "def names(self) -> list[str]:", "def names(cls) -> List[str]:", "def getNames(self) -> List[unicode]:\n ...", "def get_names(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append(k)\n opts_list = sorted(opts_list)\n return opts_list", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def fcv_name_list(self):\n return list(self._link_reg.fcv_names)", "def names(self) -> List:\n ...", "def fcv_names(self):\n return self._fcvs", "def get_names(dep):\n res = [dep.name]\n return res", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def tank_name_list(self):\n return list(self._node_reg.tank_names)", "def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names", "def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names", "def get_cora_label_names():\n # type: () -> List[str]\n return _label_names", "def namelist(self):\n return []", "def get_feature_names(self):\n ...", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def names():\n pass", "def get_short_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def names(filter=None):", "def get_names(self):\n return self.names", "def currentAntennaNames(carmaOnly=False) :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n cname = i.carmaAntennaName\n tname = i.typedAntennaName\n if (carmaOnly) :\n names = i.carmaAntennaName\n else :\n names = \"%s(%s)\" %(cname,tname)\n namelist.append(names)\n return namelist", "def getNames():\n\n return ('run', 'Run Network')" ]
[ "0.8012389", "0.7307783", "0.7146537", "0.709622", "0.6900165", "0.6722755", "0.6684833", "0.6668676", "0.66517746", "0.6465502", "0.6454279", "0.62444496", "0.6233946", "0.6233946", "0.6201808", "0.61725366", "0.61725366", "0.6156361", "0.6143343", "0.6134101", "0.6115569", "0.6115569", "0.61114883", "0.61042744", "0.6100685", "0.6087272", "0.6083135", "0.60804695", "0.60646325", "0.60582185" ]
0.82840574
0
Get a list of fcv names Returns list of strings
def fcv_name_list(self): return list(self._link_reg.fcv_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fcv_names(self):\n return self._fcvs", "def get_fnames(self):\n return self.fnames[:]", "def names(self) -> list[str]:", "def names(cls) -> List[str]:", "def getFeatureNames(self):\n return [\"f100\", \"f103\", \"f104\"]", "def tcv_name_list(self):\n return list(self._link_reg.tcv_names)", "def facenames ( self ):\n self._facenames = []\n self.EnumerateFacenames()\n return self._facenames", "def getNames(self) -> List[unicode]:\n ...", "def getafNames(self):\n names = [self.af_dict['polar_files'][i] for i in range(len(self.af_dict['polar_files']))]\n return names", "def getFeatureNames(self):\n feature_names = super().getFeatureNames()\n feature_names.extend([\"f101\", \"f102\", \"f105\", \"fNum\", \"fCapStart\", \"fCapNoStart\"])\n return feature_names", "def names(self) -> List:\n ...", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def get_names(dep):\n res = [dep.name]\n return res", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def all_facenames ( ):\n global facenames\n \n if facenames is None:\n facenames = FontEnumerator().facenames()\n facenames.sort()\n return facenames", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def get_feature_names(self):\n ...", "def filenames(self):\n names = []\n for furi in np.asarray(self.fileuris).flat:\n names.append(furi)\n return names", "def all_fov_names(fov_directory=FOV_DIRECTORY):\n fov_names = [fname[:-4] for fname in os.listdir(fov_directory)\n if (fname.endswith(\".txt\")) and (not fname.startswith(\"$\"))]\n return fov_names", "def tcv_names(self):\n return self._tcvs", "def getFeatureNames(self):\n pass", "def getFeatureNames(self, featureClassName):\n return self.featureClasses[featureClassName].getFeatureNames()", "def names(filter=None):", "def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]", "def get_files_name(files):\n names = []\n for name in files:\n filename = files[name].filename\n names.append(filename)\n return names", "def namelist(self):\n return []" ]
[ "0.78478754", "0.712436", "0.7058395", "0.69252557", "0.6803095", "0.6760557", "0.6676739", "0.6672468", "0.65755755", "0.65727544", "0.6418178", "0.63568324", "0.63470227", "0.63281006", "0.63159966", "0.6260134", "0.62199223", "0.62199223", "0.621741", "0.621741", "0.6176964", "0.61597866", "0.6137519", "0.6131055", "0.61196667", "0.6104287", "0.6051564", "0.6040558", "0.6004393", "0.598554" ]
0.8191783
0
Get a list of gpv names Returns list of strings
def gpv_name_list(self): return list(self._link_reg.gpv_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gpv_names(self):\n return self._gpvs", "def get_vgs() -> List[str]:\n p = subprocess.run(\n [\"vgs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n return [vg[\"vg_name\"] for vg in output[\"report\"][0][\"vg\"]]", "def psv_name_list(self):\n return list(self._link_reg.psv_names)", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def list_gpo(self, _):\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n for gpo in results:\n print(\"{cn}: {name}\".format(cn=gpo[\"cn\"], name=gpo[\"displayName\"]))", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def pbv_name_list(self):\n return list(self._link_reg.pbv_names)", "def psv_names(self):\n return self._psvs", "def names(cls) -> List[str]:", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def gpvs(self):\n for name in self._gpvs:\n yield name, self._data[name]", "def get_goniometers_names():\n return [gon.name for gon in goniometers]", "def names(self) -> List:\n ...", "def pbv_names(self):\n return self._pbvs", "def get_ocsp_gnames(self):\n urls = ['uri:' + u for u in self.ocsp_urls]\n return self.load_gnames(urls)", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def prv_name_list(self):\n return list(self._link_reg.prv_names)", "def prv_names(self):\n return self._prvs", "def listPVs(self):\n for pv in self._pvlist:\n print pv", "def get_san_gnames(self):\n return self.load_gnames(self.san)", "def get_pv_names(k8s_cli, namespace, error_template):\n cmd = \"{} get -n {} PersistentVolumeClaim --selector={} -o=custom-columns=VOLUME:.spec.volumeName --no-headers\" \\\n .format(k8s_cli, namespace, OPERATOR_LABEL)\n missing_resource_template = f\"Namespace '{namespace}': Skip collecting information for PersistentVolumeClaim. \" \\\n f\"Server has no resource of type PersistentVolumeClaim\"\n output = run_shell_command_with_retries(cmd, KUBCTL_GET_YAML_RETRIES, error_template, missing_resource_template)\n return output.split()", "def get_srv_ppgrp_name(self):\n pp_grp_name_lst = list()\n for srv_grp in self.srv_grp_lst:\n pp_grp = list()\n for srv in srv_grp:\n pp_grp.append(\n (srv['name'] + '_pt_in', srv['name'] + '_pt_out'))\n pp_grp_name_lst.append(pp_grp)\n return pp_grp_name_lst", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def get_names(dep):\n res = [dep.name]\n return res", "def name_get(self):\n result = []\n for r in self:\n result.append((r.id, u\"%s %s\" % ('PO', r.name)))\n return result", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def return_names(self):\n return self.__name_list", "def get_population_names (f,a,s):\r\n ## a, s not used but needed for function to match general function format\r\n global gv\r\n aa = f.readline().strip()\r\n popnamelist = []\r\n i = 0\r\n foundghost = False\r\n while aa.find(\"Population\") >= 0:\r\n popname = aa.split()[3]\r\n if popname.upper() == \"GHOST\":\r\n foundghost = True\r\n popnamelist.append(popname)\r\n i += 1\r\n aa = f.readline().strip()\r\n if gv[\"useghost\"] == True and foundghost == False: # for compatibility with older output files\r\n popnamelist.append('ghost')\r\n anames = []\r\n if gv[\"usealtnames\"]:\r\n for line in open(gv[\"altnamefilename\"],\"r\"):\r\n temp = line.strip()\r\n if len(temp) > 0:\r\n anames.append(temp)\r\n anames = anames[0:len(popnamelist)]\r\n gv[\"altpopnames\"] = list(anames)\r\n return popnamelist" ]
[ "0.8337195", "0.7465264", "0.71278745", "0.69160676", "0.67721885", "0.674753", "0.66832805", "0.6682054", "0.6636047", "0.6615088", "0.6587514", "0.6531277", "0.6456293", "0.64385325", "0.64284253", "0.6414002", "0.6388162", "0.63829565", "0.6365987", "0.6354151", "0.63018876", "0.6296768", "0.6288498", "0.62521964", "0.62456745", "0.62356335", "0.6170074", "0.6163701", "0.6129481", "0.61049426" ]
0.84747857
0
Get a list of pattern names Returns list of strings
def pattern_name_list(self): return list(self._pattern_reg.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def names(self) -> list[str]:", "def _get_wild_tasks(self, pattern):\n wild_list = []\n for t_name in self._def_order:\n if fnmatch.fnmatch(t_name, pattern):\n wild_list.append(t_name)\n return wild_list", "def pattern_filter(patterns, name):\n return [pat for pat in patterns if fnmatch.fnmatchcase(name, pat)]", "def patterns(self):\n return self._pattern_reg", "def names(cls) -> List[str]:", "def _glob_list(pattern, full_path=False):\n if full_path:\n return sorted(glob.glob(pattern))\n else:\n return sorted([os.path.basename(fpath) for fpath in glob.glob(pattern)])", "def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns", "def get_gti_extensions_from_pattern(lchdulist, name_pattern=\"GTI\"):\n hdunames = [h.name for h in lchdulist]\n pattern_re = re.compile(\"^\" + name_pattern + \"$\")\n gtiextn = []\n for ix, extname in enumerate(hdunames):\n if pattern_re.match(extname):\n gtiextn.append(ix)\n return gtiextn", "def getFilenameList(path, pattern='*.nii.gz'):\n\n filename = [os.path.basename(x) for x in sorted(glob.glob(os.path.join(path, pattern)))]\n\n return filename", "def _make_patterns(patterns):\n field_registry = display_fields.FieldRegistry()\n\n pattern_list = display_pattern.ScreenPatternList(\n field_registry=field_registry,\n )\n for pattern in patterns:\n pattern_list.add(pattern.split('\n'))\n return pattern_list", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def get_patterns(\n self, pipeline: str, label: str, key: str\n ) -> List[Pattern]:", "def filenamePatterns(self):\n return ['*.'+e for e in self.filenameExtensions]", "def get_pattern_labels(self, pipeline: str) -> Set[str]:", "def get_pattern(self):\n pattern = list()\n for item in self.gradual_items:\n pattern.append(item.gradual_item.tolist())\n return pattern", "def glob_fmt(pattern: str, *templates: Strings) -> List[str]:\n results: List[str] = []\n for wildcards in glob_extract(pattern):\n for template in each_string(*templates):\n results.append(copy_annotations(template, template.format(**wildcards)))\n return results", "def paths_to_ymd_string_list(paths, pattern):\n se = re.compile(pattern).search\n return [ymd_tuple_to_string(m.group(1,2,3)) for p in paths for m in [se(p)] if m]", "def patterns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"patterns\")", "def to_globs(self, build_patterns: Iterable[str]) -> Tuple[str, ...]:", "def extract_pattern(self, patterns):\n\n # if we have more patterns or\n # a single one which is not a file:\n if len(patterns) > 1 or (\n len(patterns) == 1 and not os.path.isfile(patterns[0])):\n return patterns\n\n else:\n pattern = patterns[0]\n pat_list = []\n # if PATTERN is a file, extract all patterns\n if os.path.isfile(pattern):\n try:\n with open(pattern, \"r\", encoding=\"utf-8\") as p_file:\n for line in p_file:\n pat_list.append(line.strip())\n except Exception:\n print(\"The selected PATH-file cannot be opened! \"\n \"Please choose another one.\")\n sys.exit()\n\n return pat_list", "def getNames(self) -> List[unicode]:\n ...", "def patterns(self) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self.fuzzy_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern.text, \"type\": \"fuzzy\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, patterns in self.regex_patterns.items():\n for pattern, kwargs in zip(patterns[\"patterns\"], patterns[\"kwargs\"]):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": pattern, \"type\": \"regex\"}\n if kwargs:\n p[\"kwargs\"] = kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n return all_patterns", "def listtypes(self):\n\n pattern_types = [i for i in sorted(self._allowed_patterns.iterkeys())]\n\n return pattern_types", "def findMatchingNames(regname, map):\n list = []\n regname += \"$\"\n\n # Find the existing items that match this string\n\n for name in map:\n regexp = re.compile(regname).match(name)\n if regexp:\n list.append(regexp)\n\n return list", "def pattern_gen():\n pattern = \"\"\n\n return pattern", "def compile_filename_patterns(pattern_list):\n\n pats=list(pattern_list)\n for i in range(len(pats)):\n if isinstance(pats[i],str):\n if pats[i].startswith('re:'):\n pats[i]=pats[i][3:]\n else:\n pats[i]=fnmatch.translate(pats[i])\n pats[i]=re.compile(pats[i])\n return pats", "def get_name(self):\n return self._pattern", "def patterns(self) -> List[AttributeRulerPatternType]:\n all_patterns = []\n for i in range(len(self.attrs)):\n p = {}\n p[\"patterns\"] = self.matcher.get(str(i))[1]\n p[\"attrs\"] = self._attrs_unnormed[i] # type: ignore\n p[\"index\"] = self.indices[i] # type: ignore\n all_patterns.append(p)\n return all_patterns # type: ignore[return-value]", "def getPossibleMatchesList(self):\n return [p for p in self._patterns if p.startswith(self._keyCode)]", "def names(self) -> List:\n ..." ]
[ "0.6853399", "0.6688259", "0.66735446", "0.662816", "0.6604652", "0.6583273", "0.6579248", "0.65776634", "0.6446731", "0.64373344", "0.639123", "0.6373305", "0.62809545", "0.6243343", "0.62412816", "0.623562", "0.6233539", "0.6227802", "0.6211251", "0.619828", "0.6139878", "0.6095696", "0.60853297", "0.6067161", "0.60526735", "0.6052114", "0.6021415", "0.5990968", "0.5986376", "0.5981019" ]
0.8001064
0
Get a list of curve names Returns list of strings
def curve_name_list(self): return list(self._curve_reg.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pump_curve_names(self):\n return list(self._pump_curves)", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def efficiency_curve_names(self):\n return list(self._efficiency_curves)", "def volume_curve_names(self):\n return list(self._volume_curves)", "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def untyped_curve_names(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n return list(untyped)", "def curves(self):\n return self._curve_reg", "def getElementName(self):\n return _libsbml.Curve_getElementName(self)", "def get_curves(p):\n curve_list = []\n for well in p:\n curves = well.data.keys()\n for c in curves:\n curve_list.append(c)\n return sorted(set(curve_list))", "def ex_curve(data):\n rv = []\n try:\n ef = autocomplete_curve_function(data[0])\n ed = autocomplete_curve_direction(data[1])\n period = 2\n try:\n period = max(int(data[2]), 2)\n except ValueError:\n pass\n data = data[3:]\n if not data:\n if consts.VERBOSE:\n print('ERROR: No data for curve')\n return []\n f = CURVE_FUNCTIONS[ef][ed]\n maxi = len(data)-1\n for i in range(period):\n v = f(float(i) / float(period-1))\n di = int(round(v*float(maxi)))\n rv.append(data[di])\n\n except Exception as e:\n if consts.VERBOSE:\n print('ERROR: Curve failed [%s]'%e)\n\n return rv", "def get_curve(self, name):\n return self._curve_reg[name]", "def nurbsCurveToBezier(*args, **kwargs)->List[AnyStr]:\n pass", "def names(self) -> list[str]:", "def getGraphPointsNames(self):\n return [gp.id for gp in self.getGraphPoints()]", "def serialize(self):\r\n values = ','.join(i.serialize() for i in self.curves)\r\n return \"[{}]\".format(values)", "def get_names(dep):\n res = [dep.name]\n return res", "def curves(self, attr, value, strLower=False, strStartWith=False):\n out = []\n for c in self.iterCurves():\n lbl = c.getAttribute(attr)\n if isinstance(lbl, type(value)):\n if strLower:\n if isinstance(lbl, str):\n lbl = lbl.lower()\n if isinstance(value, str):\n value = value.lower()\n if lbl == value:\n out.append(c)\n elif (isinstance(lbl, str) and strStartWith\n and lbl[:len(value)] == value):\n out.append(c)\n return out", "def get_symbol(self):\n return []", "def autocomplete_curve_function(s):\n s = s.strip().upper()\n if not s:\n return CURVE_FUNCTIONS_ORDERED[0]\n for i in CURVE_FUNCTIONS_ORDERED:\n if i.startswith(s):\n return i\n if consts.VERBOSE:\n print('ERROR: Bad curve function %s'%s)\n return CURVE_FUNCTIONS_ORDERED[0]", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def getCurve(self, *args):\n return _libsbml.GeneralGlyph_getCurve(self, *args)", "def getGraphPointNamesString(self):\n names = []\n for gp in self.getGraphPoints():\n if hasattr(aq_base(gp), 'isBroken') and gp.isBroken():\n names.append('%s(<span style=\"color: red\">missing</span>)' %\n gp.id)\n else:\n names.append(gp.id)\n return ', '.join(names)", "def getCurve(self, *args):\n return _libsbml.SpeciesReferenceGlyph_getCurve(self, *args)", "def legend_names(self):\n return [leg.label for leg in self.legends]", "def names(cls) -> List[str]:", "def name(self):\n return [o.name for o in self.obs]", "def get_currencies_names():\n names = [x for x in cur_dict]\n return names", "def getListOfCurveSegments(self):\n return _libsbml.Curve_getListOfCurveSegments(self)", "def endog_names(self):\n return self.data.ynames", "def getNames(self) -> List[unicode]:\n ..." ]
[ "0.7918395", "0.78808326", "0.77684337", "0.7411239", "0.71585196", "0.6744637", "0.6661399", "0.6598523", "0.6596619", "0.65196663", "0.6311063", "0.62766385", "0.6136272", "0.6124383", "0.5961749", "0.5954731", "0.5933657", "0.5894027", "0.58754605", "0.5824049", "0.5807217", "0.57923853", "0.57807255", "0.57779795", "0.5757743", "0.57462156", "0.57345885", "0.57284474", "0.57168406", "0.57112277" ]
0.81836087
0
Get a list of source names Returns list of strings
def source_name_list(self): return list(self._sources.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def source(self) -> list:\n sources = self.source_control.list_sources()\n sources_list = [source['label'] for source in sources]\n return sources_list", "def source_list(self):\n return [g[\"name\"] for g in self._galleries]", "def get_names(source):\n names = [row[\"name\"] for row in source]\n return sorted(names)", "def source_list(self):\n return self._source_list", "def source_list(self):\n return self._source_list", "def Sources():\n return _sources", "def names(self) -> list[str]:", "def sources(source):\n\n source2 = models.Source(name=u\"Bob's Funerals.com\", url=u\"http://www.bobsfunerals.com\")\n source3 = models.Source(name=u\"Jim's Funerals.com\", url=u\"http://www.jimsfunerals.com\")\n return (source, source2, source3)", "def source_parameter_names(self):\n return [x for x, y in self.transformations.items() if isinstance(y, str)]", "def get_alert_sources_as_text_list():\n\n text_list = \"\"\n for alert_source in ALERT_SOURCES[0:-1]:\n text_list += alert_source + \", \"\n if text_list:\n text_list += \" and \"\n text_list += ALERT_SOURCES[-1]\n\n return text_list", "def source_list(self):\n return [\n source.Name for source in self.coordinator.data.sources if not source.Hidden\n ]", "def source_list(self):\n return list(self._group.streams_by_name().keys())", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources", "def getNames(self) -> List[unicode]:\n ...", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources", "def get_strings(src_file):\n res = []\n try:\n res = open(src_file,'r').readlines()\n res = [x.strip() for x in res]\n except:\n res = []\n return res", "def names(cls) -> List[str]:", "def usage(self):\n names = self.sources.keys()\n return sorted([(n.replace('__', '.'), self._resolve(n)) for n in names],\n key=lambda el: el[0])", "def source_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"source_ids\")", "def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths", "def source_list(self):\n return list(self._client.group.streams_by_name().keys())", "def get_source_info_list(self):\n self._get_source_info_list = pa_source_info_cb_t(self._source_info_cb)\n pa_context_get_source_info_list(self._context,\n self._get_source_info_list,\n None)", "def get_all_sources(remit):\n if remit == 'panzer' or remit == 'pandoc':\n os.chdir('source-'+remit)\n sourcelist = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n os.chdir('..')\n else:\n # get the maximal list of sources for a diff\n pandoc_list = get_all_sources('pandoc')\n panzer_list = get_all_sources('panzer')\n sourcelist = list(set(pandoc_list+panzer_list))\n sourcelist.sort()\n return sourcelist", "def get_source_tags(self):\n return ['en:' + self.tag_manager.normalize_tag_wtokenization(t, self.tries['en'], prefixed=False) for t in self.tag_manager.unprefixed_source_tags]", "def parse_sources(xml_element):\r\n sources = xml_element.get('sources')\r\n if sources:\r\n return [location.strip() for location in sources.split(';')]", "def get_short_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result", "def source_list(self):\n return self._playlists", "def getImageName(self):\n return [os.path.basename(name) for name in self.meta['sources']]", "def sources(self):\n return self._sources.keys()", "def get_sources(self, target):\n return sorted(list({t[0].split('.')[0]\n for t in self.mapping.items()\n if target in [c.split('.')[0]\n for c in type(t[1]) is dict and t[1].keys() or ()]}))" ]
[ "0.7640286", "0.7293398", "0.7200503", "0.7097758", "0.7097758", "0.696268", "0.6953154", "0.68827164", "0.68759656", "0.68512136", "0.6798987", "0.6763301", "0.66584516", "0.66349", "0.6595274", "0.657544", "0.6540999", "0.6520172", "0.6509632", "0.65048426", "0.650278", "0.643385", "0.6400153", "0.6388556", "0.63698655", "0.63508815", "0.63424087", "0.63159144", "0.6310295", "0.6302338" ]
0.8032265
0
Get a list of control/rule names Returns list of strings
def control_name_list(self): return list(self._controls.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rule_names(self):\n return self.rules.keys()", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def _get_control_names(self, interface, form):\n return sorted([control.name\n for control in form.controls\n if interface.providedBy(control)])", "def names(self) -> list[str]:", "def all_control_names(self):\n return self._get_control_names(\n zope.testbrowser.interfaces.IControl, self.getForm())", "def get_cora_label_names():\n # type: () -> List[str]\n return _label_names", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def getOptionsNames(self) -> List[unicode]:\n ...", "def names(cls) -> List[str]:", "def legend_names(self):\n return [leg.label for leg in self.legends]", "def getNames(self) -> List[unicode]:\n ...", "def getNames():\n\n return ('run', 'Run Network')", "def get_control_ids(self) -> List[str]:\n return self._control_dict.keys()", "def get_steps_names(self) -> List[str]:\n return [step.Name.lower() for step in self.Sequence if isinstance(step, Step) and step.Name != \"\"]", "def tcv_name_list(self):\n return list(self._link_reg.tcv_names)", "def get_names(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append(k)\n opts_list = sorted(opts_list)\n return opts_list", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def names(self) -> List:\n ...", "def controls(self):\n controls = []\n for i in range(len(self.__listReq)):\n if self.__controlsChecks[i].isChecked():\n controls.append(self.__listReq[i].get(\"id\"))\n return controls", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def submit_control_names_all_forms(self):\n forms = [self.getForm(index=index)\n for index, _ in enumerate(self._getAllResponseForms())]\n names = [\n self._get_control_names(\n zope.testbrowser.interfaces.ISubmitControl, x)\n for x in forms]\n return names", "def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames", "def get_names(self):\n selected_masks = self._component_obj.get_support()\n return [feature_name for (selected, feature_name) in zip(selected_masks, self.input_feature_names) if selected]", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def lookupcontrols(self):\n\t\tcontrols = []\n\t\tcontrolname = None\n\t\tqs = ValidatedSLIQuerySet(self.user_details)\n\t\tcontrol_id = int(qs.queryset_single_screen(self.screenid).values_list('controlscreen')[0][0])\n\t\tif control_id and control_id in self.user_details['authorized_screens']:\n\t\t\tcontrols = [(r[0], str(r[0])) for r in list(db.SLSDatapoint.objects.filter(\n\t\t\t\trelscreen__id=control_id).values_list('replicate').distinct())]\n\t\t\tcontrolname = \"(Name wildtype control: \" + db.Screen.objects.filter(id=control_id).values_list('name')[0][0] + \")\"\n\t\telse:\n\t\t\tself.error = fv.no_controls_available\n\t\treturn controls, controlname", "def getConditionNames():\n return _conditions.keys()", "def get_names(dep):\n res = [dep.name]\n return res", "def getElementName(self):\n return _libsbml.ListOfRules_getElementName(self)" ]
[ "0.71425015", "0.68443525", "0.6620647", "0.6553509", "0.6531148", "0.63990444", "0.63443166", "0.63027054", "0.6252463", "0.6241359", "0.6145582", "0.61365986", "0.6088305", "0.60836315", "0.60831684", "0.60462165", "0.60083985", "0.5993568", "0.5942411", "0.5872572", "0.58685523", "0.58531356", "0.5815835", "0.5806834", "0.579096", "0.579096", "0.5763613", "0.5762126", "0.5756804", "0.57563454" ]
0.729938
0
The number of junctions
def num_junctions(self): return len(self._node_reg.junction_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def num_joins(self):\n ret_val = self._num_joins()\n return ret_val", "def get_num_connections(self):\n\n synapses = 0\n for mat in self.weights:\n synapses += mat.size\n return synapses", "def __len__(self) -> int:\n return 1 + sum(len(child) for child in self.children)", "def size(self):\n return (len(self.nodes), sum([len(x.outgoing_relations) for x in self.nodes.values()]))", "def __len__(self):\n return 1 + sum([len(child) for child in self.children])", "def deep_len(lnk):\n \"*** YOUR CODE HERE ***\"\n if lnk == Link.empty:\n return 0\n elif type(lnk) is not Link:\n return 1\n else:\n return deep_len(lnk.first) + deep_len(lnk.rest)", "def number_of_trail_edges(self):\n return len(list(self.trail_edges()))", "def num_joins(self):\n return self._num_joins", "def get_length(self):\n curr = self.head\n length = 0\n\n while curr != None:\n length += 1\n curr = curr.link\n\n return length", "def size(self):\n try:\n return len(self._adjacency_list)\n except Exception as error:\n print(f'An error occurred: {error}')", "def break_count(self):\n return len(self.link_ids) + len(self.crossring_cleavages)", "def count(self):\n return len(self.__links)", "def count(self):\n\t\treturn len(list(self.nodes))", "def get_length(self):\n pointer = self.head\n counter = 0\n while pointer:\n counter += 1\n pointer = pointer.next_node\n return counter", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def __len__(self) -> int:\n return len(self.links)", "def count(self):\n node = self.head\n i = 0\n while node:\n i += 1\n node = node.next\n\n return i", "def length(self):\n return self.linked_list.length()", "def number_of_trails(self):\n return len(list(self.trails()))", "def degree(self) -> int:\n return len(self.neighbours)", "def degree(self) -> int:\n return len(self.neighbours)", "def degree(self) -> int:\n return len(self.neighbours)", "def nb_triples(self) -> int:\n return 0", "def size(self):\n return len(self._adjacency_list)", "def num_cochains(self) -> int:\n if self.__num_cochains__ is not None:\n return self.__num_cochains__\n return self.ptr.numel() + 1", "def __len__(self):\n return self.count_of(CUBA.NODE)", "def __len__(self):\n return len(self.subgraph_list)", "def num_adversaries(self) -> int:\n pass" ]
[ "0.7173659", "0.69816774", "0.69730014", "0.6608103", "0.65723664", "0.6569851", "0.6567663", "0.65596044", "0.65587217", "0.65556127", "0.6511058", "0.647438", "0.6459975", "0.6436464", "0.64354825", "0.6421428", "0.64130574", "0.6397925", "0.637273", "0.636859", "0.6362135", "0.6361008", "0.6361008", "0.6361008", "0.63542795", "0.6350594", "0.6346151", "0.6340365", "0.63341755", "0.6325981" ]
0.77778614
0
The number of tanks
def num_tanks(self): return len(self._node_reg.tank_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tanks(self):\n return self._node_reg.tanks", "def num_janks(self, interval=None):\n return len(self.jank_intervals(interval=interval))", "def n_trees(self):\n return len(self.data_kd)", "def getNumTiles(self):\n\t\treturn self.numTiles", "def rank():\n return 0", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def size(self):\n return self.prev(self.rank).prev().rank + 1", "def norders(self):\n return 21", "def __len__(self):\r\n return len(self.ranks)", "def tank_names(self):\n return self._tanks", "def getNumTiles(self):\n return self.height * self.width", "def getNumTiles(self):\n return (self.width) * (self.height)", "def count(self):\r\n return self.count_helper(self.top_node)", "def count(self):\n return len(self.deck)", "def getNumTiles(self):\n return self.numTiles\n #raise NotImplementedError", "def n_thres(self):\n return np.size(self.thres)", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def n_levels(self):\n return len(self.scales)", "def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def nbr_tours(self):\n nbr_tours = 0\n for i in range(3):\n for j in range(3):\n if self.grille[i][j] != 0:\n nbr_tours += 1\n return nbr_tours", "def __rank__(self) -> int:", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def get_number_of_testing(self):\n return self.n_test", "def nlevels(self) -> int:\n return len(self._levels)", "def numel(self):\n return self.t.size", "def count():", "def tally(self):\n return self.count", "def get_number_rows(rk_settings, rock_height, star_height):\r\n\tavailable_space_y = (rk_settings.screen_height -\r\n\t\t\t\t\t\t(3 * star_height) - rock_height)\r\n\tnumber_rows = int(available_space_y / (2 * star_height))\r\n\treturn number_rows", "def n_levels(self):\n return self.primary_header['Number of levels']" ]
[ "0.7776518", "0.7585978", "0.6880398", "0.6690311", "0.65511477", "0.65338445", "0.65303934", "0.65251786", "0.6523661", "0.6511023", "0.6502101", "0.6457421", "0.64225304", "0.6412317", "0.63993996", "0.63987553", "0.63728577", "0.6364467", "0.6339812", "0.63004667", "0.62827957", "0.6272853", "0.6270902", "0.6267479", "0.62625736", "0.6260889", "0.6259958", "0.6253087", "0.6246232", "0.62455374" ]
0.8292153
0
The number of reservoirs
def num_reservoirs(self): return len(self._node_reg.reservoir_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count():", "def get_num_slots(self):\n # Your code here\n return self.capacity", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def reservoirs(self): \n return self._node_reg.reservoirs", "def __len__(self):\n return self.info.NumRings()", "def numResidues(self):\n resIDs = set (self.resnum)\n return len (resIDs)", "def count(self):\n # TODO not implemented yet\n return 0", "def number_of_crew(self):\n return self._number_of_crew", "def num_wires(self):", "def count(self):\n return self.vcount", "def carn_count(self):\n return len(self.carnivores)", "def num_carns(self):\n return self._num_carns", "def get_slots(self) -> int:", "def count(self):\n return int()", "def count(self):\n return int()", "def count():\r\n c = eNine.get()\r\n eNine.delete(0, END)\r\n count = int(c)\r\n count += 1\r\n eNine.insert(0, count)", "def numResidues(self):\n\n return self.getHierView().numResidues()", "def get_iter_num(self):\n\tif len(self.cost) > 0:\n first_key = list(self.cost.keys())[0]\n num = len(self.cost[first_key]) - 1\n\telse:\n\t first_key = list(self.prim_var.keys())[0]\n num = len(self.prim_var[first_key]) - 1\n\treturn num", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def count_standard_residues(self):\n n = 0\n for na in self.iter_standard_residues():\n n += 1\n return n", "def count_non_standard_residues(self):\n n = 0\n for frag in self.iter_non_standard_residues():\n n += 1\n return n", "def Capacity(self) -> int:", "def __used(self):\n tot=0\n assign={}\n for c in self.assigned :\n if not assign.has_key(c.start) :\n assign[c.start]=c.end\n tot+=c.end-c.start+1\n return tot", "def getSegmentCount(self) -> int:\n ..." ]
[ "0.67223126", "0.66856825", "0.65716654", "0.6531035", "0.6531035", "0.6531035", "0.6531035", "0.6511663", "0.6507631", "0.6439152", "0.6438949", "0.642677", "0.6345383", "0.63144374", "0.6282734", "0.6279368", "0.6278311", "0.627725", "0.627725", "0.6232956", "0.6221794", "0.621477", "0.62109786", "0.62109786", "0.62109786", "0.61477333", "0.6125323", "0.6094613", "0.6092809", "0.6081449" ]
0.7678594
0
The number of pipes
def num_pipes(self): return len(self._link_reg.pipe_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pipe_length(self, pipe):\n\n if pipe not in self.components:\n raise KeyError(\n '{} is not recognized as an existing pipe'.format(pipe))\n\n return self.components[pipe].get_length()", "def get_number_of_output_ports(self):\n return 1", "def ShowPipeStats(cmd_args=None):\n print \"Number of pipes: {: d}\".format(kern.globals.amountpipes)\n print \"Memory used by pipes: {:s}\".format(sizeof_fmt(int(kern.globals.amountpipekva)))\n print \"Max memory allowed for pipes: {:s}\".format(sizeof_fmt(int(kern.globals.maxpipekva)))", "def n_outputs(self):\n return len(self.output_names())", "def count(self):\n return len(self._commands)", "def N_shells(self):\n return self._N_shells", "def get_num_outputs(self):\n return len(self.outputs)", "def num_commands(self):\n return len(self.commands)", "def get_number_of_datasets_in_fifo(self):\n return self.read_byte_data(APDS_9960.GESTURE_FIFO_LEVEL_REG_ADDRESS)", "def n_channels(self):\n return len(self.channels)", "def n_outputs(self):\n return self.__n_outputs", "def num_processes():\n return 1", "def get_open_fds():\n pid = os.getpid()\n procs = subprocess.check_output([\"lsof\", \"-w\", \"-Ff\", \"-p\", str(pid)])\n procs = procs.decode(\"utf-8\")\n\n return len([s for s in procs.split(\"\\n\") if s and s[0] == \"f\" and s[1:].isdigit()])", "def nOutputs(self):\n\n\t\treturn self._nOutputs", "def get_number_of_input_connections(self):\n return 1", "def num_processes(self, new_value):", "def get_num_chunks(self) -> int:", "def num_streams(self):\n self._num_streams = self.lib.iperf_get_test_num_streams(self._test)\n return self._num_streams", "def num_pumps(self):\n return len(self._link_reg.pump_names)", "def count(self):\n return len(self._components)", "def n_components(self):\n return 1", "def count():", "def get_control_count(cmd):\n return len(cmd.control_qubits)", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.beamformer_sptr_max_noutput_items(self)", "def num_outputs(cls) -> list[int]:\n return [5] * 10", "def num_devices(self):\n # put every device into bypass mode (IR = all 1's)\n tdi = bits.bits()\n tdi.ones(_flush_size)\n self.driver.scan_ir(tdi)\n # now each DR is a single bit\n # the DR chain length is the number of devices\n return self.dr_length()", "def count():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n n = 0\n try:\n while True:\n (yield)\n n += 1\n except GeneratorExit:\n target.send(n)\n target.close()\n\n return _dagpype_internal_fn_act", "def num_processes(self):\n return 1", "def count(self):\n return len(self.read_ints())", "def pipes(self): \n return self._link_reg.pipes" ]
[ "0.6861697", "0.67390794", "0.6627592", "0.655506", "0.6550658", "0.6462149", "0.6444095", "0.6358948", "0.6337157", "0.6326174", "0.63176036", "0.6302765", "0.625705", "0.62213284", "0.6208864", "0.6206694", "0.61970145", "0.6150049", "0.6144482", "0.6142951", "0.6136111", "0.61344326", "0.61287415", "0.6121498", "0.61167455", "0.61077857", "0.61061066", "0.61027724", "0.60731053", "0.60679394" ]
0.8202305
0
The number of pumps
def num_pumps(self): return len(self._link_reg.pump_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nrof_pumps(self):\n pumps = 0\n for p in self.pump_array:\n if p:\n pumps += 1\n return pumps", "def num_wires(self):", "def count():", "def npulses(self):\n return self.header.pulse_count", "def numpsus():\n click.echo(_wrapper_get_num_psus())", "def pumps(self): \n return self._link_reg.pumps", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_marble_count(self):", "def tally(self):\n return self.count", "def count(self):\n return len(self.deck)", "def get_numpins(self):\n return self.numpins", "def count(self):\n # TODO not implemented yet\n return 0", "def nspins(self):\n return len(self)", "def determine_number_of_packets(self):\n self.Ltot = 4. * np.pi * np.sum(self.eta * self.dV)\n self.L = self.Ltot / float(self.Npackets)\n\n self.npackets_cell = (4. * np.pi * self.eta * self.dV /\n self.L).astype(np.int)\n self.npackets_cell_cum_frac = (\n np.cumsum(self.npackets_cell).astype(np.float) /\n np.sum(self.npackets_cell))", "def buses_count(self):\n\n count = 0\n for line in self.__bus_dict.values():\n # for item in buses:\n count += len(line)\n return count", "def count(self):\n return int()", "def count(self):\n return int()", "def nClumps(self):\n \n return len(self)", "def numberConsumed(self):\n\n\t\treturn len([bottle for bottle in self.bottles if bottle.consumption != None])", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def get_num_psus(self):\n return len(self._psu_list)", "def number_of_launches(self):\n return self._number_of_launches", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def number_of_bells(self) -> int:\n return self._tower.number_of_bells", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def number_of_beds(self):\n return self._number_of_beds", "def count(self) -> int:\n return pulumi.get(self, \"count\")" ]
[ "0.82744265", "0.6849408", "0.67384434", "0.6730982", "0.66905665", "0.660744", "0.6517272", "0.6517272", "0.6517272", "0.6517272", "0.6470648", "0.6430279", "0.642148", "0.641826", "0.64141464", "0.6402074", "0.6397082", "0.639609", "0.63788354", "0.63788354", "0.63771194", "0.6364708", "0.6363494", "0.63568866", "0.63026196", "0.6300474", "0.6283895", "0.6273696", "0.6270862", "0.6266266" ]
0.77231
1
The number of valves
def num_valves(self): return len(self._link_reg.valve_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nvar(self):\n return len(self.v)", "def __len__(self):\n return self.nb_iterations", "def count(self):\n return self.vcount", "def valency(self):\n return len(self.neighbors())", "def num_params(self):", "def __len__(self):\n return len(self._varvals)", "def __len__(self):\n return self.nparticles", "def __len__(self):\n return len(self.vals)", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n", "def __len__(self):\n\n return len(self.fvals)", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def numel(self):\n return self.t.size", "def nVariables(self):\n return len(self.variables)", "def __len__(self) -> int:\n return len(self.variables)", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def v_size(self) -> int:\n return self.nodes_on_graph", "def num_vars(self):\n return len(self.bounds.lb)", "def number_of_iterations(self) -> int:\n pass", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def v_size(self) -> int:\n return len(self.Nodes)", "def num_wires(self):", "def number_of_electrodes(self):\n return self._pre_kernel.shape[1]", "def __len__(self):\n return len(self._nums)", "def count(self):\n\t\treturn len(list(self.nodes))", "def N ( self ) :\n return self.__N", "def len(self):\n return self.n" ]
[ "0.7356873", "0.7152376", "0.7128234", "0.7079117", "0.70560277", "0.7013356", "0.6965341", "0.69152635", "0.6889811", "0.6839744", "0.68240225", "0.6792508", "0.67643374", "0.67640054", "0.67554945", "0.6729148", "0.67072624", "0.669019", "0.6684467", "0.6675353", "0.6675353", "0.6675353", "0.6675353", "0.66665727", "0.6662699", "0.6655836", "0.6654028", "0.66530675", "0.66502386", "0.66405225" ]
0.8071682
0
The number of patterns
def num_patterns(self): return len(self._pattern_reg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self) -> int:\n n_fuzzy_patterns = sum(len(p[\"patterns\"]) for p in self.fuzzy_patterns.values())\n n_regex_patterns = sum(len(p[\"patterns\"]) for p in self.regex_patterns.values())\n return n_fuzzy_patterns + n_regex_patterns", "def npatterns(self):\n return len(self.patterns)", "def get_count(self):\n\n return len(self._pattern)", "def __len__(self: TokenMatcher) -> int:\n return len(self._patterns)", "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def getNPatterns(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.getNPatterns(self)", "def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])", "def queryLengthOfPattern(self):\n self._lengthOfPattern = \\\n self._readInt('How many pegs are in the secret', 1, 10)\n return self._lengthOfPattern", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count():", "def PatternCount(text, pattern):\n\n count = 0\n for i in range(0, len(text)-len(pattern)+1):\n if text[i:i+len(pattern)] == pattern:\n count += 1\n return count", "def count(self):\n return len(self.find())", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def support_count(pattern, D):\n support_count = 0\n tmp_p = set(pattern)\n for transaction in D:\n if tmp_p <= set(transaction):\n support_count += 1\n return support_count", "def _n_matches(gold_tokens, pred_tokens):\n matcher = difflib.SequenceMatcher(None, gold_tokens, pred_tokens)\n return sum(match.size for match in matcher.get_matching_blocks())", "def factories(self):\n return self.__as_str.count(\"f\") * 4", "def utr5_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.five_prime_utr_sequence.upper()))", "def count(self):\n # TODO not implemented yet\n return 0", "def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def utr3_motif_counts(self, pattern):\n return len(re.findall(pattern.upper(), self.three_prime_utr_sequence.upper()))", "def fixation_count(self) -> int:\n return len([fix for fix in self.fixations if not fix.excluded])", "def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def getLargestPatternLength(self):\n return self._patternLimit", "def count(seq):\n\treturn sum(1 for x in seq)", "def matched_length(self) -> int:\n return sum(seg.matched_length for seg in self.segments)" ]
[ "0.8040715", "0.80276746", "0.79672825", "0.7956098", "0.77537733", "0.76355374", "0.70745206", "0.6905691", "0.6862375", "0.68336916", "0.66170734", "0.6587431", "0.6543572", "0.6543572", "0.6543572", "0.6543572", "0.6514328", "0.65043736", "0.6479225", "0.64716905", "0.64658237", "0.6463275", "0.6403275", "0.63920486", "0.63894", "0.6382862", "0.63577485", "0.63570577", "0.63543814", "0.63513327" ]
0.81838506
0
The number of curves
def num_curves(self): return len(self._curve_reg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def curve_number(self):", "def getNumCurveSegments(self):\n return _libsbml.Curve_getNumCurveSegments(self)", "def num_quadrature_points(self) -> int:", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def num_sigmas(self):\n return 2*self.n + 1", "def numberOfPoints(self):\n return 20000", "def number_of_basis(self):\n return self._pre_kernel.shape[0]", "def nr_points(self):\n return len(self.x)", "def norders(self):\n return 21", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def number_of_electrodes(self):\n return self._pre_kernel.shape[1]", "def num_polys(self):\n ret_val = self._num_polys()\n return ret_val", "def getNumberOfAxes(self):\n return self.numAxes", "def n_rays(self):\n try: \n return self._n_rays\n except AttributeError:\n self._n_rays = 0\n for r in self.rays(): self._n_rays += 1\n return self._n_rays", "def n_levels(self):\n return len(self.scales)", "def amount(self):\n return len(self.circles)", "def n_cs(self):\n return np.size(self._cs, 0)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)", "def nPoints(self):\n return self._c_param.shrake_rupley_n_points", "def n_thres(self):\n return np.size(self.thres)", "def num_params(self):", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def n_components(self):\n return 1", "def num_arcs(self):\n return len(self.arcs)", "def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret" ]
[ "0.69003946", "0.68829596", "0.68708044", "0.67858136", "0.66785693", "0.6449481", "0.6299548", "0.6258773", "0.62481177", "0.62191", "0.6205878", "0.6196767", "0.6180489", "0.61516505", "0.61496425", "0.61298734", "0.61173856", "0.60934234", "0.60934234", "0.6089018", "0.6087523", "0.6077801", "0.60723233", "0.6043379", "0.6043379", "0.6043379", "0.6043379", "0.5971605", "0.5923143", "0.5911042" ]
0.8231799
0
The number of sources
def num_sources(self): return len(self._sources)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_all_srcs_count(dataset: linux.LinuxSourcesDataset):\n # FIXME(cec): This value does not appear to stable across platforms, but it\n # should be.\n assert abs(len(dataset.all_srcs) - 26091) < 1000", "def fileCount(self):\n pass", "def getFileCount(self) -> int:\n ...", "def __number_of_files(self):\n self.__get_files()\n return len(self.files)", "def count():", "def num_chunking_units(self):\n if self._source_paths:\n return len(self._source_paths)\n return 1", "def getnrfiles(self):\n return len(self.filenames)", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def get_num_of_images(self):", "def count(self):\n return len(self.__links)", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def numberFiles(self):\n return self.n", "def GetNumberOfEventSources(self):\n event_source_stream_number = self._last_stream_numbers['event_source']\n\n number_of_event_sources = 0\n for stream_number in range(1, event_source_stream_number):\n offset_table = self._GetSerializedDataOffsetTable(\n 'event_source', stream_number)\n\n number_of_event_sources += offset_table.number_of_offsets\n\n number_of_event_sources += self._GetNumberOfSerializedAttributeContainers(\n 'event_sources')\n return number_of_event_sources", "def test_kernel_srcs_count(dataset: linux.LinuxSourcesDataset):\n # FIXME(cec): This value does not appear to stable across platforms, but it\n # should be.\n assert abs(len(dataset.kernel_srcs) - 310) < 10", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def n_total_files(self):\n return len(self.fileinfo)", "def countSites(self):\n self.ni = len(self.sites)\n return self.ni", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_source_counts(self):\n return deepcopy(self._source_counts)", "def count(self):\n # TODO not implemented yet\n return 0", "def getCount(self):\n return _osgAnimation.Target_getCount(self)", "def get_source_count(self, stmt):\n return self.get_source_count_by_hash(stmt.get_hash(shallow=True))", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def Count():\n return CheckForError(lib.Generators_Get_Count())", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def count_samples(self):\n return sum(SEQ_LENGTHS)" ]
[ "0.74113506", "0.7155835", "0.69133234", "0.6856751", "0.68429273", "0.68368053", "0.68331873", "0.68192124", "0.6790306", "0.67514884", "0.67217726", "0.6709941", "0.6708362", "0.66989625", "0.66856146", "0.6671262", "0.66590977", "0.66583157", "0.66367596", "0.66367596", "0.66367596", "0.66367596", "0.6583587", "0.6580574", "0.65504164", "0.6505529", "0.6492336", "0.6491775", "0.6481403", "0.64713424" ]
0.8289444
0
The number of controls
def num_controls(self): return len(self._controls)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_of_controls(self, recurse: bool) -> int:\n return len(list(self.get_all_controls(recurse)))", "def count(self):\r\n\r\n return len(self.widgets_list)", "def XPCountChildWidgets(inWidget):\n pass", "def __len__(self):\n return len(self._forms)", "def CountButtons(self):\r\n\r\n n = 0\r\n \r\n if self.HasCaption() or self.HasCaptionLeft():\r\n if isinstance(wx.GetTopLevelParent(self.window), AuiFloatingFrame):\r\n return 1\r\n \r\n if self.HasCloseButton():\r\n n += 1\r\n if self.HasMaximizeButton():\r\n n += 1\r\n if self.HasMinimizeButton():\r\n n += 1\r\n if self.HasPinButton():\r\n n += 1\r\n\r\n return n", "def get_control_count(cmd):\n return len(cmd.control_qubits)", "def getNumElements(self):\n return 1", "def getNumElements(self):\n return 1", "def size(self):\n\t\treturn len(self.lables)", "def get_num_of_choices(self) -> int:\n return len(self._choices)", "def count(self):\n return len(self._components)", "def getListBoxItemCount( self, cCtrlName ):\n oControl = self.getControl( cCtrlName )\n return oControl.getItemCount()", "def getNumElements(self):\n return 0", "def total_form_count(self):\n if self.initial_extra:\n count = len(self.initial_extra) if self.initial_extra else 0\n count += self.extra\n return count\n else:\n return forms.BaseInlineFormSet.total_form_count(self)", "def number_syllables(self):\n return len(self.array_form)", "def size(self):\n return len(self.selected)", "def get_num_of_images(self):", "def n_components(self):\n return 1", "def n_inputs(self):\n return len(self.input_names())", "def get_num_items(self):\r\n return self.num_items", "def get_number_of_models():\n return 8", "def getNumElements(self):\n raise Exception(\"Didn't expect this to get called.\")", "def count(self):\n return len(self)", "def NUMBER_OF_REC_CHOICE():\n return 13", "def __len__(self):\n return len(self.name_and_box_index)", "def get_num_inputs(self):\n return len(self.inputs)", "def __len__(self):\n #return len(self._tagged_values_dict)\n return len(list(self._visible_setting_names_gen))", "def size(self):\n return self._N", "def __len__(self):\n return 3", "def __len__(self):\n return 3" ]
[ "0.75083387", "0.7496899", "0.7116853", "0.68915015", "0.68793744", "0.68754846", "0.66907966", "0.66517556", "0.6629138", "0.65655446", "0.6551462", "0.64709383", "0.64610255", "0.6443094", "0.6413949", "0.6412288", "0.64081395", "0.63826525", "0.63275117", "0.63180983", "0.6292613", "0.6284004", "0.62815386", "0.62724185", "0.6271502", "0.626893", "0.62666357", "0.62503034", "0.62399435", "0.62399435" ]
0.81790185
0
Assign demands using values in a DataFrame. New demands are specified in a pandas DataFrame indexed by time (in seconds). The method resets junction demands by creating a new demand pattern and using a base demand of 1. The demand pattern is resampled to match the water network model pattern timestep. This method can be used to reset demands in a water network model to demands from a pressure dependent demand simulation.
def assign_demand(self, demand, pattern_prefix='ResetDemand'): for junc_name in demand.columns: # Extract the node demand pattern and resample to match the pattern timestep demand_pattern = demand.loc[:, junc_name] demand_pattern.index = pd.TimedeltaIndex(demand_pattern.index, 's') resample_offset = str(int(self.options.time.pattern_timestep))+'S' demand_pattern = demand_pattern.resample(resample_offset).mean() / self.options.hydraulic.demand_multiplier # Add the pattern # If the pattern name already exists, this fails pattern_name = pattern_prefix + junc_name self.add_pattern(pattern_name, demand_pattern.tolist()) # Reset base demand junction = self.get_node(junc_name) junction.demand_timeseries_list.clear() junction.demand_timeseries_list.append((1.0, pattern_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def __init__(self, data_frame, mins_set):\n # super(FeaturePrevDelays, self).__init__()\n self.df = data_frame.copy()\n self.mins_set = mins_set", "def set_model_df_at_time_step(self):\n\n for subset in self.subset_list:\n ts = subset.get_time_step()\n self.model_df.loc[ts] = subset.get_model_df().loc[ts]", "def simulate(self, when):\n self._series = None\n self._series = self.series[:when]", "def single_curtailment_or_shift_each_day_between_12_and_14_pm(\n start: datetime, end: datetime, resolution: timedelta\n) -> DataFrame:\n imbalance_start_time = \"12:00\"\n imbalance_end_time = \"14:00\"\n imbalance_value = -2 # MW\n imbalance_price_between_2_and_3_am = 10 # EUR/MWh\n imbalance_price_otherwise = 5 # EUR/MWh\n df = initialize_df(\n columns=[\"Imbalance (in MW)\", \"Price (in EUR/MWh)\"],\n start=start,\n end=end,\n resolution=resolution,\n )\n df[\"Imbalance (in MW)\"] = 0\n df[\"Imbalance (in MW)\"].iloc[\n df.index.indexer_between_time(\n start_time=imbalance_start_time,\n end_time=imbalance_end_time,\n include_end=False,\n )\n ] = imbalance_value\n df[\"Price (in EUR/MWh)\"] = imbalance_price_otherwise\n df[\"Price (in EUR/MWh)\"].iloc[\n df.index.indexer_between_time(\n start_time=imbalance_start_time,\n end_time=imbalance_end_time,\n include_end=False,\n )\n ] = imbalance_price_between_2_and_3_am\n return df", "def _add_delta_times_to_df(self, route_df):\n\n \n\n route_df = route_df.assign(delta_times = self.delta_times)\n #route_df = route_df.assign(total_time = self.route_time)\n\n\n return route_df", "def single_curtailment_or_shift_each_day_between_10_and_12_am(\n start: datetime, end: datetime, resolution: timedelta\n) -> DataFrame:\n imbalance_start_time = \"10:00\"\n imbalance_end_time = \"14:00\"\n imbalance_value = 2 # MW\n imbalance_price_between_2_and_3_am = 10 # EUR/MWh\n imbalance_price_otherwise = 5 # EUR/MWh\n df = initialize_df(\n columns=[\"Imbalance (in MW)\", \"Price (in EUR/MWh)\"],\n start=start,\n end=end,\n resolution=resolution,\n )\n df[\"Imbalance (in MW)\"] = 0\n df[\"Imbalance (in MW)\"].iloc[\n df.index.indexer_between_time(\n start_time=imbalance_start_time,\n end_time=imbalance_end_time,\n include_end=False,\n )\n ] = imbalance_value\n df[\"Price (in EUR/MWh)\"] = imbalance_price_otherwise\n df[\"Price (in EUR/MWh)\"].iloc[\n df.index.indexer_between_time(\n start_time=imbalance_start_time,\n end_time=imbalance_end_time,\n include_end=False,\n )\n ] = imbalance_price_between_2_and_3_am\n return df", "def demand(self, demand):\n\n self._demand = demand", "def artificial_data(dt1, dt2, minutes=1):\n\n def fxweek(x):\n return 2 - x * (1 - x)\n\n def sat(x):\n return 2 * x + 2\n\n data = []\n dt = datetime.timedelta(minutes=minutes)\n while dt1 < dt2:\n if dt1.weekday() == 6:\n dt1 += dt\n continue\n if minutes <= 120 and not (dt1.hour >= 8 and dt1.hour <= 18):\n dt1 += dt\n continue\n x = (dt1.hour - 8) / 10\n if dt1.weekday() == 5:\n y = sat(x)\n else:\n y = fxweek(x)\n data.append({'time': dt1, 'y': y})\n dt1 += dt\n df = pandas.DataFrame(data)\n df['y'] += numpy.random.randn(df.shape[0]) * 0.1\n df['time'] = pandas.DatetimeIndex(df['time'])\n return df", "def resample(self, dataframes, freq='5s'):\n\n for df in dataframes:\n yield df.resample(freq, fill_method='bfill')", "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = 
samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def clean_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n ts = pudl.analysis.timeseries_cleaning.Timeseries(df)\n ts.flag_ruggles()\n return ts.to_dataframe(copy=False)", "def reset_after_one_day_of_operation(self, stop_month, stop_day):\n status = self.data_obj.start_the_next_day()\n if status is None:\n # already simulated all days\n return\n if stop_month == self.data_obj.MONTH and stop_day == self.data_obj.day_of_run:\n print('stopped after reaching {} days of month {}'.format(stop_day, stop_month))\n return\n print(\"reset_after_one_day_of_operation\")\n\n self.daily_OD_demand = self.data_obj.DEMAND_SOURCE\n # removes caches, except for rebal cost function, which is always fixed\n self._get_supply_per_zone.cache_clear()\n self._get_demand_per_zone.cache_clear()\n self._get_both_supply_and_demand_per_zone.cache_clear()\n self._get_demand_supply_costs_df.cache_clear()\n self._get_both_supply_and_demand_per_zone.cache_clear()\n # this is the demand file operator uses to inform zones\n # self.operator.demand_fare_stats_of_the_day = pd.read_csv(\n # \"./Data/Daily_stats/stats_for_day_{}.csv\".format(self.data_obj.day_of_run)\n # )\n if self.data_obj.MONTH != self.operator.month:\n print('data month is ', self.data_obj.MONTH)\n print('operator month is ', self.operator.month)\n print('switching')\n self.operator.month = self.data_obj.MONTH\n self.operator.demand_fare_stats_of_the_month = pd.read_csv(\n './Data/stats_for_{}_18.csv'.format(self.operator.month))\n self.operator.demand_fare_stats_of_the_day = self.operator.demand_fare_stats_of_the_month.query(\n 'Day=={}'.format(self.data_obj.day_of_run))\n else:\n self.operator.demand_fare_stats_of_the_day = self.operator.demand_fare_stats_of_the_month.query(\n 'Day=={}'.format(self.data_obj.day_of_run))\n\n vs = self.operator.demand_fare_stats_of_the_day.time_of_day_index_15m.values * 15 * 60\n vs = np.vectorize(_convect_time_to_peak_string)(vs)\n self.operator.demand_fare_stats_of_the_day[\"time_of_day_label\"] = vs # this throws the pandas warning\n ports = pd.get_dummies(self.operator.demand_fare_stats_of_the_day.time_of_day_label)\n self.operator.demand_fare_stats_of_the_day = self.operator.demand_fare_stats_of_the_day.join(ports)\n # TODO: self.daily_OD_demand is wrong, and in addition is not updated\n for v in self.vehicles:\n v.reset(self.data_obj.day_of_run, self.data_obj.MONTH)\n for z in self.zones:\n z.reset(self.daily_OD_demand, self.data_obj.WARMUP_TIME_HOUR * 3600)\n\n self.operator.revenues = []", "def __init__(self, pandas_dataframe, dates_column, target_column, regressors=None, train_test_split=0.66, seed=7,\n look_back=1, look_forward=1, interval=0):\n data = pd.DataFrame(index=pandas_dataframe[dates_column].values, data=pandas_dataframe[target_column].values)\n # Calculate the training set size\n train_size = int(len(data)*train_test_split)\n # Scale the data pre-train/test split\n scaler = MinMaxScaler(feature_range=(0, 1))\n self.scaler = scaler\n data = scaler.fit_transform(data)\n # Get the time series as 
stationary (for the given interval, if 0 don't make it a series of 0)\n if interval > 0:\n data = difference(data, interval)\n # Map the series to a supervised problem (values for days 1-n with regressors for these days to predict days\n # n + 1 ... n + k\n x, y = timeseries_to_supervised(data, look_back=look_back, look_forward=look_forward)\n # Split train and test\n self.x_train, self.y_train = x[:train_size], y[:train_size]\n self.x_test, self.y_test = x[train_size:], y[train_size:]\n # Use regressors if required\n if regressors is not None:\n self.x_train, self.x_test = add_regressors(self.x_train, self.x_test, regressors, pandas_dataframe,\n dates_column, look_forward, look_back)\n # Set last attributes\n self.seed = seed\n self.look_back = look_back\n self.look_forward = look_forward\n self.regressors = regressors", "def impute_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n results = []\n for year, gdf in df.groupby(df.index.year):\n logger.info(f\"Imputing year {year}\")\n keep = df.columns[~gdf.isnull().all()]\n tsi = pudl.analysis.timeseries_cleaning.Timeseries(gdf[keep])\n result = tsi.to_dataframe(tsi.impute(method=\"tnn\"), copy=False)\n results.append(result)\n return pd.concat(results)", "def hourly_bid_ask_spread_depth(df_liquidity, starttime, start_time_depth=\"30/09/2019 15:00\", minute_depth=30):\n\n # convert string like input in timestamp\n start_time = pd.Timestamp(starttime, tz='UTC')\n\n # obtain end times for hourly like sequence\n end_time = start_time+pd.Timedelta(minute_depth, unit='minutes')\n\n output = assign_to_time_window_depth(df_liquidity,\n start_time,\n end_time,\n df_liquidity['Start Validity Date'],\n df_liquidity['End Validity Date']\n )\n\n return output", "def set_data(self, df):\r\n # Check data is correct.\r\n cols = df.shape[1]\r\n conditions = [cols > 2,\r\n df.index.name == 'r',\r\n df.columns[0] == 't']\r\n if False in conditions:\r\n raise ValueError(f'{self} wrong data set.')\r\n\r\n # Set attributes and log\r\n self.data = df\r\n self._set_rate()\r\n logger.debug(f'{self} set data')", "def recreate_sampling_times(\n data: DataFrame,\n step_length: float,\n start_time: float,\n end_time: float,\n plot_col=None,\n) -> DataFrame:\n\n first_time_in_df = data[DFKeys.TIME.value].iloc[0]\n if start_time < first_time_in_df:\n raise ValueError(\"start time cannot precede first time in df\")\n\n get_shifted_time = lambda row: row[DFKeys.TIME.value] - start_time\n shifted_timestamps = data.apply(get_shifted_time, axis=1).rename(\n DFKeys.TIME.value, axis=1\n )\n\n duration = end_time - start_time\n timesteps = np.arange(0, duration, step_length)\n new_columns = [pd.Series(timesteps, name=DFKeys.TIME.value)]\n columns_except_time = data.columns.difference(\n [\n DFKeys.TIME.value,\n \"child_frame_id\",\n \"header.frame_id\",\n \"header.seq\",\n \"header.stamp.nsecs\",\n \"header.stamp.secs\",\n \"pose.covariance\",\n \"twist.covariance\",\n \"pins_0\",\n \"pins_1\",\n \"pins_2\",\n \"pins_3\",\n \"pins_4\",\n \"pins_5\",\n \"pins_6\",\n \"pins_7\",\n ]\n )\n\n for col_name in columns_except_time:\n f = interp1d(shifted_timestamps.values, data[col_name].values)\n new_columns.append(pd.Series(f(timesteps), name=col_name))\n\n data_new = pd.concat(new_columns, axis=1)\n\n if plot_col in data.columns:\n SAVEDIR = Path(\"results/interpolation\")\n sea.set_style(\"white\")\n # plt.figure(figsize=(5, 2.5))\n sea.lineplot(x=shifted_timestamps.values, y=data[plot_col], label=\"original\")\n sea.lineplot(\n x=DFKeys.TIME.value, y=plot_col, 
data=data_new, label=\"interpolated\"\n )\n # plt.ylabel(\"Velocity\")\n # plt.savefig(SAVEDIR.joinpath(\"%s.pdf\" % plot_col))\n plt.show()\n\n return data_new", "def init_columns(cycle_df, datatype):\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n assert type(cycle_df) == pd.DataFrame\n assert volt_col in cycle_df.columns\n assert dis_cap_col in cycle_df.columns\n assert char_cap_col in cycle_df.columns\n\n cycle_df = cycle_df.reset_index(drop=True)\n cycle_df['dV'] = None\n cycle_df['Discharge_dQ'] = None\n cycle_df['Charge_dQ'] = None\n #cycle_df['Discharge_dQ/dV'] = None\n #cycle_df['Charge_dQ/dV'] = None\n return cycle_df", "def _initialize_df(self, df):\n df['values'] = (self.tc.instrument_returns['cumulative'] *\n self.tc.starting_cash).mul(self.target_weights, axis=1).values * (1 - self.tc.commission)\n df['allocations'] = self.df['values'].div(df['values'].sum(axis=1), axis=0)\n df['returns'] = (df['values'].sum(axis=1)).pct_change(1).fillna(0)", "def ts_resample(self):\n try:\n ts_freq = pd.DataFrame(\n index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),\n columns=['dummy'])\n except ValueError:\n self._uvts_cls_logger.exception(\"Exception occurred, possibly incompatible frequency!\")\n sys.exit(\"STOP\")\n\n if self.fill_method == 'ffill':\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n self.ts_df.y = self.ts_df.y.fillna(method='ffill')\n # if np.isnan ( self.ts_df.y ).any ():\n # self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )\n else: # interp\n xp = np.linspace(0, self.ts_df.size, self.ts_df.size, endpoint=False)\n fp = self.ts_df['y']\n # join\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n # pick new points\n x = np.linspace(0, ts_freq.size, ts_freq.size, endpoint=False)\n x = x[self.ts_df['y'].isna()]\n print(x.size)\n print(x)\n\n # put the values\n self.ts_df.y[self.ts_df['y'].isna()] = np.interp(x, xp, fp)\n\n if np.isnan(self.ts_df.y).any():\n self._uvts_cls_logger.warning(\"Some NaN found, something went wrong, check the data!\")\n sys.exit(\"STOP\")\n\n self._uvts_cls_logger.info(\"Time series resampled at frequency: \" + str(self.ts_df.index.freq) +\n \". 
New shape of the data: \" + str(self.ts_df.shape))\n self._uvts_cls_logger.info(\"Using time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n return self", "def fillPositions(self):\r\n if self.th is not None:\r\n self.df['POSITION'] = self.th.positions['Qty']\r\n self.df['REGS'] = self.th.positions['REGS']\r\n self.df['144A'] = self.th.positions['144A']\r\n self.df['POSITION'].fillna(0, inplace=True)\r\n self.df['REGS'].fillna(0, inplace=True)\r\n self.df['144A'].fillna(0, inplace=True)\r\n self.df['RISK'] = -self.df['RISK_MID'] * self.df['POSITION'] / 10000.", "def reset_initial_values(self):\n #### TODO: move reset conditions to /sim\n self.sim_time = 0.0\n self._prev_sim_time = None\n\n for name, node in self.nodes(Junction):\n node._head = None\n node._demand = None\n node._pressure = None\n node._leak_demand = None\n node._leak_status = False\n node._is_isolated = False\n\n for name, node in self.nodes(Tank):\n node._head = node.init_level+node.elevation\n node._prev_head = node.head\n node._demand = None\n node._leak_demand = None\n node._leak_status = False\n node._is_isolated = False\n\n for name, node in self.nodes(Reservoir):\n node._head = None # node.head_timeseries.base_value\n node._demand = None\n node._leak_demand = None\n node._is_isolated = False\n\n for name, link in self.links(Pipe):\n link._user_status = link.initial_status\n link._setting = link.initial_setting\n link._internal_status = LinkStatus.Active\n link._is_isolated = False\n link._flow = None\n link._prev_setting = None\n\n for name, link in self.links(Pump):\n link._user_status = link.initial_status\n link._setting = link.initial_setting\n link._internal_status = LinkStatus.Active\n link._is_isolated = False\n link._flow = None\n if isinstance(link, PowerPump):\n link.power = link._base_power\n link._prev_setting = None\n\n for name, link in self.links(Valve):\n link._user_status = link.initial_status\n link._setting = link.initial_setting\n link._internal_status = LinkStatus.Active\n link._is_isolated = False\n link._flow = None\n link._prev_setting = None\n\n for name, control in self.controls():\n control._reset()", "def _auto_fill(series: TimeSeries, **interpolate_kwargs) -> TimeSeries:\n\n series_temp = series.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if \"limit_direction\" not in interpolate_kwargs:\n interpolate_kwargs[\"limit_direction\"] = \"both\"\n interpolate_kwargs[\"inplace\"] = True\n series_temp.interpolate(**interpolate_kwargs)\n return TimeSeries.from_dataframe(\n series_temp,\n freq=series.freq,\n static_covariates=series.static_covariates,\n hierarchy=series.hierarchy,\n )", "def increment_time(self, **kwargs):\n \n #Pull all optional keyword arguements\n if 'timerange' in kwargs:\n timerange = kwargs.pop('timerange')\n else:\n timerange = 7\n \n if 'display' in kwargs:\n displayflag = kwargs.pop('display')\n else:\n displayflag = 1\n \n if 'auto' in kwargs:\n autoflag = kwargs.pop('auto')\n else:\n autoflag = 0\n \n if 'triggered' in kwargs:\n triggered_rules = kwargs.pop('triggered')\n else:\n triggered_rules = []\n \n #Run simulation one day at a time until specified end point is reached\n count = range(0,timerange)\n for i in count:\n \n \n #Increment one day if at least one infected person remains. 
If not, end the simulation\n if self.SD_Map.IPop.value() > 1:\n time = self.timeSeries[-1]\n self.timeSeries.append(time+1)\n self.SD_Map.update_all(self.timestep(), len(self.timeSeries)-2)\n else:\n print('Done!')\n \n #Update the time display\n self.timev.set(self.timeSeries[-1])\n \n #Add any triggered rules to the rule log display\n if triggered_rules != []:\n day_text = self.translate('Day')+' ' + str(self.timeSeries[-1]) \n rule_text = '; ' + self.translate('Rules') + ': ' + str(triggered_rules)[1:-1]\n log_text = day_text + rule_text\n self.list_info_boxes['Log'].insert(tk.END, log_text)\n \n #If appropriate, update all of the graphs\n if displayflag == 1:\n if self.arrangment == ['Map', 'Graph']:\n index = 2\n invertflag = 1\n else:\n index = 0\n invertflag = 0\n \n #Select all of the graphs\n canvaslist = []\n for entrylist in self.graph_canvas_list:\n for entry in entrylist:\n canvaslist.append(entry)\n\n #For each graph, delete it and replace it with an update graph\n for canvas in canvaslist:\n if index < 2:\n col = 0\n inputindex = index\n self.figures[index].clear()\n plt.close(self.figures[index])\n else:\n col = 1\n inputindex = index - 2\n if invertflag:\n self.figures[inputindex].clear()\n plt.close(self.figures[inputindex])\n else:\n self.figures[index].clear()\n plt.close(self.figures[index])\n \n #Make new graph\n framename = canvas.get_tk_widget().master\n canvas.get_tk_widget().destroy()\n graph = self.translate(self.graph_setting_list[col][inputindex].get(),\n input_language=self.language,\n output_language='english')\n canvas,fig = self.make_graph(framename, graph,\n gridpos = inputindex*2+1)\n self.graph_canvas_list[col][inputindex]=canvas\n \n #Update figures list\n if invertflag:\n self.figures[inputindex] = fig\n else:\n self.figures[index] = fig\n index += 1", "def downsample_panel(minute_rp, daily_rp, mkt_close):\n\n cur_panel = minute_rp.get_current()\n sids = minute_rp.minor_axis\n day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)\n dt1 = trading.environment.normalize_date(mkt_close)\n dt2 = trading.environment.next_trading_day(mkt_close)\n by_close = functools.partial(get_date, mkt_close, dt1, dt2)\n for item in minute_rp.items:\n frame = cur_panel[item]\n func = get_sample_func(item)\n # group by trading day, using the market close of the current\n # day. 
If events occurred after the last close (yesterday) but\n # before today's close, group them into today.\n dframe = frame.groupby(lambda d: by_close(d)).agg(func)\n for stock in sids:\n day_frame[stock][item] = dframe[stock].ix[dt1]\n # store the frame at midnight instead of the close\n daily_rp.add_frame(dt1, day_frame)", "def reset(self):\n\n\t\tf = self.no_of_ferries\n\t\tt = self.no_of_discrete_time_intervals\n\t\tvmax = self.maximam_velocity_vector\n\t\tports = self.port_coordinates_vector\n\t\ttrips = self.no_of_trips_vector\n\t\thaltTime = self.halt_time_at_port\n\t\tstartBuffer = self.buffer_before_start\n\n\t\tschedule = np.array([[0.0 for x in range(t)] for y in range(f)])\n\t\t\n\t\t#Find distance from port co-ordinates\n\t\tportA = ports[0]\n\t\tportB = ports[1]\n\t\tself.dst = dst = distance.euclidean(portA, portB)\n\n\t\tfinishTime = [0.0 for x in range(f)]\n\t\tstartTime = [0.0 for x in range(f)]\n\n\t\t#Calculate total time for all ferries to complete required trips considering respective maximum velocities\n\t\tfor fIndex in range(f):\n\t\t\tif(fIndex > 0):\n\t\t\t\tstartTime[fIndex] = startTime[fIndex - 1] + startBuffer #TODO: Randomize start time\n\t\t\ttripTime = ((2 * dst * trips[fIndex])/vmax[fIndex]) + haltTime\n\t\t\tfinishTime[fIndex] = (startTime[fIndex] + tripTime)\n\n\t\tself.time_step = time_step = max(finishTime)/(t-1);\n\t\tlogging.debug(\"Time step: %f hrs\" % time_step)\n\t\tlogging.debug(\"Total time: %s hrs\" % format(max(finishTime), '.2f'))\n\n\t\tself.fSchedule = schedule = self.getLinearSchedule(schedule, startTime)\t\n\t\treturn schedule;", "def test_mase():\n joined_data = pd.DataFrame({'temp': [1, 2, 1, 2, 1, 2, 1, 2, 2] + [2] * day_factor,\n 'dt': [1575082800, 1575093600, 1575104400, 1575115200, 1575126000, 1575136800, 1575147600, 1575158400, 1575169200, 1575180000, 1575190800, 1575201600, 1575212400, 1575223200, 1575234000, 1575244800, 1575255600],\n 'today': ['2019-11-30'] * 7 + ['2019-12-01'] * day_factor + ['2019-12-02'] * 2,\n 't5': [4.0, 3, 4.0, 3, 4.0, 3, 4.0, 3, 4.0] + [4.0] * day_factor,\n 't4': [3, 1, 3, 1, 3, 1, 3, 1, 3] + [3] * day_factor,\n 't3': [2.0, 4.0, 2.0, 4.0, 2.0, 4.0, 2.0, 4.0, 2.0] + [2.0] * day_factor,\n 't2': [1, 5, 1, 5, 1, 5, 1, 5, 1] + [1] * day_factor,\n 't1': [1.0, 3, 1.0, 3, 1.0, 3, 1.0, 3, 1.0] + [1.0] * day_factor})\n\n assert masetx(joined_data, 'temp') == [np.nan, np.nan, np.nan, 1, 2.25]", "def __set_time_data(self, tdata):\n assert tdata.shape[-1] == self._nt\n self._in_time = tdata\n self._in_freq = None", "def bootstrap(data, start=None, end=None, period_length=3, paths=100, replace=True):\n if start:\n data = data.loc[start:]\n if end:\n data = data.loc[:end]\n\n daily = data.resample(\"B\").first()\n data_indexed = pd.DataFrame(\n {\n \"open\": data[\"open\"] / data[\"close\"],\n \"high\": data[\"high\"] / data[\"close\"],\n \"low\": data[\"low\"] / data[\"close\"],\n \"close\": data[\"close\"].pct_change(),\n \"volume\": data[\"volume\"],\n \"barCount\": data[\"barCount\"],\n }\n )\n data_indexed = data_indexed.iloc[1:]\n\n days = len(daily.index)\n draws = int(days / period_length)\n\n d = np.random.choice(daily.index[:-period_length], size=(draws, paths))\n lookup_table = pd.Series(daily.index.shift(period_length), index=daily.index)\n\n output = []\n for path in d.T:\n p = pd.concat([data_indexed.loc[i : lookup_table[i]].iloc[:-1] for i in path])\n p.set_index(\n pd.date_range(freq=\"min\", start=data.index[0], periods=len(p), name=\"date\"),\n inplace=True,\n )\n\n p[\"close\"] = (p[\"close\"] + 
1).cumprod() * data.iloc[0][\"close\"]\n o = pd.DataFrame(\n {\n \"open\": p[\"open\"] * p[\"close\"],\n \"high\": p[\"high\"] * p[\"close\"],\n \"low\": p[\"low\"] * p[\"close\"],\n \"close\": p[\"close\"],\n \"volume\": p[\"volume\"],\n \"barCount\": p[\"barCount\"],\n }\n )\n output.append(o)\n return output" ]
[ "0.5316171", "0.50743294", "0.5053788", "0.5013383", "0.49570274", "0.49486694", "0.4925754", "0.48887652", "0.4883495", "0.4799642", "0.4782267", "0.47726718", "0.47573262", "0.4728699", "0.4728453", "0.47173283", "0.47091067", "0.46539465", "0.4650706", "0.46276408", "0.46205124", "0.45910168", "0.45902875", "0.45762014", "0.45725933", "0.45516688", "0.45464662", "0.45317236", "0.45190966", "0.45142207" ]
0.73366606
0
Query link attributes, for example get all pipe diameters > threshold
def query_link_attribute(self, attribute, operation=None, value=None, link_type=None): link_attribute_dict = {} for name, link in self.links(link_type): try: if operation == None and value == None: link_attribute_dict[name] = getattr(link, attribute) else: link_attribute = getattr(link, attribute) if operation(link_attribute, value): link_attribute_dict[name] = link_attribute except AttributeError: pass return pd.Series(link_attribute_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_attr(graph: nx.Graph, attr: str):\n return [data[attr] for _, _, data in graph.edges.data()]", "def get_attributes(\n attribute_category: Optional[str] = Query(None, alias='attributeCategory'),\n attribute_db_id: Optional[str] = Query(None, alias='attributeDbId'),\n attribute_name: Optional[str] = Query(None, alias='attributeName'),\n germplasm_db_id: Optional[str] = Query(None, alias='germplasmDbId'),\n external_reference_i_d: Optional[str] = Query(None, alias='externalReferenceID'),\n external_reference_source: Optional[str] = Query(\n None, alias='externalReferenceSource'\n ),\n page: Optional[int] = None,\n page_size: Optional[int] = Query(None, alias='pageSize'),\n authorization: Optional[constr(regex=r'^Bearer .*$')] = Query(\n None, alias='Authorization'\n ),\n) -> GermplasmAttributeListResponse:\n pass", "def getGeometricElements(link):\n visuals = []\n collisions = []\n if 'visual' in link:\n visuals = [link['visual'][v] for v in link['visual']]\n if 'collision' in link:\n collisions = [link['collision'][v] for v in link['collision']]\n return visuals, collisions", "def reqs (self):\n return (link for s, d, link in self.network.edges_iter(data=True) if\n link.type == Link.REQUIREMENT)", "def test_can_filter_attributes(self):\n text = '<b><a href=\"\" target=\"_blank\">Example</a></b>'\n filter = Bleach(\n tags=['a'],\n attributes=dict(a=['href', 'title'])\n )\n filtered = filter.filter(text)\n expected = '<a href=\"\">Example</a>'\n self.assertEquals(expected, filtered)", "def select_attribute(instances, available_attributes, domain):\n\n\n entropies = {}\n for att in available_attributes:\n entropies[att] = entropy_new(instances, att, domain)\n \n next_attrib, (_ent, leaves) = min(list(entropies.items()), key=lambda x: x[1][0])\n \n return next_attrib, leaves", "def amh_attr_filter_query(self):\n \n attr_filter_query = \"\"\"\n WITH {final_cte_name} as (\n -- Pull list of devices that were active (has any row; don't need TVT >0) in the past 4 weeks\n SELECT DISTINCT device_id\n FROM tubidw.all_metric_hourly\n WHERE DATE_TRUNC('week',hs) >= dateadd('week',-4,DATE_TRUNC('week',GETDATE()))\n AND DATE_TRUNC('week',hs) < DATE_TRUNC('week',GETDATE())\n {attr_filter} -- attribute filters dynamically populate here\n -- TODO: currently can't get a metric/attribute combo filter, like \"devices that watched at least 50% of a specific content_id\"\n )\n \"\"\"\n return attr_filter_query", "def main(attr, condition, val):\r\n data=dict()\r\n listOfData=list()\r\n PREFIX = \"\"\"\r\n PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n PREFIX owl: <http://www.w3.org/2002/07/owl#>\r\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\r\n PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\r\n PREFIX : <http://www.semanticweb.org/user/ontologies/2020/7/untitled-ontology-32#>\r\n \"\"\"\r\n FEATURES = [\"Age\", \"BS Fast\", \"BS pp\", \"Plasma R\", \"Plasma F\", \"HbAlc\"]\r\n\r\n request = PREFIX + \"\"\"\r\n SELECT ?age ?bsf ?bsp ?plasmaR ?plasmaF ?hbalc\r\n WHERE {{ \r\n ?subject :hasID ?id; \r\n :hasAge ?age;\r\n :hasBloodSugar ?bsf;\r\n :hasBloodPressure ?bsp;\r\n :hasPlasmaR ?plasmaR;\r\n :hasPlasmaF ?plasmaF;\r\n :hasHBALC ?hbalc.\r\n FILTER({attr} {condition} {val}).\r\n }}\r\n \"\"\".format(attr=attr, condition=condition, val=val)\r\n\r\n graph = connectOntology(os.path.join(settings.BASE_DIR, \"dbo3.owl\"))\r\n if graph:\r\n results = getData(graph, request)\r\n else:\r\n print(\"Data could not be fetched\")\r\n\r\n for result in results:\r\n for feature, row in 
zip(FEATURES, result):\r\n data[feature]=float(stripData(row))\r\n #print(\"{}: {}\".format(feature, stripData(row)))\r\n listOfData.append(data)\r\n\r\n #print((listOfData))\r\n return listOfData", "def test_attribute_authenticated_has_attributes(testapp, login_fixture, fill_the_db):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert len(response.html.find_all(\"img\")) == 2", "def prvs(self): \n return self._link_reg.prvs", "def batch_get_link_attributes(self,\n TypedLinkSpecifier: Dict[str, Any],\n AttributeNames: List[str]) -> Dict[str, Any]:\n return {\n 'GetLinkAttributes': {\n 'TypedLinkSpecifier': TypedLinkSpecifier,\n 'AttributeNames': AttributeNames\n }\n }", "def get_attributevalues(\n attribute_value_db_id: Optional[str] = Query(None, alias='attributeValueDbId'),\n attribute_db_id: Optional[str] = Query(None, alias='attributeDbId'),\n attribute_name: Optional[str] = Query(None, alias='attributeName'),\n germplasm_db_id: Optional[str] = Query(None, alias='germplasmDbId'),\n external_reference_i_d: Optional[str] = Query(None, alias='externalReferenceID'),\n external_reference_source: Optional[str] = Query(\n None, alias='externalReferenceSource'\n ),\n page: Optional[int] = None,\n page_size: Optional[int] = Query(None, alias='pageSize'),\n authorization: Optional[constr(regex=r'^Bearer .*$')] = Query(\n None, alias='Authorization'\n ),\n) -> GermplasmAttributeValueListResponse:\n pass", "def links (self):\n return (link for src, dst, link in self.network.edges_iter(data=True) if\n link.type == Link.STATIC or link.type == Link.DYNAMIC)", "def getResourceAttributes(self, authenticationToken, guid):\r\n pass", "def visit(self, node):\n super(_GetattrNodeVisitor, self).visit(node)", "def attribute_lookup(obj, *_, **query):\n for query_str, query_val in query.items():\n if not _attribute_lookup(\n obj, query_str, query_val,\n SEP, OperatorCollection,\n ):\n return False\n return True", "def search_params_for_link(link):\n return {\n 'filter_link': link,\n 'debug': 'include_withdrawn',\n 'fields[]': [\n 'indexable_content',\n 'title',\n 'description',\n 'expanded_organisations',\n 'expanded_topics',\n ],\n }", "def getRuntimeAttrs(ad):\n \n re_runtime = re.compile('^(.*)Runtime$')\n\n # some attributes should always be ignored\n re_ignore = re.compile('^DC(Socket|Pipe)')\n ignored_attrs = ['SCGetAutoCluster_cchit']\n\n attrs = []\n for key in ad.keys():\n match = re_runtime.match(key)\n if match:\n attr = match.groups()[0]\n if not (re_ignore.match(attr) or (attr in ignored_attrs)):\n attrs.append(attr)\n\n return attrs", "def findall_by_attr(node, value, name=\"name\", maxlevel=None, mincount=None, maxcount=None):\n return _findall(\n node,\n filter_=lambda n: _filter_by_name(n, name, value),\n maxlevel=maxlevel,\n mincount=mincount,\n maxcount=maxcount,\n )", "def get_condition(self) -> dict:\n url = self.base_url + \"/condition\"\n condition = self._session.get(url).json()\n keys = [\"bandwidth\", \"latency\", \"jitter\", \"loss\"]\n result = {k: v for (k, v) in condition.items() if k in keys}\n return result", "def nattributes(self):\n return self.host.nattributes", "def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)", "def find_attributes(result):\n soup = BeautifulSoup(result.text, \"lxml-xml\")\n modifiers = soup.find_all('Modifier')\n\n weights = dict()\n for modifier in modifiers:\n attribute = modifier.string.strip().encode(\"utf-8\")\n weight = int(modifier.get('weight'))\n weights[attribute] = weight\n\n most_common 
= Counter(weights).most_common(7)\n attributes = [attribute[0] for attribute in most_common]\n\n return attributes", "def print_attribute(attributes):\n for attribute in attributes:\n print ' ',\n change_color_by_tag(attribute)\n if attribute['ExtAttributes']:\n print_extattributes_of_member(attribute['ExtAttributes'])\n print attribute['Type'],\n print attribute['Name']", "def getResourceAttributes(self, authenticationToken, guid):\r\n self.send_getResourceAttributes(authenticationToken, guid)\r\n return self.recv_getResourceAttributes()", "def get_attributes():\n bot_id = socket.gethostname().lower().split('.', 1)[0]\n return os_utilities.get_attributes(bot_id)", "def attributes(self):\n return self.host.attributes", "def test_get_attributes(self):\n pass", "def attribute_search(self, attribute, filters):\n for i in self.response_info['results']:\n if filters in i[attribute]:\n self.output.append(i)\n self.counter += 1", "def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)" ]
[ "0.5418465", "0.5310432", "0.52736616", "0.52311105", "0.5068784", "0.50490856", "0.5038029", "0.5005454", "0.4983717", "0.49739105", "0.4949673", "0.49282217", "0.4916343", "0.48925012", "0.4888643", "0.48490494", "0.4836985", "0.48068938", "0.4806892", "0.4803262", "0.47883493", "0.47872433", "0.4783093", "0.47810063", "0.47773743", "0.47736296", "0.4769988", "0.47266468", "0.47259405", "0.47229365" ]
0.62613106
0
Convert all controls to rules. Note that for an exact match between controls and rules, the rule timestep must be very small.
def convert_controls_to_rules(self, priority=3):
    for name in self.control_name_list:
        control = self.get_control(name)
        if isinstance(control, Control):
            act = control.actions()[0]
            cond = control.condition
            rule = Rule(cond, act, priority=priority)
            self.add_control(name.replace(' ', '_')+'_Rule', rule)
            self.remove_control(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_all_rules(self):\n\n def compatible(pattern1, pattern2, direction):\n \"\"\"Returns `True` if `pattern2` is compatible with `pattern1` in the `direction`,\n otherwise return `False`.\"\"\"\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]\n\n for index in range(len(self.patterns)):\n for ind in range(index + 1):\n for direction in (0, 2):\n if compatible(self.patterns[index], self.patterns[ind], direction):\n self.rules[index][direction].add(ind)\n self.rules[ind][direction + 1].add(index)", "def simplify_rules(self):\n for rule in self.grammar:\n if re.search(r'->', rule):\n temp = re.split(r'->', rule)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. The rule does not have the RHS.\")\n return\n\n lhs = temp[0]\n rhs = temp[1]\n temp = []\n\n if re.search(r'\\|', rhs):\n temp = re.split(r'\\|', rhs)\n if len(temp[1].strip()) == 0:\n print(\"Invalid rule. Unnecessary use of `|`.\")\n return\n\n for i in range(0, len(temp)):\n temp[i] = temp[i].strip()\n\n if len(temp) == 0:\n temp.append(rhs.strip())\n self.rules[lhs.strip()] = temp\n temp = []\n else:\n self.rules[lhs.strip()] = temp\n\n else:\n print(\"Invalid rule. The rule is not deriving anything.\")\n return\n\n print(\"Modified rules : \")\n print(self.rules)", "def _rules_to_trxf_dnf_ruleset(self, rules, label):\n conjunctions = list()\n for rule in rules:\n conjunction = self._rule_to_trxf_conjunction(rule)\n conjunctions.append(conjunction)\n dnf_ruleset = DnfRuleSet(conjunctions, label)\n return dnf_ruleset", "def _create_rules(rules, node_rules, node_atrrs):\n for node_attr, node_value in node_atrrs.iteritems():\n if node_attr not in node_rules:\n continue\n for rule in node_rules[node_attr]:\n # if isinstance(rule['from'], REGEX_TYPE) and node_value.startswith('mediumtext'):\n if rule['from'] == node_value:\n rules[node_attr] = rule['to']", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def _compile_rules(self):\n for state, table in self.RULES.items():\n patterns = []\n actions = []\n nextstates = []\n for i, row in enumerate(table):\n if len(row) == 2:\n pattern, action_ = row\n nextstate = None\n elif len(row) == 3:\n pattern, action_, nextstate = row\n else:\n fstr = \"invalid RULES: state {}, row {}\"\n raise CompileError(fstr.format(state, i))\n patterns.append(pattern)\n actions.append(action_)\n nextstates.append(nextstate)\n reobj = re.compile(\"|\".join(\"(\" + p + \")\" for p in patterns))\n self._rules[state] = (reobj, actions, nextstates)", "def apply_rules(self, to_convert: str, index: bool = False, debugger: bool = False) -> Union[str, Tuple[str, Indices]]:\n indices = {}\n rules_applied = []\n\n if not self.case_sensitive:\n to_convert = 
to_convert.lower()\n\n if self.norm_form:\n to_convert = normalize(to_convert, self.norm_form)\n\n # initialized converted\n converted = to_convert\n\n if index:\n input_index = 0\n output_index = 0\n new_index = {}\n for char in range(len(to_convert)):\n # account for many-to-many rules making the input index\n # outpace the char-by-char conversion\n if char < input_index:\n continue\n if not char in new_index or new_index[char]['input_string'] != to_convert[char]:\n input_index = char\n new_index[char] = {'input_string': to_convert[char],\n 'output': {}}\n # intermediate form refreshes on each new char\n intermediate_conversion = to_convert\n rule_applied = False\n # go through rules\n for io in self.mapping:\n io_copy = copy.deepcopy(io)\n # find all matches.\n for match in io_copy['match_pattern'].finditer(intermediate_conversion):\n match_index = match.start()\n # if start index of match is equal to input index,\n # then apply the rule and append the index-formatted tuple\n # to the main indices list\n if match_index == input_index:\n if self.out_delimiter:\n # Don't add the delimiter to the last segment\n if not char + (len(io_copy['in']) - 1) >= len(to_convert) - 1:\n io_copy['out'] += self.out_delimiter\n # convert the final output\n output_sub = re.sub(\n re.compile(r'{\\d+}'), '', io_copy['out'])\n intermediate_output = intermediate_conversion[:char] + re.sub(\n io_copy[\"match_pattern\"], output_sub, intermediate_conversion[char:])\n if debugger and intermediate_conversion != intermediate_output:\n applied_rule = {\"input\": intermediate_conversion,\n \"rule\": io_copy, \"output\": intermediate_output}\n rules_applied.append(applied_rule)\n # update intermediate converted form\n intermediate_conversion = intermediate_output\n # get the new index tuple\n non_null_index = self.return_index(\n input_index, output_index, io_copy['in'], io_copy['out'],\n to_convert, new_index)\n # if it's not empty, then a rule has applied and it can overwrite\n # the previous intermediate index tuple\n if non_null_index:\n rule_applied = True\n new_index = {**new_index, **non_null_index}\n # if you've gone past the input_index, you can safely break from the loop\n elif match_index > input_index:\n break\n # increase the index counters\n # new_index = self.convert_index_to_tuples(new_index)\n # if the rule applied\n if rule_applied and new_index[char]['output']:\n # add the new index to the list of indices\n indices = {**indices, **new_index}\n # get the length of the new index inputs and outputs\n # and increase the input counter by the length of the input\n input_index = max(new_index.keys())\n input_index += 1\n # do the same with outputs\n outputs = {}\n for v in new_index.values():\n outputs = {**outputs, **v['output']}\n output_index = max(outputs.keys())\n output_index += 1\n else:\n # if a rule wasn't applied, just add on the input character\n # as the next input and output character\n new_index = {**new_index, **{input_index: {'input_string': to_convert[input_index],\n 'output': {output_index: to_convert[input_index]}}}}\n # merge it\n indices = {**indices, **new_index}\n # add one to input and output\n input_index += 1\n output_index += 1\n else:\n # if not worrying about indices, just do the conversion rule-by-rule\n for io in self.mapping:\n io_copy = copy.deepcopy(io)\n if self.out_delimiter:\n io_copy['out'] += self.out_delimiter\n output_sub = re.sub(re.compile(r'{\\d+}'), '', io_copy['out'])\n if re.search(io_copy[\"match_pattern\"], converted):\n inp = converted\n outp = re.sub(\n 
io_copy[\"match_pattern\"], output_sub, converted)\n if debugger and inp != outp:\n applied_rule = {\"input\": inp,\n \"rule\": io_copy, \"output\": outp}\n rules_applied.append(applied_rule)\n converted = outp\n # Don't add the delimiter to the last segment\n converted = converted.rstrip()\n if index and debugger:\n io_states = Indices(indices)\n return (io_states.output(), io_states, rules_applied)\n if debugger:\n return (converted, rules_applied)\n if index:\n io_states = Indices(indices)\n return (io_states.output(), io_states)\n return converted", "def update_acc_by_rules(self) -> None:\n for rule, coeff in self.rules.items():\n acc_delta = rule(self) # can't call self.rule\n self.update_acc(acc_delta, coeff)", "def apply_rules(term: Term, rules):\n return functools.reduce(apply_rule, rules, term)", "def rules(cls, search_pattern: str, *rules: Dict[str, str]) -> PhonTransform:\n rule_dict = {k: v for rule in rules for k, v in rule.items()}\n sub_func = lambda match: rule_dict[match.group()]\n return cls(search_pattern, sub_func)", "def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)", "def apply_rules(self):\n if len(self.rules) == 0:\n return\n for gene in self.population:\n for rule in self.rules:\n if rule.type == \"gene\":\n rule.check_and_apply(gene)", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def convert_rules(args: argparse.Namespace) -> int:\n \n with StringIO() as asp_if:\n rules2asp(args.rules, outf=asp_if)\n args.out.write(evaluate_template(asp_if.getvalue()))\n return 0", "def make_rules(self, old_rules):\n rules = defaultdict(set)\n\n def recurse_disc_rule(attr, rule):\n \"\"\"\n Recursively partition multivalued discrete attributes if\n its worth it\n \"\"\"\n\n\n ro = RuleObj(rule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n\n if not self.prune_rule(ro):\n return set([ro])\n \n c = rule.filter.conditions[0]\n var_type = rule.data.domain[c.position].var_type\n\n if (var_type == Orange.feature.Type.Discrete):\n if len(c.values) == 1:\n return [ro]\n \n refiner = BeamRefiner(attrs=[attr], fanout=10)\n ret = set()\n for _, newrule in refiner(rule):\n ret.update(recurse_disc_rule(attr, newrule))\n return ret\n else:\n if len(rule.data) < self.min_pts:\n return [ro]\n return [ro]\n\n # XXX: figure out this logic!\n\n refiner = BeamRefiner(attrs=[attr], fanout=2)\n ret = set()\n for _, newrule in refiner(rule):\n newro = RuleObj(newrule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n ret.update(recurse_disc_rule(attr, newrule))\n\n \n if old_rules is None:\n base_rule = SDRule(self.full_table, None) \n refiner = BeamRefiner(attrs=self.cols, fanout=10)\n #refiner = BeamRefiner(attrs=['recipient_nm'], fanout=30) \n\n \n for attr, rule in refiner(base_rule):\n ros = recurse_disc_rule(attr, rule)\n #self.top_k({None:ros})\n ros = filter(self.prune_rule, ros)\n rules[(attr,)].update(ros)\n\n else:\n attrs = old_rules.keys()\n for a_idx, attr1 in enumerate(attrs):\n for attr2 in attrs[a_idx+1:]:\n merged_attrs = set(attr1).union(attr2)\n max_attrs_len = max(len(attr1), len(attr2))\n if len(merged_attrs) == max_attrs_len:\n continue\n \n \n a1rules, a2rules = old_rules[attr1], old_rules[attr2]\n\n for ro in self.merge_dims(a1rules, a2rules):\n key = ro.rule.attributes\n\n #self.top_k({None:(ro,)})\n if self.prune_rule(ro):\n rules[key].add(ro)\n \n return rules", "def match_rules(rules, 
wm):\n res = []\n for r in rules:\n new_patterns = match_rule(r[0],r[1],r[2], wm)\n if new_patterns:\n print(\" Match succeeds\")\n print(\" Adding assertions to WM\")\n else:\n print(\" Match fails\")\n for n in new_patterns:\n if (n not in wm) and (n not in res):\n print(\" \",n)\n res.append(n)\n # print(\"new patterns so far = \", res)\n # print()\n # for testing\n # break\n return res", "def match_rules(\n dataset,\n rules: List[Rule],\n answers=None,\n bsize=500,\n stop_all_have_rules=False,\n stop_all_correct_rules=False,\n):\n # filling transaction matrix\n max_word_id = max(max(d) for d in dataset)\n transactions_matrix = np.zeros((len(dataset), max_word_id + 1), dtype=bool)\n for i, d in enumerate(dataset):\n transactions_matrix[i, d] = True\n\n transactions_matrix = torch.from_numpy(transactions_matrix).bool().cuda()\n pad_index = transactions_matrix.shape[1]\n N = transactions_matrix.shape[0]\n \n # pad index\n transactions_matrix = torch.cat(\n (transactions_matrix, torch.ones(N, 1).bool().cuda()), dim=1,\n )\n\n best_rules = dict()\n best_correct_rule = dict()\n all_rules = [[] for _ in range(len(transactions_matrix))]\n correct_rules = [[] for _ in range(len(transactions_matrix))]\n\n # Progress bars and iterables\n pbar = tqdm(total=len(transactions_matrix))\n pbar.set_description(\"Total rules found \")\n pbar_correct = tqdm(total=len(transactions_matrix))\n pbar_correct.set_description(\"Correct rules found\")\n \n for i in tqdm(range(0, len(rules), bsize), desc=\"Rules processed\"):\n rs = rules[i : i + bsize]\n itemsets = [r.itemset for r in rs]\n max_length = max([len(r) for r in itemsets])\n itemsets = [list(r) + [pad_index] * (max_length - len(r)) for r in itemsets]\n indexes_concerned = (\n (transactions_matrix[:, itemsets].all(dim=2).nonzero())\n .detach()\n .cpu()\n .numpy()\n ) # (N * 2) where 2 = (trans_id, rule_id)\n transactions_for_rule = [[] for _ in range(len(rs))]\n\n num_trans_found = 0\n num_correct_trans_found = 0\n\n for j in range(len(indexes_concerned)):\n trans_id, rule_id = indexes_concerned[j]\n rule_id = rule_id + i\n rule = rules[rule_id]\n transactions_for_rule[rule_id - i].append(trans_id)\n if trans_id not in best_rules:\n num_trans_found += 1\n best_rules[trans_id] = rule\n all_rules[trans_id].append(rule)\n if rule.ans == answers[trans_id]:\n if trans_id not in best_correct_rule:\n best_correct_rule[trans_id] = rule\n num_correct_trans_found += 1\n correct_rules[trans_id].append(rule)\n\n pbar.update(num_trans_found)\n pbar_correct.update(num_correct_trans_found)\n\n if stop_all_have_rules and len(best_rules) == len(transactions_matrix):\n break\n if stop_all_correct_rules and len(best_correct_rule) == len(\n transactions_matrix\n ):\n break\n pbar.close()\n pbar_correct.close()\n del transactions_matrix\n\n return (\n all_rules,\n correct_rules,\n )", "def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0", "def add_rules(self, rules):\n self.model_sort.handler_block(self.row_reordered_signal)\n i = len(self.model)\n format_protocol_int = lambda s: 'ip' if not s else '\\n'.join(map(Operator.to_string, s))\n format_protocol = lambda s, n: '\\n'.join(set(n)) if n else format_protocol_int(s)\n format_int = lambda 
s: \"any\" if len(s) == 0 else '\\n'.join(map(Operator.to_string, s))\n format = lambda s, n: '\\n'.join(set(n)) if n else format_int(s)\n for r in rules:\n self.model_sort.get_model().append([r.identifier,\n r.name,\n format_protocol(r.protocol, r.protocol_name),\n format(r.ip_source, r.ip_source_name),\n format(r.port_source, r.port_source_name),\n format(r.ip_dest, r.ip_dest_name),\n format(r.port_dest, r.port_dest_name),\n r.action.to_string(),\n r.action.get_action_color(),\n '#FFFFFF' if i % 2 == 0 else '#DCDCDC'])\n i += 1\n self.model_sort.handler_unblock(self.row_reordered_signal)", "def test_multiple_rules(self):\n rule = (\n 'alert(name:\"test1\"; side:client; match:\"A\",1; replace:\"B\";)\\n'\n 'alert(name:\"test2\"; side:client; match:\"B\",1; replace:\"A\";)\\n')\n tests = {\n (\"ABCD\", \"BACD\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\", ],\n }\n\n self.run_rules(rule, tests, echo=True)\n\n rule = (\n 'alert(name:\"test1\"; side:client; match:\"A\",1;)\\n'\n 'alert(name:\"test2\"; side:client; match:\"B\",1;)\\n')\n tests = {\n (\"ABCD\", \"ABCD\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\", ],\n }\n\n self.run_rules(rule, tests, echo=True)\n\n rule = (\n 'alert(name:\"test1\"; side:client; match:\"B\",1;)\\n'\n 'alert(name:\"test2\"; side:client; match:\"A\",1;)\\n')\n tests = {\n (\"ABCD\", \"ABCD\"): [\"proxying connection from\",\n \"INFO : filter matched: 'test2'\",\n \"INFO : filter matched: 'test1'\", ],\n }\n\n self.run_rules(rule, tests, echo=True)", "def generate_rules(self):\n for rule in self._parser.conditionals:\n\n all_in_facts, matrix = self._generate_rules(rule)\n if all_in_facts is True:\n self.new_fact_from_facts(rule)\n else:\n facts = self._parser.conditionals[rule][1]\n #print(rule, facts, matrix)", "def update_all_rules():\n try:\n for i in range(1, len(RULES_FOR_BRANCHES)):\n set_next_rule_to_redis(i, database.get_next_active_rule(i))\n logging.info(\"Rules updated\")\n except Exception as e:\n logging.error(\"Exeption occured while updating all rules. 
{0}\".format(e))", "def update_rules():\n update_all_rules()\n return \"OK\"", "def add_all_conversions(self):\n model = self.model\n # Mathematical expressions\n self.convert_assignments(model.get_assignments())\n # Connections\n for conn in getattr(model, u'connection', []):\n comp1 = model.get_component_by_name(conn.map_components.component_1)\n comp2 = model.get_component_by_name(conn.map_components.component_2)\n for mapping in conn.map_variables:\n var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)\n var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)\n self.convert_mapping(mapping, comp1, comp2, var1, var2)\n return", "def rules_with_cases(cls, search_pattern: str, CaseEnum: Type[TCaseEnum], detect_case: Callable[[Match], TCaseEnum], rules: Callable[[TCaseEnum], List[Dict[str, str]]]) -> PhonTransform:\n cases: List[TCaseEnum] = list(CaseEnum)\n rule_dict = {case: {k: v for rule in rules(case) for k, v in rule.items()} for case in cases}\n sub_func = lambda match: rule_dict[detect_case(match)][match['key']]\n return cls(search_pattern, sub_func)", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def _apply_pattern_rules(flags, input_tensors, output_tensors, tensor_list, tensor_map):\n matched_pattern = OpPatterns.OPAQUE_PATTERN\n for rule, target_pattern in OP_PATTERN_RULES.items():\n if matched_pattern != OpPatterns.OPAQUE_PATTERN:\n break\n # One rule for multiple patterns\n if isinstance(target_pattern, tuple):\n for pattern in target_pattern:\n if rule(flags, pattern, SIMPLE_MAPPING, input_tensors,\n output_tensors, tensor_list, tensor_map):\n matched_pattern = pattern\n break\n elif rule(flags, input_tensors, output_tensors,\n tensor_list, tensor_map) and isinstance(target_pattern, OpPatterns):\n # One rule for one pattern\n matched_pattern = OP_PATTERN_RULES[rule]\n break\n elif not isinstance(target_pattern, OpPatterns):\n raise ValueError(\"Wrong Subpattern rule dictionary format: \" +\n \"Pattern expected but received \" + str(type(target_pattern)))\n return matched_pattern", "def get_real_rules():\n real = {}\n\n for name, rule in RULES.items():\n q = GraphMetric.select(GraphMetric.metric).where(\n GraphMetric.metric % name).group_by(GraphMetric.metric)\n\n for i in q:\n real[i.metric] = rule\n return real", "def cleanUpRules(self):\n\n\t\t# initialize\n\t\tscoreDict = {}\n\t\tnewRules = {}\n\n\t\t# loop through rules\n\t\tfor i, tup in enumerate(self.generatedRules):\n\n\n\t\t\tantecedent = str(tup[0].antecedent)\n\n\t\t\t# if there is no rule in the scoredictionary yet with the same antecedent, put it in both dictionaries\n\t\t\tif (not antecedent in scoreDict):\n\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\telse:\n\n\t\t\t\t# if there is, then first compare if the degree is higher before overwriting\n\t\t\t\tif (tup[1] > scoreDict[antecedent]):\n\t\t\t\t\tnewRules[antecedent] = tup[0]\n\t\t\t\t\tscoreDict[antecedent] = tup[1]\n\t\t\t\telse:\n\t\t\t\t\t# not higher? 
don't overwrite\n\t\t\t\t\tcontinue\n\n\t\t# save rules\n\t\tself.generatedRules = []\n\t\tfor key in newRules:\n\t\t\tself.generatedRules.append(newRules[key])\n\n\t\treturn", "def set_switches_from_rule_nbr(self):\n for rule_switch, enabled in zip(CA_World.bin_0_to_7, self.int_to_8_bit_binary(self.rule_nbr)):\n SimEngine.gui_set(rule_switch, value=(True if enabled=='1' else False))" ]
[ "0.6332641", "0.59601104", "0.58393854", "0.57896286", "0.5788332", "0.5754123", "0.56642663", "0.56304467", "0.55068797", "0.5395985", "0.5301954", "0.5270951", "0.5237025", "0.5214121", "0.5211153", "0.5207527", "0.5184299", "0.5180485", "0.51692605", "0.5154638", "0.5142492", "0.51319546", "0.5124943", "0.50922155", "0.50710595", "0.5054028", "0.5026914", "0.49865758", "0.49839795", "0.49753606" ]
0.6874837
0
Adds a pattern to the water network model. The pattern can be either a list of values (list, numpy array, etc.) or a Pattern object.
def add_pattern(self, name, pattern=None):
    assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, "name must be a string with less than 32 characters and contain no spaces"
    assert isinstance(pattern, (list, np.ndarray, Pattern)), "pattern must be a list or Pattern"

    if not isinstance(pattern, Pattern):
        pattern = Pattern(name, multipliers=pattern, time_options=self._options.time)
    else: #elif pattern.time_options is None:
        pattern.time_options = self._options.time
    if pattern.name in self._data.keys():
        raise ValueError('Pattern name already exists')
    self[name] = pattern
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_pattern(self, start, stop, pattern):\n self.coord2pattern[start] = []\n self.coord2pattern[start].append(pattern)", "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def add_pattern(self, name, pattern=None):\n self._pattern_reg.add_pattern(name, pattern)", "def add_patterns(self, patterns: Iterable[AttributeRulerPatternType]) -> None:\n for p in patterns:\n self.add(**p) # type: ignore[arg-type]", "def add_pattern(self, pattern, callback):\n self.patterns.append((pattern, callback))", "def add_pattern(self, command, pattern_string):\n parts = pattern_string.split(\":\")\n if len(parts) != 2:\n raise AssertionError(\"Cannot parse '{value}' to pattern\".format(value = value))\n pattern_id = parts[0]\n pattern_values = parts[1].split(',')\n pattern = Pattern(pattern_id, pattern_values)\n self._config.add_pattern(command, pattern)", "def train_single_pattern(self, pattern):\n bmu = self.bmu_util.calculate_bmu(pattern)\n self._train(bmu, self.network.weights, pattern)\n self._apply_correction()", "def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern", "def set_pattern(self, pattern):\n for ir, row in enumerate(pattern):\n for ic, col in enumerate(row):\n relay_n = ir*len(row) + ic\n self.relay.set(relay_n, bool(col))", "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def save_pattern(self, pattern: Pattern):", "def save_pattern(self, pattern: Pattern):", "def learn_pattern_Hebb(self, pattern):\n if pattern.shape != self.shape:\n # TODO: this could be written in a clearer way\n ValueError(\"The pattern shape does not match the network one.\")\n\n pattern_flat = pattern.flatten()\n\n # Convert the bool array to an array with +-1\n pattern_pm = 2*pattern_flat.astype(bool) - 1\n\n # Update adjacency matrix according to Hebb's rule \n adjmatrix_change = np.outer(pattern_pm, pattern_pm).astype(float)\n self.network.adjmatrix = np.average(\n [self.network.adjmatrix, adjmatrix_change], axis=0,\n weights=[self.npatterns, 1])\n\n # Update neighbour lists (isingmodel.Ising method)\n self.update_neighbours()\n\n # Store the pattern in the patterns list\n self.patterns.append(pattern)", "def set_inputs(self, pattern: np.ndarray):\n self.x = np.array(pattern[:self.n_inputs]).reshape((1, self.n_inputs))", "def _maybe_add_pattern(attr, patterns):\n handler_type = getattr(attr, '_gen_handler', False)\n\n if not handler_type:\n return\n if handler_type not in ['call', 'cast', 'info']:\n raise AttributeError(\"unknown handler type {}\".format(handler_type))\n\n o = attr._gen_order\n p = attr._gen_pattern\n LOG.debug(\"adding {} {} with pattern {}\".format(handler_type,\n attr,\n p))\n patterns[handler_type].append((o, p))", "def setPattern(self, value):\n return self._set(pattern=value)", "def pattern(self, pattern):\n if pattern is None:\n raise ValueError(\"Invalid value for `pattern`, must not be `None`\") # noqa: E501\n\n self._pattern = pattern", "def setPattern(self,Apattern,Bpattern,Cpattern):\n self.coeffPattern = [Apattern,Bpattern,Cpattern]\n for i in range(self.m):\n self._updateEstimatorSize(i)", "def add(\n self: TokenMatcher,\n label: str,\n patterns: List[List[Dict[str, Any]]],\n on_match: TokenCallback = None,\n ) -> None:\n for pattern in patterns:\n if len(pattern) == 0:\n raise ValueError(\"pattern cannot have zero tokens.\")\n if isinstance(pattern, list):\n self._patterns[label].append(list(pattern))\n else:\n raise TypeError(\"Patterns must be 
lists of dictionaries.\")\n self._callbacks[label] = on_match", "def setPatterns(self, value):\n return self._set(patterns=value)", "def do_pattern(l, pattern, repeat=1):\n command = create_pattern_command(pattern, repeat)\n l.write(command)", "def make_pattern(self):\n probability = random.SystemRandom().random()\n if probability < 0.1:\n _pattern = [0 for x in range(32)]\n elif probability > 0.5:\n pattern_num = SECURE_RANDOM.choice(CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.80:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n elif _probability < 0.40:\n _offset = random.SystemRandom().randint(2, 16)\n _pattern = [1 if (x == _offset) or (x % pattern_num == _offset) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n pattern_num = SECURE_RANDOM.choice(INNER_CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.50:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n\n if not self.global_swing:\n _probability = random.SystemRandom().random()\n if _probability > 0.3:\n _pattern.extend([random.SystemRandom().uniform(0.01, 0.5), random.SystemRandom().randint(1, 14), 0])\n else:\n _pattern.extend([0,1,0])\n else: \n _pattern.extend([0,1,1]) \n\n return _pattern", "def BuildPatterns(self, entry):\n N = self.N\n for ent in WaveFunction.symmetry(entry, self.options['Ref'], self.options['Rot']):\n index = len(self.patterns)\n\n if self.options['PeriIpt']:\n width, height = len(ent) - 1, len(ent[0]) - 1\n ent = [ent[x][:] + ent[x][:N - 1] for x in range(len(ent))]\n ent = ent[:] + ent[:N - 1]\n else:\n width, height = len(ent) - N + 1, len(ent[0]) - N + 1\n\n matrix = [[None] * height for _ in range(width)]\n for x in range(width):\n for y in range(height):\n # Extract an N*N matrix as a pattern with the upper left corner being (x, y).\n pat = tuple(tuple(ent[x1][y:y + N]) for x1 in range(x, x + N))\n\n # If this pattern already exists, simply increment its weight. 
Otherwise, records\n # the new pattern and initializes its weight as 1, then increment the pattern index.\n try:\n matrix[x][y] = self.patterns[pat]\n self.weights[matrix[x][y]] += 1\n except KeyError:\n self.patterns[pat] = matrix[x][y] = index\n self.weights.append(1)\n self.rules.append([set() for _ in range(4)])\n index += 1\n self.make_rule((x, y), matrix)", "def add_patterns(self, patterns: Iterable[Dict[str, Any]],) -> None:\n # disable the nlp components after this one\n # in case they haven't been initialized / deserialised yet\n try:\n current_index = self.nlp.pipe_names.index(self.name)\n subsequent_pipes = [\n pipe for pipe in self.nlp.pipe_names[current_index + 1 :]\n ]\n except ValueError:\n subsequent_pipes = []\n\n with self.nlp.disable_pipes(subsequent_pipes):\n fuzzy_pattern_labels = []\n fuzzy_pattern_texts = []\n fuzzy_pattern_kwargs = []\n fuzzy_pattern_ids = []\n regex_pattern_labels = []\n regex_pattern_texts = []\n regex_pattern_kwargs = []\n regex_pattern_ids = []\n\n for entry in patterns:\n try:\n if isinstance(entry, dict):\n if entry[\"type\"] == \"fuzzy\":\n fuzzy_pattern_labels.append(entry[\"label\"])\n fuzzy_pattern_texts.append(entry[\"pattern\"])\n fuzzy_pattern_kwargs.append(entry.get(\"kwargs\", {}))\n fuzzy_pattern_ids.append(entry.get(\"id\"))\n elif entry[\"type\"] == \"regex\":\n regex_pattern_labels.append(entry[\"label\"])\n regex_pattern_texts.append(entry[\"pattern\"])\n regex_pattern_kwargs.append(entry.get(\"kwargs\", {}))\n regex_pattern_ids.append(entry.get(\"id\"))\n else:\n warnings.warn(\n f\"\"\"Spaczz pattern \"type\" must be \"fuzzy\" or \"regex\",\\n\n not {entry[\"label\"]}. Skipping this pattern.\"\"\",\n PatternTypeWarning,\n )\n else:\n raise TypeError((\"Patterns must be an iterable of dicts.\"))\n except KeyError:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str), type (str),\",\n \"optional kwargs (Dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n fuzzy_patterns = []\n for label, pattern, kwargs, ent_id in zip(\n fuzzy_pattern_labels,\n self.nlp.pipe(fuzzy_pattern_texts),\n fuzzy_pattern_kwargs,\n fuzzy_pattern_ids,\n ):\n fuzzy_pattern = {\n \"label\": label,\n \"pattern\": pattern,\n \"kwargs\": kwargs,\n \"type\": \"fuzzy\",\n }\n if ent_id:\n fuzzy_pattern[\"id\"] = ent_id\n fuzzy_patterns.append(fuzzy_pattern)\n\n regex_patterns = []\n for label, pattern, kwargs, ent_id in zip(\n regex_pattern_labels,\n regex_pattern_texts,\n regex_pattern_kwargs,\n regex_pattern_ids,\n ):\n regex_pattern = {\n \"label\": label,\n \"pattern\": pattern,\n \"kwargs\": kwargs,\n \"type\": \"regex\",\n }\n if ent_id:\n regex_pattern[\"id\"] = ent_id\n regex_patterns.append(regex_pattern)\n\n self._add_patterns(fuzzy_patterns, regex_patterns)", "def add_pattern_bd(x, dataset='cifar10', pattern_type='square', agent_idx=-1):\n x = np.array(x.squeeze())\n \n # if cifar is selected, we're doing a distributed backdoor attack (i.e., portions of trojan pattern is split between agents, only works for plus)\n if dataset == 'cifar10':\n if pattern_type == 'plus':\n start_idx = 5\n size = 6\n if agent_idx == -1:\n # vertical line\n for d in range(0, 3): \n for i in range(start_idx, start_idx+size+1):\n x[i, start_idx][d] = 0\n # horizontal line\n for d in range(0, 3): \n for i in range(start_idx-size//2, start_idx+size//2 + 1):\n x[start_idx+size//2, i][d] = 0\n else:# DBA attack\n #upper part of vertical \n if agent_idx % 4 == 0:\n for d in range(0, 3): \n 
for i in range(start_idx, start_idx+(size//2)+1):\n x[i, start_idx][d] = 0\n \n #lower part of vertical\n elif agent_idx % 4 == 1:\n for d in range(0, 3): \n for i in range(start_idx+(size//2)+1, start_idx+size+1):\n x[i, start_idx][d] = 0\n \n #left-part of horizontal\n elif agent_idx % 4 == 2:\n for d in range(0, 3): \n for i in range(start_idx-size//2, start_idx+size//4 + 1):\n x[start_idx+size//2, i][d] = 0\n \n #right-part of horizontal\n elif agent_idx % 4 == 3:\n for d in range(0, 3): \n for i in range(start_idx-size//4+1, start_idx+size//2 + 1):\n x[start_idx+size//2, i][d] = 0\n \n elif dataset == 'fmnist': \n if pattern_type == 'square':\n for i in range(21, 26):\n for j in range(21, 26):\n x[i, j] = 255\n \n elif pattern_type == 'copyright':\n trojan = cv2.imread('../watermark.png', cv2.IMREAD_GRAYSCALE)\n trojan = cv2.bitwise_not(trojan)\n trojan = cv2.resize(trojan, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)\n x = x + trojan\n \n elif pattern_type == 'apple':\n trojan = cv2.imread('../apple.png', cv2.IMREAD_GRAYSCALE)\n trojan = cv2.bitwise_not(trojan)\n trojan = cv2.resize(trojan, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)\n x = x + trojan\n \n elif pattern_type == 'plus':\n start_idx = 5\n size = 5\n # vertical line \n for i in range(start_idx, start_idx+size):\n x[i, start_idx] = 255\n \n # horizontal line\n for i in range(start_idx-size//2, start_idx+size//2 + 1):\n x[start_idx+size//2, i] = 255\n \n elif dataset == 'fedemnist':\n if pattern_type == 'square':\n for i in range(21, 26):\n for j in range(21, 26):\n x[i, j] = 0\n \n elif pattern_type == 'copyright':\n trojan = cv2.imread('../watermark.png', cv2.IMREAD_GRAYSCALE)\n trojan = cv2.bitwise_not(trojan)\n trojan = cv2.resize(trojan, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)/255\n x = x - trojan\n \n elif pattern_type == 'apple':\n trojan = cv2.imread('../apple.png', cv2.IMREAD_GRAYSCALE)\n trojan = cv2.bitwise_not(trojan)\n trojan = cv2.resize(trojan, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)/255\n x = x - trojan\n \n elif pattern_type == 'plus':\n start_idx = 8\n size = 5\n # vertical line \n for i in range(start_idx, start_idx+size):\n x[i, start_idx] = 0\n \n # horizontal line\n for i in range(start_idx-size//2, start_idx+size//2 + 1):\n x[start_idx+size//2, i] = 0\n \n return x", "def register( self, pattern, callback ):\n self.patterns.append((pattern, callback))", "def createAttrPatterns(*args, patternDefinition: AnyStr=\"\", patternFile: AnyStr=\"\",\n patternType: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def _add_patterns(\n self, fuzzy_patterns: List[Dict[str, Any]], regex_patterns: List[Dict[str, Any]]\n ) -> None:\n for entry in fuzzy_patterns + regex_patterns:\n label = entry[\"label\"]\n if \"id\" in entry:\n ent_label = label\n label = self._create_label(label, entry[\"id\"])\n self._ent_ids[label] = (ent_label, entry[\"id\"])\n pattern = entry[\"pattern\"]\n kwargs = entry[\"kwargs\"]\n if isinstance(pattern, Doc):\n self.fuzzy_patterns[label][\"patterns\"].append(pattern)\n self.fuzzy_patterns[label][\"kwargs\"].append(kwargs)\n elif isinstance(pattern, str):\n self.regex_patterns[label][\"patterns\"].append(pattern)\n self.regex_patterns[label][\"kwargs\"].append(kwargs)\n else:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str), type (str),\",\n \"optional kwargs (Dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n for label, pattern in self.fuzzy_patterns.items():\n 
self.fuzzy_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])\n for label, pattern in self.regex_patterns.items():\n self.regex_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])", "def __init__(self, pattern):\r\n self.pattern = pattern", "def test_add_patterns(ruler: SpaczzRuler, patterns: List[Dict[str, Any]]) -> None:\n assert len(ruler) == len(patterns)" ]
[ "0.7099591", "0.7086511", "0.66157454", "0.6437363", "0.64248204", "0.62355673", "0.60420597", "0.60162663", "0.5999167", "0.59823906", "0.59552026", "0.59552026", "0.5909474", "0.57770187", "0.57718015", "0.5771128", "0.572893", "0.5681611", "0.5672396", "0.5663754", "0.565315", "0.56313765", "0.56181806", "0.55611897", "0.556086", "0.5528248", "0.54538566", "0.5439339", "0.54372567", "0.53286767" ]
0.7114446
0
A new default pattern object
def default_pattern(self):
    return self.DefaultPattern(self._options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, pattern):\r\n self.pattern = pattern", "def _parse_pattern(cls, pattern, default_pattern: str = \"*\") -> Pattern:\n pattern = pattern or default_pattern\n if pattern is None:\n return None\n\n return Pattern(pattern)", "def __init__(self, pattern1, pattern2):\n self.pattern1 = pattern1\n self.pattern2 = pattern2", "def __new__(cls, name, build_pattern: str = None, parse_pattern: re.Pattern = None):\n obj = super().__new__(cls, name)\n\n if parse_pattern is not None:\n obj.parse_pattern = parse_pattern\n\n if build_pattern is not None:\n obj.build_pattern = build_pattern\n\n return obj", "def __init__(self, pattern):\n self._pattern = re.compile(pattern)", "def __init__(self, pattern):\n self._pattern = pattern.lower()", "def pattern_factory(self):\n\t\treturn self.args[1]", "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def __init__(self, pattern, flags=0):\n if flags:\n str_flags = hre.decodeflags(flags)\n pattern = r\"(?%s:%s)\"%(str_flags, pattern)\n super(Regex, self).__init__(pattern)", "def Pattern(self):\r\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.statrequest.pattern.pattern import Pattern\r\n\t\treturn Pattern(self)", "def pattern_gen():\n pattern = \"\"\n\n return pattern", "def __new__(cls, format):\n self = super(SF_Pattern, cls).__new__(cls)\n\n if isinstance(format, bytes):\n uni_str = format.decode('ISO-8859-1') # decode to unicode\n trans_str = translate(uni_str) # translate only works with unicode\n re_fmt = trans_str.encode('ISO-8859-1') # encode back to bytes\n self._spec = _gbspec\n else:\n re_fmt = translate(format)\n self._spec = _gspec\n\n self._format = format\n self._re = cre = re.compile(re_fmt)\n\n if cre.groupindex and len(cre.groupindex) != cre.groups:\n raise RuntimeError('cannot mix mapped and unmapped specifiers')\n elif not cre.groupindex:\n self._retfunc = self._return_tuple\n self._type = tuple\n else:\n self._retfunc = self._return_dict\n self._type = dict\n\n self._casts = self._get_types()\n\n return self", "def __init__(self, config: Configurable, pattern: str = '{name}',\n default_name: str = 'default',\n class_resolver: ClassResolver = None):\n self.config = config\n self.pattern = pattern\n self.default_name = default_name\n if class_resolver is None:\n self.class_resolver = DictionaryClassResolver(\n self.INSTANCE_CLASSES)\n else:\n self.class_resolver = class_resolver", "def __init__(self, resource, pattern = None):\n self.__resource = None;\n self.__pattern = None;\n\n self.__resource = resource;\n self.__pattern = pattern;", "def __init__(self, pattern_type, experimental_scenario, pattern):\n self.pattern_type = pattern_type # if pattern_type=1 --> experimental group, otherwise control group\n self.experimental_scenario = experimental_scenario\n self.pattern = pattern", "def get_pattern(self):\n if self.pattern is None:\n pattern_str = self.blueprint.pattern()\n pattern_file = self.remgr.lookup_pattern_file(self.blueprint, self.provider)\n self.pattern = pattern.Pattern(pattern_str, pattern_file)\n self.pattern.set_provider(self)\n return self.pattern", "def __init__(self, regex, groups, nestedPattern = None, ignored = dict()):\r\n self.regex = regex.format(*[x.group() for x in groups])\r\n self.groups = groups\r\n self.ignored = ignored\r\n self.nestedPattern = nestedPattern\r\n self.name = \"_\"\r\n while self.name in self.groups:\r\n self.name += \"_\"", "def getPattern(self):\n return self.pattern", "def __init__(self, patterns=None):\n 
Container.__init__(self, patterns)", "def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern", "def __init__(self,\n pattern_vec=None,\n ):\n\n # Initialize members of the class\n self.pattern_vec = pattern_vec", "def __init__(self, pattern, markdown_instance=None):\r\n self.pattern = pattern\r\n self.compiled_re = re.compile(\"^(.*?)%s(.*?)$\" % pattern, \r\n re.DOTALL | re.UNICODE)\r\n\r\n # Api for Markdown to pass safe_mode into instance\r\n self.safe_mode = False\r\n if markdown_instance:\r\n self.markdown = markdown_instance", "def __init__(self, url_pattern):\n self._url_pattern = url_pattern", "def to_pattern(obj):\n if isinstance(obj, Pattern):\n return obj\n return Glob(str(obj))", "def __init__(self, matcher, generate):\n self.matcher = matcher\n self._generate = generate", "def __init__(self, pattern, response_dict):\r\n self._pattern = pattern\r\n self._response_dict = response_dict", "def make_pattern(self, rot=0):\n self.map = np.zeros(self._shape, dtype=np.int32)\n\n self.add_gaussian()\n self.add_reflex_1(A=500, sigma=(5, 6), x0=(220, rot))\n self.add_reflex_2(A=750, sigma=(6, 8), x0=(260, rot))\n self.add_beamstop()\n self.add_bars()\n \n p = dict()\n p['map'] = self.map\n p['beam_position'] = self._beam_position\n p['title'] = 'test pattern'\n \n return p", "def add_pattern(self, name, pattern=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pattern, (list, np.ndarray, Pattern)), \"pattern must be a list or Pattern\"\n \n if not isinstance(pattern, Pattern):\n pattern = Pattern(name, multipliers=pattern, time_options=self._options.time) \n else: #elif pattern.time_options is None:\n pattern.time_options = self._options.time\n if pattern.name in self._data.keys():\n raise ValueError('Pattern name already exists')\n self[name] = pattern", "def __init__(self, regex, view):\n self.regex = re.compile(regex)\n self.view = view", "def __init__(self, in_pattern, out_pattern,\r\n allow_multiple_clients=False,\r\n skip_identities_fn=None, name=None, pdb=False,\r\n tracks=(), get_nodes=None):\r\n self.in_pattern = in_pattern\r\n self.out_pattern = out_pattern\r\n if isinstance(in_pattern, (list, tuple)):\r\n self.op = self.in_pattern[0]\r\n elif isinstance(in_pattern, dict):\r\n self.op = self.in_pattern['pattern'][0]\r\n else:\r\n raise TypeError(\"The pattern to search for must start with \"\r\n \"a specific Op instance.\")\r\n self.__doc__ = (self.__class__.__doc__ +\r\n \"\\n\\nThis instance does: \" +\r\n str(self) + \"\\n\")\r\n self.allow_multiple_clients = allow_multiple_clients\r\n self.skip_identities_fn = skip_identities_fn\r\n if name:\r\n self.__name__ = name\r\n self.pdb = pdb\r\n self._tracks = tracks\r\n self.get_nodes = get_nodes\r\n if tracks != ():\r\n assert get_nodes" ]
[ "0.7807499", "0.7243514", "0.7193833", "0.71809846", "0.7046036", "0.69185525", "0.6883249", "0.6868047", "0.6684589", "0.66810167", "0.6515232", "0.64147526", "0.6365711", "0.6365275", "0.6349147", "0.6279269", "0.62461793", "0.6243432", "0.6231972", "0.62051904", "0.618304", "0.6158406", "0.6108486", "0.60881466", "0.5947223", "0.59411395", "0.5921778", "0.5837736", "0.58223635", "0.5811702" ]
0.7329583
1
Sets curve type. WARNING: this does not check whether the key has already been assigned a type before assigning it, so you could end up with a curve that is used for more than one type.
def set_curve_type(self, key, curve_type):
    if curve_type is None:
        return
    curve_type = curve_type.upper()
    if curve_type == 'HEAD':
        self._pump_curves.add(key)
    elif curve_type == 'HEADLOSS':
        self._headloss_curves.add(key)
    elif curve_type == 'VOLUME':
        self._volume_curves.add(key)
    elif curve_type == 'EFFICIENCY':
        self._efficiency_curves.add(key)
    else:
        raise ValueError('curve_type must be HEAD, HEADLOSS, VOLUME, or EFFICIENCY')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_curve(self, key_curve):\n self.curve = key_curve", "def set_type(self,typ):\n self._typ = typ\n if typ == 'Sine':\n self._type = 7\n elif typ == 'Sawtooth up':\n self._type = 0\n elif typ == 'Sawtooth down':\n self._type = 1\n elif typ == 'Square':\n self._type = 2\n elif typ == 'Triangle':\n self._type = 3\n elif typ == 'Pulse':\n self._type = 4\n elif typ == 'Bipolar pulse':\n self._type = 5\n elif typ == 'Sample and hold':\n self._type = 6\n else:\n print \"Unrecognized type keyword!\"\n print \"Please use only the following keywords:\"\n print \"Choices are :\"\n print \" 0. Saw up\"\n print \" 1. Saw down\"\n print \" 2. Square\"\n print \" 3. Triangle\"\n print \" 4. Pulse\"\n print \" 5. Bipolar pulse\"\n print \" 6. Sample and hold\"\n print \" 7. Modulated Sine\"\n self._type = 7\n super(self.__class__, self).setType(self, self._type):", "def setCurve(self, index, curve) -> None:\n ...", "def update_curve(self, faction_type, card_type='creature', sub_type=None):\n\n self.curve.update({card_type: {sub_type: {faction_type: {}}}})\n\n for faction in mtg.Faction.get_factions(faction_type):\n self.curve[card_type][sub_type][faction_type][faction] =\\\n self.faction_curve(faction,\n card_type=card_type,\n sub_type=sub_type)", "def add_curve(self, name, curve_type, xy_tuples_list):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(curve_type, (type(None), str)), \"curve_type must be a string\"\n assert isinstance(xy_tuples_list, (list, np.ndarray)), \"xy_tuples_list must be a list of (x,y) tuples\"\n \n curve = Curve(name, curve_type, xy_tuples_list)\n self[name] = curve", "def key_type(self, key_type):\n allowed_values = [\"UNSET\", \"PGP_ASCII_ARMORED\", \"PKIX_PEM\"]\n if key_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `key_type` ({0}), must be one of {1}\"\n .format(key_type, allowed_values)\n )\n\n self._key_type = key_type", "def settype(self, graphtype):\n\n if str(graphtype).find(\"GRAPH\") > -1:\n self.__type = \"GRAPHS\"\n elif str(graphtype).find(\"SCATTER\") > -1:\n self.__type = \"SCATTER\"\n else:\n # Unknown type of graph - raise an exception\n raise ValueError(\n \"Unknown graph type: \"\n + graphtype\n + \"\\n\"\n + \"Must be one of 'GRAPHS' or 'SCATTER'\"\n )\n self.__nonzero = True", "def set_type(self, type: int):\r\n self.type = type\r\n self.canvas.itemconfig(self.item, image=self._get_image())", "def setAxisType(axistype='rectangular'):\n adict = {'rectangular':'RECT', 'crossed':'CROSS'}\n dislin.axstyp(adict[axistype])", "def set_type(self, new_value):\n\n self.vax_type = new_value\n self.save()", "def set ( self , ** keywords ) :\n for k in keywords.keys ():\n if k == \"type\" :\n self.line_type = keywords [\"type\"]\n else :\n setattr (self, k, keywords [k])", "def set_type(self, _new_type):\n # Check to see if type is changing\n if _new_type == self._type:\n return\n # Move from current boid set to boid set for new type\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[self._grid][_new_type].add(self)\n # Update type\n self._type = _new_type", "def set(self, key):\n if key == 0:\n self._servo.d_key(.1)\n elif key == 1:\n self._servo.ctrl_d(.1)\n elif key == 2:\n self._servo.ctrl_u(.1)\n elif key == 3:\n self._servo.ctrl_enter(.1)\n elif key == 4:\n self._servo.enter_key(.1)\n elif key == 5:\n self._servo.refresh_key(.1)\n elif key == 6:\n self._servo.ctrl_refresh_key(.1)\n 
elif key == 7:\n self._servo.sysrq_x(.1)\n else:\n raise kbError(\"Unknown key enum: %s\", key)", "def setCurve(self, *args):\n return _libsbml.GeneralGlyph_setCurve(self, *args)", "def setType(self,newtype):\n\t\tself.type = newtype;", "def add_curve(self, name, curve_type, xy_tuples_list):\n self._curve_reg.add_curve(name, curve_type, xy_tuples_list)", "def castCurve(self, newtype, idx, silentSuccess=False):\n if idx >= - len(self) and idx < len(self):\n newCurve = self.curve(idx).castCurve(newtype)\n if isinstance(newCurve, Curve):\n flag = self.replaceCurve(newCurve, idx)\n if flag:\n if not silentSuccess:\n print('Graph.castCurve: new Curve type:',\n self.curve(idx).classNameGUI() + '.')\n else:\n print('Graph.castCurve')\n return flag\n else:\n print('Graph.castCurve: idx not in suitable range (', idx, ', max',\n len(self), ').')\n return False", "def setSymbol(self, \n symbolStyle=None, \n brushColor=None, brushStyle=None, \n penColor=None, penWidth=None, penStyle=None, \n symbolHeight=None, symbolWidth=None):\n for item in self.__selectedCurves:\n oldSymbol = item.symbol()\n if symbolStyle is None:\n symbolStyle = oldSymbol.style()\n if brushColor is None:\n brushColor = oldSymbol.brush().color()\n if brushStyle is None:\n brushStyle = oldSymbol.brush().style()\n if penColor is None:\n penColor = oldSymbol.pen().color()\n if penWidth is None:\n penWidth = oldSymbol.pen().width()\n if penStyle is None:\n penStyle = oldSymbol.pen().style()\n if symbolHeight is None:\n symbolHeight = oldSymbol.size().height()\n if symbolWidth is None:\n symbolWidth = oldSymbol.size().width()\n pen = QtGui.QPen(penColor, penWidth, penStyle)\n symbol = Qwt.QwtSymbol(symbolStyle, oldSymbol.brush(), pen, QtCore.QSize(width, height)) \n item.setSymbol(symbol)\n self.replot()", "def set_type(self,number):\n if number == 0:\n self.Goblin()\n \n if number == 1:\n self.Ork()\n\n if number == 2:\n self.Skeleton()\n\n if number == 3:\n self.Troll()", "def setCurve(self, *args):\n return _libsbml.ReactionGlyph_setCurve(self, *args)", "def changeType(self, newType):\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()", "def setCurve(self, *args):\n return _libsbml.ReferenceGlyph_setCurve(self, *args)", "def register_curve(self, curve):\n key = tuple(curve.points())\n if key not in self.curves:\n # new curve (lock and register)\n curve.is_locked = True # points list must not change, else not valid key\n self.curves[key] = curve\n return self.curves[key]", "def type_determine(self):\n\n if self.data_type == \"ECG\" or self.data_type == \"ENR\":\n self.curve_channel2 = self.ECGWinHandle.plot(self.display_channel2, pen=self.pen)\n self.curve_channel1 = self.RespirationWinHandle.plot(self.display_channel1, pen=self.pen)\n self.two_channel = True\n if self.data_type == \"ECG\":\n self.channel1_type = \"RESP\"\n self.channel2_type = \"ECG\"\n else:\n self.channel1_type = \"RESP\"\n self.channel1_type = \"ECG\"\n else:\n self.curve_channel2 = self.PulseWaveWinHandle.plot(self.display_channel2, pen=self.pen)\n self.curve_channel1 = None\n self.two_channel = False\n self.channel2_type = \"PULSE\"", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def set_type(self, typ):\n if typ in range(5):\n self._type = typ\n\n else:\n raise ValueError(\n \"ERROR: Invalid input. 
Please give a numerical value \"\n \"between 0 and 4 ( both inclusive ) \")", "def getTypeCode(self):\n return _libsbml.Curve_getTypeCode(self)", "def test_assign_categorical(curve):\n assert curve.dtypes[0] == 'float'\n curve.dtypes = 'category'\n assert curve.dtypes[0] == 'category'", "def set_type(self, handler_type):\n try:\n self.handler = self.HANDLER_TYPES[handler_type].__func__\n except KeyError:\n handler_names = ', '.join(['\"%s\"' % t for t in self.HANDLER_TYPES.keys()])\n raise ValueError(u'Unsupported handler_type %s, options are %s.' %\n (handler_type, handler_names))" ]
[ "0.6818262", "0.634501", "0.6043398", "0.6023125", "0.59724844", "0.5956123", "0.5896204", "0.58441633", "0.57730615", "0.57210946", "0.5714673", "0.568932", "0.56837136", "0.5655091", "0.5635704", "0.5634768", "0.5588842", "0.5548922", "0.5517519", "0.55156165", "0.5504896", "0.5446165", "0.54321325", "0.54222476", "0.53798556", "0.53798556", "0.5374117", "0.53698725", "0.5313216", "0.53060037" ]
0.82946426
0
List of names of all curves without types
def untyped_curve_names(self):
    defined = set(self._data.keys())
    untyped = defined.difference(self._pump_curves, self._efficiency_curves,
                                 self._headloss_curves, self._volume_curves)
    return list(untyped)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def curve_name_list(self):\n return list(self._curve_reg.keys())", "def pump_curve_names(self):\n return list(self._pump_curves)", "def efficiency_curve_names(self):\n return list(self._efficiency_curves)", "def volume_curve_names(self):\n return list(self._volume_curves)", "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def curves(self):\n return self._curve_reg", "def untyped_curves(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n for key in untyped:\n yield key, self._data[key]", "def get_curves(p):\n curve_list = []\n for well in p:\n curves = well.data.keys()\n for c in curves:\n curve_list.append(c)\n return sorted(set(curve_list))", "def get_symbol(self):\n return []", "def axesNames(self, data, info):\n return []", "def endog_names(self):\n return self.data.ynames", "def getNoShortName(self):\n return [x for x in self.xeps if not x.shortname]", "def names(self) -> list[str]:", "def names(cls) -> List[str]:", "def getGraphPointsNames(self):\n return [gp.id for gp in self.getGraphPoints()]", "def get_all_object_names(self):\n o_objects = []\n for s in [\"Non Model\", \"Solids\", \"Unclassified\", \"Sheets\", \"Lines\"]:\n o_objects += self.design.modeler.get_objects_in_group(s)\n return o_objects", "def symbols(self):\n # get the names(identifiers) of all curves in the graph:\n curvenames = self.g.element_show()\n # foreach curve, add a diamond symbol, filled with the\n # color of the curve ('defcolor') and with a size of 2:\n for curvename in curvenames:\n self.g.element_configure(curvename, symbol='diamond',\n outlinewidth=2, fill='defcolor')", "def exog_names(self):\n return self.data.xnames", "def get_visibility_curves(self, nodes):\n curves = []\n for node in nodes:\n if mc.nodeType(node) != 'transform':\n node = mc.listRelatives(node, parent=True, fullPath=True)[0]\n # Check if a anim curve is connected to the visibility attribute\n if mc.listConnections(\"{0}.visibility\".format(node)):\n animCurves = filter(\n lambda connection: 'animCurve' in mc.nodeType(connection),\n mc.listConnections(\"{0}.visibility\".format(node))\n )\n # Unplug the anim curve (to avoid it being deleted when the\n # object will be) and store a ref to it in the anim curve list\n if animCurves:\n mc.disconnectAttr(\n \"{0}.output\".format(animCurves[0]),\n \"{0}.visibility\".format(node)\n )\n curves.append(animCurves[0])\n else:\n curves.append(None)\n else:\n curves.append(None)\n return curves", "def compound_names(self) -> List[str]:\n return None", "def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def names():\n pass", "def getElementName(self):\n return _libsbml.Curve_getElementName(self)", "def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)", "def namelist(self):\n return []", "def name(self):\n return [o.name for o in self.obs]", "def 
get_available_figures(self):\n return sorted((method[5:], func) \\\n for method, func in self.__class__.__dict__.iteritems() \\\n if method.startswith(\"plot_\") and callable(func))", "def getFeatureNames(self):\n return [\"f100\", \"f103\", \"f104\"]" ]
[ "0.728905", "0.7177009", "0.7102774", "0.6995169", "0.6821574", "0.6692155", "0.6231177", "0.6144938", "0.6102462", "0.5912403", "0.5880008", "0.58158827", "0.577501", "0.57147944", "0.5695711", "0.5672398", "0.56616974", "0.562902", "0.5619223", "0.56168723", "0.56018364", "0.55980986", "0.5578955", "0.5557119", "0.5532513", "0.5526996", "0.55207694", "0.55090815", "0.5505387", "0.55022967" ]
0.79089075
0
Generator to get all pump curves Yields
def pump_curves(self):
    for key in self._pump_curves:
        yield key, self._data[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def _generators(self):\n return self.free_group.generators", "def semigroup_generators(self):", "def ticker_generator():\n return (v for v in load_equities().values)", "def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)", "def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)", "def items(self):\n for ene, row in zip(self.energies, self.yield_matrix):\n yield ene, FissionYield(self.products, row)", "def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1", "def generators(self):\n return self._generators", "def power_points():\n next_reading = power_readings()\n stretch = []\n\n def next():\n nonlocal stretch, next_reading\n stretch.append(next_reading())\n if len(stretch) > XMAX + 1:\n stretch.pop(0)\n x = XMAX + 1 - len(stretch)\n points = []\n for y in stretch:\n points.append((x, y))\n points.append((x, 0))\n x += 1\n return points\n\n return next", "def generator(self):\n return [None, 1]", "def __iter__(self):\n return self._product_generator()", "def power_pumps(self):\n for name in self._power_pumps:\n yield name, self._data[name]", "def __iter__(self):\n yield from self.gen", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def __iter__(self):\n for point in self.points:\n yield point", "def volume_curves(self):\n for key in self._volume_curves:\n yield key, self._data[key]", "def generators(self) -> List[Generator]:\n return self._generators", "def _generator(self):\n while not self._stopFlag:\n yield self._oneStep()\n self._cooperator = None", "def algebra_generators(self):\n return self.basis().keys().semigroup_generators().map(self.monomial)", "def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]", "def walk(self):\n for group in self.all_groups.values():\n yield from group.calculations", "def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol", "def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p", "def iter_all_chains(self):\n for model in self.model_list:\n for chain in model.chain_list:\n yield chain", "def generators(self, algorithm=\"farey\"):\n if self.level() == 1:\n # we return a fixed set of generators for SL2Z, for historical\n # reasons, which aren't the ones the Farey symbol code gives\n return [ self([0,-1,1,0]), self([1,1,0,1]) ]\n\n elif algorithm==\"farey\":\n return self.farey_symbol().generators()\n\n elif algorithm==\"todd-coxeter\":\n from sage.modular.modsym.p1list import P1List\n from .congroup import generators_helper\n level = self.level()\n if level == 1: # P1List isn't very happy working mod 1\n return [ self([0,-1,1,0]), self([1,1,0,1]) ]\n gen_list = generators_helper(P1List(level), level)\n return [self(g, check=False) for g in gen_list]\n\n else:\n raise ValueError(\"Unknown algorithm '%s' (should be either 'farey' or 'todd-coxeter')\" % algorithm)", "def pipes(self):\n for 
name in self._pipes:\n yield name, self._data[name]", "def getGenerators(self) -> list:\n return self.state[GENERATORS]", "def stream():\n while True:\n yield random_point()" ]
[ "0.6955307", "0.69123983", "0.65011305", "0.62952924", "0.6177036", "0.6140256", "0.6089721", "0.6075028", "0.6056255", "0.60330725", "0.600164", "0.5999505", "0.5998696", "0.596925", "0.5952728", "0.5864612", "0.5799862", "0.5788411", "0.5763655", "0.5734014", "0.57314366", "0.57171017", "0.57119286", "0.5695046", "0.56813055", "0.56737465", "0.56607294", "0.5659714", "0.56531036", "0.5637052" ]
0.7948479
0
List of names of all pump curves
def pump_curve_names(self):
    return list(self._pump_curves)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def curve_name_list(self):\n return list(self._curve_reg.keys())", "def volume_curve_names(self):\n return list(self._volume_curves)", "def pump_names(self):\n return self._pumps", "def efficiency_curve_names(self):\n return list(self._efficiency_curves)", "def power_pump_names(self):\n return self._power_pumps", "def pump_name_list(self):\n return list(self._link_reg.pump_names)", "def power_pump_name_list(self):\n return list(self._link_reg.power_pump_names)", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def untyped_curve_names(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n return list(untyped)", "def head_pump_names(self):\n return self._head_pumps", "def get_curves(p):\n curve_list = []\n for well in p:\n curves = well.data.keys()\n for c in curves:\n curve_list.append(c)\n return sorted(set(curve_list))", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def pumps(self):\n for name in self._pumps:\n yield name, self._data[name]", "def curves(self):\n return self._curve_reg", "def pumps(self): \n return self._link_reg.pumps", "def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)", "def get_all_image_names(self):\n\n # for RD analysis ...\n\n result = []\n for sweep in self._sweeps:\n result.extend(sweep.get_all_image_names())\n return result", "def GetNamesOfPieces(self):\n assert self.RecoveredEnoughPieces()\n result = []\n base = self.fileName + dibs_constants.fileSeparator \n for p in self.piecesRecovered.keys():\n result.append(base + p)\n return result", "def names(self) -> list[str]:", "def getGraphPointsNames(self):\n return [gp.id for gp in self.getGraphPoints()]", "def name(self):\n return [o.name for o in self.obs]", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def phase_names(self) -> [str]:\n return [phase.hyper_name for phase in self.hyper_phases]", "def names(self) -> List:\n ...", "def output_names(self):\n return []", "def get_pump_list(self):\n return self.pump_array", "def figure_names(self) -> List[str]:\n return self._db_data.figure_names", "def occr_p_names(self):\n\n occr_names = []\n\n for i in range(len(self._P_boundaries) - 1):\n occr_names.append(\"{:.3g} - {:.3g}\".format(\n self._P_boundaries[i], self._P_boundaries[i+1]))\n\n return occr_names", "def namelist(self):\n return []" ]
[ "0.70650846", "0.7049732", "0.7011603", "0.69478863", "0.6671471", "0.66613317", "0.6562465", "0.6556188", "0.64959246", "0.6474869", "0.6210788", "0.6114254", "0.6050862", "0.59828776", "0.5880972", "0.5876053", "0.584949", "0.5795343", "0.5747948", "0.56924325", "0.56870025", "0.56429935", "0.5617881", "0.55769527", "0.55231506", "0.5518822", "0.5503086", "0.54942286", "0.5472294", "0.54546994" ]
0.8472442
0
Generator to get all efficiency curves Yields
def efficiency_curves(self):
    for key in self._efficiency_curves:
        yield key, self._data[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def ticker_generator():\n return (v for v in load_equities().values)", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def __iter__(self):\n return self._product_generator()", "def generator(self) -> Iterator[Tuple[int, int, complex]]:\n for inda in range(self._core.lena()):\n alpha_str = self._core.string_alpha(inda)\n for indb in range(self._core.lenb()):\n beta_str = self._core.string_beta(indb)\n yield alpha_str, beta_str, self.coeff[inda, indb]", "def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)", "def items(self):\n for ene, row in zip(self.energies, self.yield_matrix):\n yield ene, FissionYield(self.products, row)", "def curves(self):\n return self._curve_reg", "def _generator(self):\n # Initial setup\n ac = self._env.action_space.sample() # not used, just so we have the datatype\n self.new = True # marks if we're on first timestep of an episode\n self.ob = self._convert_state(self._env.reset()) \n T = self._timesteps\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n #obs = np.array([None for _ in range(T)])\n obs = nd.empty((T,) + self._env.observation_space.shape)\n rews = np.zeros(T, 'float32')\n vpreds = np.zeros(T, 'float32')\n news = np.zeros(T, 'int32')\n acs = np.array([ac for _ in range(T)])\n prevacs = acs.copy()\n\n t = 0\n while True:\n ob = self.ob # Use `self.` since `_evaluate` may have reset the env\n new = self.new\n prevac = ac\n ac, vpred = self._act(ob)\n # NOTE(openAI) Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct terminal value\n if t > 0 and t % T == 0:\n seg = {\"ob\": obs, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": np.array(copy.deepcopy(ep_rets)),\n \"ep_lens\": np.array(copy.deepcopy(ep_lens))}\n self._add_vtarg_and_adv(seg, self._gamma, self._lambda)\n yield seg\n # NOTE: Do a deepcopy if the values formerly in these arrays are used later.\n ep_rets = []\n ep_lens = []\n i = t % T\n\n obs[i] = ob[0]\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = self._convert_state(self._env.reset())\n self.new = new\n self.ob = ob\n t += 1", "def __iter__(self):\n term_v = yicespy.term_vector_t()\n yicespy.yices_init_term_vector(term_v)\n 
#todo here\n status = yicespy.yices_model_collect_defined_terms(self.yices_model, term_v)\n self._check_error(status)\n for d in term_v:\n try:\n pysmt_d = self.converter.back(d())\n yield pysmt_d, self.get_value(pysmt_d)\n except UndefinedSymbolError:\n # avoids problems with symbols generated by z3\n pass\n yicespy.yices_delete_term_vector(term_v)", "def __iter__(self):\n yield from self.gen", "def iterator(self):\n yield", "def _evaluate(self, estimator, generator):\n return np.mean([np.mean(np.power(estimator.estimate(A, b) - x, 2))\n for A, x, b in[generator.generate()\n for _ in range(self.repetitions)]])", "def equation_generator(self):\n for H in self.Hrepresentation():\n if H.is_equation():\n yield H", "def power_readings():\n chain = [sin(x / (XMAX * 0.1)) * 0.1 + 0.6 for x in range(0, XMAX + 1)]\n cnt = 0\n\n def next():\n nonlocal chain, cnt\n next_reading = chain[cnt % len(chain)]\n cnt += 1\n return next_reading\n\n return next", "def __iter__(self):\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n self.path = arr[0]\n self.y = int(arr[1])\n if len(arr) > 2:\n self.feats = map(float, arr[2:])\n yield self.path, self.y, self.feats\n else:\n yield self.path, self.y", "def _generators(self):\n return self.free_group.generators", "def generator(self):\n global_index = 0\n n_params = len(self.params)\n while (global_index < self.NXFLTEXP*self.paramspace):\n # skip row that have data already\n while (np.sum(self.spectra_hdu.data[global_index][1]) > 0.0): \n global_index += self.NXFLTEXP\n if (global_index >= self.NXFLTEXP*self.paramspace): break\n if (global_index >= self.NXFLTEXP*self.paramspace): break\n\n # get indexes in each grid; the last grid changing the fastest\n param_indexes = np.zeros(n_params, dtype=int)\n param_values = np.zeros(n_params)\n N0 = self.paramspace\n for i in range(n_params):\n (p_name, p_grid, p_log, p_frozen) = self.params[i]\n N = len(p_grid)\n N0 /= N\n p_index = int((global_index/3)//N0 % N)\n #print('global_index',global_index)\n #print('p_index',p_index)\n #print('p_grid[p_index]',p_grid[p_index])\n #print('p_grid',p_grid)\n param_indexes[i] = p_index\n param_values[i] = p_grid[p_index]\n\n # write parameter values (repeat the same parameters for each spectrum of the set) \n for i in range(self.NXFLTEXP):\n self.spectra_hdu.data[global_index+i][0] = param_values\n #end for\n\n # return total index, array of grid indexes, and array of grid values\n #sys.stderr.write(\"> generator: passing spectrum index %d (%s %s)\\n\" % (global_index, str(param_indexes), str(param_values)))\n yield (global_index, param_values, param_indexes, self.energies)\n global_index += self.NXFLTEXP\n #end while", "def generator(self):\n return [None, 1]", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def _iter_sims(self):\n for idx, lineset in enumerate(self.linesets[:-1]):\n for lineset2 in self.linesets[idx + 1 :]:\n yield from self._find_common(lineset, lineset2)", "def get_yield(self, t, y):\n return", "def __iter__(self):\n for point in self.points:\n yield point", "def walk(self):\n for group in self.all_groups.values():\n yield from group.calculations", "def __iter__(self):\n # we should really never have 1e6, let's prevent some user pain\n for ii in range(self._stop):\n yield self.next()\n else:\n raise RuntimeError('Generated over %s images' % (self._stop,))", "def est_generator(limit=1000):\n last_guess = ZERO\n for i in range(limit):\n yield 1 + last_guess\n denom = last_guess + 2\n last_guess = 1 / 
denom", "def priorities_generator():\n priorities = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]\n yield from itertools.chain(priorities, itertools.repeat(0.1))", "def __iter__(self) :\n for s in self._samples_to_cache :\n yield s", "def get_assets(self):\n # The size of the price_data list should not change, even when updated\n price_data_length = len(self.price_data)\n\n for index in itertools.cycle(range(price_data_length)):\n try:\n yield self.price_data[index]\n except IndexError:\n yield None" ]
[ "0.6970686", "0.6494462", "0.62896115", "0.6112059", "0.6069281", "0.597246", "0.5970973", "0.5869654", "0.57763106", "0.5729279", "0.572446", "0.5703407", "0.56768227", "0.56695676", "0.5640674", "0.5629118", "0.56185347", "0.5613104", "0.5579484", "0.55687636", "0.5509577", "0.5507431", "0.5481511", "0.5471711", "0.5462284", "0.54455614", "0.5444981", "0.5444795", "0.54445225", "0.54332775" ]
0.76868105
0
List of names of all efficiency curves
def efficiency_curve_names(self):
    return list(self._efficiency_curves)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def curve_name_list(self):\n return list(self._curve_reg.keys())", "def pump_curve_names(self):\n return list(self._pump_curves)", "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def curves(self):\n return self._curve_reg", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def volume_curve_names(self):\n return list(self._volume_curves)", "def efficiency_curves(self):\n for key in self._efficiency_curves:\n yield key, self._data[key]", "def untyped_curve_names(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n return list(untyped)", "def endog_names(self):\n return self.data.ynames", "def legend_names(self):\n return [leg.label for leg in self.legends]", "def get_curves(p):\n curve_list = []\n for well in p:\n curves = well.data.keys()\n for c in curves:\n curve_list.append(c)\n return sorted(set(curve_list))", "def getElementName(self):\n return _libsbml.Curve_getElementName(self)", "def base_plot_keys(self):\r\n plot_keys = [\"loss\", \"l1_loss\", \"mse_loss\", \"dur_loss\"]\r\n \r\n if self.use_fe_condition:\r\n plot_keys += [\"pitch_loss\", \"energy_loss\"]\r\n return plot_keys", "def getSolRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.solNames.keys()", "def name(self):\n return [o.name for o in self.obs]", "def _scale_dependent_metrics() -> list:\n return ['mse', 'rmse', 'mae']", "def ex_curve(data):\n rv = []\n try:\n ef = autocomplete_curve_function(data[0])\n ed = autocomplete_curve_direction(data[1])\n period = 2\n try:\n period = max(int(data[2]), 2)\n except ValueError:\n pass\n data = data[3:]\n if not data:\n if consts.VERBOSE:\n print('ERROR: No data for curve')\n return []\n f = CURVE_FUNCTIONS[ef][ed]\n maxi = len(data)-1\n for i in range(period):\n v = f(float(i) / float(period-1))\n di = int(round(v*float(maxi)))\n rv.append(data[di])\n\n except Exception as e:\n if consts.VERBOSE:\n print('ERROR: Curve failed [%s]'%e)\n\n return rv", "def series_names(self):\r\n return self.names", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b", "def getResRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.resNames.keys()", "def axesNames(self, data, info):\n return []", "def getSensorVariableNames(self, product):\r\n return []", "def getSensorVariableNames(self, product):\r\n\r\n return []", "def realizations(self):\n return [ self.km(), self.kHLP(), self.affineSchur(), self.dual_k_Schur()]", "def get_curve(self, name):\n return self._curve_reg[name]", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def names(self) -> list[str]:", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def get_suffstat_names():\n params = ['sum_x', 'sum_x_squared']\n return params", "def get_traced_op_names(self):\n return self._traced_op_names" ]
[ "0.7176187", "0.69882613", "0.67914665", "0.65645057", "0.6549786", "0.64903426", "0.6471656", "0.6310284", "0.60815775", "0.5918206", "0.58251923", "0.57553005", "0.5725963", "0.56404936", "0.5589268", "0.55174005", "0.55129546", "0.5444234", "0.54081243", "0.5403793", "0.53560305", "0.535534", "0.5346379", "0.5342867", "0.5333942", "0.5306573", "0.53033113", "0.52980703", "0.52862453", "0.52806926" ]
0.8754546
0
Generator to get all headloss curves Yields
def headloss_curves(self):
    for key in self._headloss_curves:
        yield key, self._data[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def train_step_generators(self, X):\n self.generator.zero_grad()\n self.encoder.zero_grad()\n\n Z = self.noise_fn(self.batch_size)\n\n X_hat = self.generator(Z)\n Z_hat = self.encoder(X)\n X_tilde = self.generator(Z_hat)\n Z_tilde = self.encoder(X_hat)\n\n X_hat_confidence = self.discriminator_image(X_hat)\n Z_hat_confidence = self.discriminator_latent(Z_hat)\n X_tilde_confidence = self.discriminator_image(X_tilde)\n Z_tilde_confidence = self.discriminator_latent(Z_tilde)\n\n X_hat_loss = self.criterion_gen(X_hat_confidence, self.target_ones)\n Z_hat_loss = self.criterion_gen(Z_hat_confidence, self.target_ones)\n X_tilde_loss = self.criterion_gen(X_tilde_confidence, self.target_ones)\n Z_tilde_loss = self.criterion_gen(Z_tilde_confidence, self.target_ones)\n\n X_recon_loss = self.criterion_recon_image(X_tilde, X) * ALPHA_RECONSTRUCT_IMAGE\n Z_recon_loss = self.criterion_recon_latent(Z_tilde, Z) * ALPHA_RECONSTRUCT_LATENT\n\n X_loss = (X_hat_loss + X_tilde_loss) / 2 * ALPHA_DISCRIMINATE_IMAGE\n Z_loss = (Z_hat_loss + Z_tilde_loss) / 2 * ALPHA_DISCRIMINATE_LATENT\n loss = X_loss + Z_loss + X_recon_loss + Z_recon_loss\n\n loss.backward()\n self.optim_e.step()\n self.optim_g.step()\n\n return X_loss.item(), Z_loss.item(), X_recon_loss.item(), Z_recon_loss.item()", "def yolo_v4_head_generator():\n return heads.YOLOv4()", "def _generator_loss(self, y_hat):\n\n l = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = tf.zeros(tf.shape(y_hat)),logits = y_hat ))\n print('generatorloss shape',tf.shape(l))\n return l", "def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]", "def losses(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], Loss):\n yield name", "def ls_generator_loss(scores_fake):\r\n loss = torch.mean((scores_fake - 1) ** 2) / 2\r\n return loss", "def Hrep_generator(self):\n for H in self.Hrepresentation():\n yield H", "def retinanet_head_generator(params):\n head_params = params.model_params.architecture.head_params\n anchors_per_location = params.model_params.anchor.num_scales * len(params.model_params.anchor.aspect_ratios)\n return heads.RetinanetHead(\n params.model_params.architecture.min_level,\n params.model_params.architecture.max_level,\n params.model_params.architecture.num_classes,\n anchors_per_location,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n norm_activation=norm_activation_generator(params.model_params.norm_activation),\n )", "def learning_curve():\n loss = []\n val_loss = []\n data_size = []\n\n x_slid, y_slid = sliding_window_main(x, y)\n x_train, y_train, x_val, y_val, x_test, y_test = data_splitting_main(x_slid, y_slid)\n m_tot = x_train.shape[0]\n\n batch_step = 50\n try:\n for m in range(batch_size, m_tot, batch_step*batch_size):\n print(\"Training: \", m)\n net = create_network()\n history = trainer(net, x_train[:m], y_train[:m], x_val, y_val)\n loss.append(history.history[\"loss\"][-1])\n val_loss.append(history.history[\"val_loss\"][-1])\n data_size.append(m)\n\n print(\"Loss:\", loss[-1])\n print()\n\n finally:\n plt.plot(data_size, loss, label=\"Loss\", marker=\"o\")\n plt.plot(data_size, val_loss, label=\"Val Loss\", marker=\"o\")\n plt.xlabel(\"m\")\n plt.ylabel(\"Losses\")\n plt.title(\"Model Loss\")\n plt.legend()\n plt.savefig(\"img/\" + datetime.now().strftime(\"%y%m%d_%H%M\") + \"_learning_curve.png\")\n plt.show()\n 
plt.close()\n\n return loss, val_loss", "def rpn_head_generator(params):\n head_params = params.rpn_head\n if head_params.anchors_per_location:\n logging.info('[Deprecation]: `rpn_head.anchors_per_location` '\n 'is no longer used.')\n anchor_aspect_ratios = len(params.anchor.aspect_ratios)\n anchor_num_scales = params.anchor.num_scales\n anchors_per_location = anchor_aspect_ratios * anchor_num_scales\n return heads.RpnHead(\n params.architecture.min_level,\n params.architecture.max_level,\n anchors_per_location,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n params.batch_norm_activation.activation,\n head_params.use_batch_norm,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation))", "def __next__(self):\n blend_tables = []\n for _ in range(self.batch_size):\n blend_table = self.sampling_function(self.catalog.table)\n self._check_n_sources(blend_table)\n blend_tables.append(blend_table)\n return blend_tables", "def train(self):\n d_loss = []\n g_loss = []\n for index, (real, _) in enumerate(self.data_loader):\n d_loss.append(self._train_discriminator(real))\n\n # Every n_critic batches train the generator.\n if index % self.params.n_critic == 0:\n g_loss.append((self._train_generator()))\n\n return d_loss, g_loss", "def rpn_head_generator(params):\n head_params = params.rpn_head\n anchors_per_location = params.anchor.num_scales * len(params.anchor.aspect_ratios)\n return heads.RpnHead(\n params.model_params.architecture.min_level,\n params.model_params.architecture.max_level,\n anchors_per_location,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n params.model_params.norm_activation.activation,\n head_params.use_batch_norm,\n norm_activation=norm_activation_generator(params.model_params.norm_activation),\n )", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def head_pumps(self):\n for name in self._head_pumps:\n yield name, self._data[name]", "def loss(self, data):\n loss, smoothed, lr = data\n\n curves = []\n\n curve_keys = ['color', 'linestyle', 'linewidth', 'alpha']\n\n if loss is not None:\n loss_name = self.config.get('label', f\"loss #{self.index + 1}\")\n loss_label = f'{loss_name} ⟶ {loss[-1]:2.3f}'\n final_window = self.config.get('final_window', None)\n if final_window is not None:\n final_window = min(final_window, len(loss))\n final = np.mean(loss[-final_window:])\n loss_label += f\"\\nmean over last {final_window} iterations={final:2.3f}\"\n\n loss_config = self.config.filter(keys=curve_keys, prefix='curve_')\n loss_curve = self.ax.plot(loss, label=loss_label, **loss_config)\n curves.extend(loss_curve)\n\n if smoothed is not None:\n smoothed_color = scale_lightness(loss_config['color'], scale=.5)\n smooth_window = self.config.get('window')\n smoothed_label = self.config.get('smoothed_label', loss_name)\n smoothed_label = smoothed_label + '\\n' if smoothed_label else ''\n smoothed_label += f'smoothed with window {smooth_window}'\n smoothed_curve = self.ax.plot(smoothed, label=smoothed_label, color=smoothed_color, linestyle='--')\n curves.extend(smoothed_curve)\n\n if lr is not None:\n lr_ax = self.ax if loss is None else self.twin_ax\n lr_label = f'learning rate №{self.index + 1} ⟶ {lr[-1]:.0e}'\n lr_config = self.config.filter(keys=curve_keys, prefix='lr_')\n lr_curve = lr_ax.plot(lr, label=lr_label, **lr_config)\n lr_ax.set_ylabel('Learning 
rate', fontsize=12)\n curves.extend(lr_curve)\n\n return curves", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def _multi_head(heads, loss_weights=None):\n if loss_weights:\n if len(loss_weights) != len(heads):\n raise ValueError(\"heads and loss_weights must have same size\")\n\n def _weighted_loss_combiner(losses):\n if loss_weights:\n if len(losses) != len(loss_weights):\n raise ValueError(\"losses and loss_weights must have same size\")\n weighted_losses = []\n for loss, weight in zip(losses, loss_weights):\n weighted_losses.append(math_ops.multiply(loss, weight))\n return math_ops.add_n(weighted_losses)\n else:\n return math_ops.add_n(losses)\n\n return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)", "def generator_loss(logits_fake, device):\r\n true_labels = torch.ones(logits_fake.size()).to(device=device, dtype=torch.float32)\r\n loss = bce_loss(logits_fake, true_labels)\r\n return loss", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def ls_generator_loss(scores_fake):\n N = scores_fake.size()\n\n true_labels = Variable(torch.ones(N)).type(dtype)\n\n loss = 0.5 * ((torch.mean((scores_fake - true_labels)**2)))\n\n return loss", "def generator_loss(gen_images):\n output = disc_net(gen_images)\n cats = output.new_full(output.shape, real_label)\n return gen_loss_criterion(output, cats)", "def generator_loss(logits_fake):\n # Batch size.\n N = logits_fake.size()\n\n # 生成器的作用是将所有“假”的向真的(1)靠拢\n true_labels = Variable(torch.ones(N)).type(dtype)\n\n # 计算生成器损失\n loss = Bce_loss(logits_fake, true_labels)\n\n return loss", "def iterWeights(self):\r\n\t\tyield None", "def train_LR(self, X, y, eta=1e-3, batch_size=1, num_iters=1000) :\n loss_history = []\n N,d = X.shape\n for t in np.arange(num_iters):\n X_batch = None\n y_batch = None\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in gradient descent. \n # After sampling, X_batch should have shape: (batch_size,1), y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations in the dataset. \n # Use np.random.choice. 
It is better to user WITHOUT replacement.\n # ================================================================ #\n \n # sample indices without replacement\n batch_idx = np.random.choice(N, batch_size, replace = False)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss = 0.0\n grad = np.zeros_like(self.w)\n # ================================================================ #\n # YOUR CODE HERE: \n # evaluate loss and gradient for batch data\n # save loss as loss and gradient as grad\n # update the weights self.w\n # ================================================================ #\n \n # compute the loss and gradient\n # loss_and_grad will take responsible for these\n \n loss, grad = self.loss_and_grad(X_batch, y_batch)\n \n self.w = self.w - eta * grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss_history.append(loss)\n return loss_history, self.w", "def __iter__(self) -> SLNode:\n cur = self.head\n while cur is not None:\n yield cur\n cur = cur.next", "def __iter__(self) -> SLNode:\n cur = self.head\n while cur is not None:\n yield cur\n cur = cur.next", "def _generator(self):\n # Initial setup\n ac = self._env.action_space.sample() # not used, just so we have the datatype\n self.new = True # marks if we're on first timestep of an episode\n self.ob = self._convert_state(self._env.reset()) \n T = self._timesteps\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n #obs = np.array([None for _ in range(T)])\n obs = nd.empty((T,) + self._env.observation_space.shape)\n rews = np.zeros(T, 'float32')\n vpreds = np.zeros(T, 'float32')\n news = np.zeros(T, 'int32')\n acs = np.array([ac for _ in range(T)])\n prevacs = acs.copy()\n\n t = 0\n while True:\n ob = self.ob # Use `self.` since `_evaluate` may have reset the env\n new = self.new\n prevac = ac\n ac, vpred = self._act(ob)\n # NOTE(openAI) Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct terminal value\n if t > 0 and t % T == 0:\n seg = {\"ob\": obs, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": np.array(copy.deepcopy(ep_rets)),\n \"ep_lens\": np.array(copy.deepcopy(ep_lens))}\n self._add_vtarg_and_adv(seg, self._gamma, self._lambda)\n yield seg\n # NOTE: Do a deepcopy if the values formerly in these arrays are used later.\n ep_rets = []\n ep_lens = []\n i = t % T\n\n obs[i] = ob[0]\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = self._convert_state(self._env.reset())\n self.new = new\n self.ob = ob\n t += 1" ]
[ "0.653499", "0.58579487", "0.5785109", "0.5771209", "0.5686856", "0.56332576", "0.5609146", "0.5551279", "0.5526352", "0.5490106", "0.54891896", "0.54797685", "0.5459814", "0.54216856", "0.5384982", "0.5376042", "0.5357695", "0.5339628", "0.53098184", "0.53075767", "0.5277881", "0.5238695", "0.52333564", "0.52109766", "0.5169075", "0.51680976", "0.5166687", "0.5160612", "0.5160612", "0.5155964" ]
0.8314999
0
List of names of all headloss curves
def headloss_curve_names(self):
    return list(self._headloss_curves)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def headloss_curves(self):\n for key in self._headloss_curves:\n yield key, self._data[key]", "def curve_name_list(self):\n return list(self._curve_reg.keys())", "def pump_curve_names(self):\n return list(self._pump_curves)", "def efficiency_curve_names(self):\n return list(self._efficiency_curves)", "def get_loss_names(self):\n losses = [tns.name[:-2].replace('loss_', '').split('/')[-1] for tns in tf.get_collection('losses')]\n return \"Losses: {}\".format(' '.join(losses))", "def loss_names(self):\n return ['loss']", "def unique_names(self):\n return list(self._nmtensor_uniname_dict.keys()) + [\"loss\"]", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def legend_names(self):\n return [leg.label for leg in self.legends]", "def endog_names(self):\n return self.data.ynames", "def base_plot_keys(self):\r\n plot_keys = [\"loss\", \"l1_loss\", \"mse_loss\", \"dur_loss\"]\r\n \r\n if self.use_fe_condition:\r\n plot_keys += [\"pitch_loss\", \"energy_loss\"]\r\n return plot_keys", "def head_pump_names(self):\n return self._head_pumps", "def untyped_curve_names(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n return list(untyped)", "def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)", "def get_loss_funcs():\n\n def _eucl_loss(x, y):\n return K.sum(K.square(x - y)) / batch_size / 2\n\n losses = {}\n losses[\"weight_stage1_L1\"] = _eucl_loss\n losses[\"weight_stage1_L2\"] = _eucl_loss\n losses[\"weight_stage2_L1\"] = _eucl_loss\n losses[\"weight_stage2_L2\"] = _eucl_loss\n losses[\"weight_stage3_L1\"] = _eucl_loss\n losses[\"weight_stage3_L2\"] = _eucl_loss\n losses[\"weight_stage4_L1\"] = _eucl_loss\n losses[\"weight_stage4_L2\"] = _eucl_loss\n losses[\"weight_stage5_L1\"] = _eucl_loss\n losses[\"weight_stage5_L2\"] = _eucl_loss\n losses[\"weight_stage6_L1\"] = _eucl_loss\n losses[\"weight_stage6_L2\"] = _eucl_loss\n\n return losses", "def name(self):\n return [o.name for o in self.obs]", "def volume_curve_names(self):\n return list(self._volume_curves)", "def layer_names(self) -> List[str]:\n _all_layers = []\n if self.category == \"ML\":\n pass\n elif self.config['backend'] == 'tensorflow':\n for layer in self.layers:\n _all_layers.append(layer.name)\n elif self.config['backend'] == 'pytorch':\n _all_layers = list(self._modules.keys())\n return _all_layers", "def head(self) -> List[str]:\n log = [\n \"idx\",\n \"from\",\n \"n\",\n \"params\",\n \"module\",\n \"arguments\",\n \"in_channel\",\n \"out_channel\",\n ]\n if self.log_shapes:\n log.append(\"in_shape\")\n log.append(\"out_shape\")\n\n return log", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b", "def phase_names(self) -> [str]:\n return [phase.hyper_name for phase in self.hyper_phases]", "def list_step_functions() -> List[str]:\n return list(STEP_SCORES_MAP.keys())", "def exog_names(self):\n return self.data.xnames", "def _get_kdl_joint_names(self):\n num_links = self._urdf_chain.getNrOfSegments()\n num_joints = self._urdf_chain.getNrOfJoints()\n joint_names = []\n for i in range(num_links):\n link = self._urdf_chain.getSegment(i)\n joint = link.getJoint()\n joint_type = joint.getType()\n # JointType definition: 
[RotAxis,RotX,RotY,RotZ,TransAxis,\n # TransX,TransY,TransZ,None]\n if joint_type > 1:\n continue\n joint_names.append(joint.getName())\n assert num_joints == len(joint_names)\n return copy.deepcopy(joint_names)", "def current_losses(loss_name):\n loss = OrderedDict()\n for item in loss_name:\n loss[item] = []\n return loss", "def names(self) -> List[str]:\n return sorted(self.hyperparams)", "def names():\n pass", "def make_label_names(name_lsit):\n\n hover_label_names = []\n for x in range(len(name_lsit)):\n temp1 = name_lsit[x]\n hover_label_names.append(temp1)\n\n return hover_label_names", "def names(self) -> List:\n ..." ]
[ "0.6845803", "0.66282517", "0.66168493", "0.6473944", "0.64283067", "0.63984525", "0.6389227", "0.6074509", "0.60124904", "0.5980222", "0.58355063", "0.58134836", "0.5809034", "0.5795786", "0.5734508", "0.5719799", "0.5690162", "0.5683712", "0.5629307", "0.5604088", "0.5600266", "0.55969775", "0.55957717", "0.55880564", "0.554777", "0.55357856", "0.5478889", "0.54218", "0.5409802", "0.53846645" ]
0.8896076
0
Generator to get all volume curves Yields
def volume_curves(self):
    for key in self._volume_curves:
        yield key, self._data[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def get_volume_batch_generators(self):\n # volgeninfo = []\n def create_volgen(shape, w, padding, features, masks):\n w = np.asarray(w)\n padding = np.asarray(padding)\n W = w - padding * 2\n iters = np.int32(np.ceil((np.asarray([s for s in shape if s > 1]) + padding) * 1.0 / (W + padding)))\n for counts in counter_generator(iters):\n start = -padding + (w - padding) * counts\n end = (w - padding) * (counts + 1)\n subf, subm = self._extract_sample(features, masks, copy.deepcopy(start), copy.deepcopy(end), shape)\n ma = np.asarray([subm])\n fe = np.asarray([subf])\n if self.channels_first:\n ndims = len(fe.shape)\n neworder = [0, ndims - 1] + [i for i in range(1, ndims - 1)]\n fe = np.transpose(fe, neworder)\n ma = np.transpose(ma, neworder)\n yield fe, ma, start, end\n\n def volgeninfo(tps):\n for tp in tps:\n features, masks = self._get_features_and_masks(tp)\n spatial_shape = np.shape(features[0])\n volgen = create_volgen(spatial_shape, self.w, self.p, features, masks)\n yield [volgen, tp, spatial_shape, self.w, self.p]\n\n return volgeninfo(self.tps)", "def ticker_generator():\n return (v for v in load_equities().values)", "def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V", "def volume_curve_names(self):\n return list(self._volume_curves)", "def items(self):\n for ene, row in zip(self.energies, self.yield_matrix):\n yield ene, FissionYield(self.products, row)", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def __iter__(self):\n return self._product_generator()", "def walk(self):\n for group in self.all_groups.values():\n yield from group.calculations", "def semigroup_generators(self):", "def efficiency_curves(self):\n for key in self._efficiency_curves:\n yield key, self._data[key]", "def blob_generator(self):\n for blob in self.data:\n yield blob", "def _generators(self):\n return self.free_group.generators", "def equation_generator(self):\n for H in self.Hrepresentation():\n if H.is_equation():\n yield H", "def __iter__(self):\n for sample in self.data:\n yield sample", "def get_assets(self):\n # The size of the price_data list should not change, even when updated\n price_data_length = len(self.price_data)\n\n for index in itertools.cycle(range(price_data_length)):\n try:\n yield self.price_data[index]\n except IndexError:\n yield None", "def generator(self) -> Iterator[Tuple[int, int, complex]]:\n for inda in range(self._core.lena()):\n alpha_str = self._core.string_alpha(inda)\n for indb in range(self._core.lenb()):\n beta_str = self._core.string_beta(indb)\n yield alpha_str, beta_str, self.coeff[inda, indb]", "def iterator(self):\n yield", "def __iter__(self):\n for element in self.focals:\n yield element", "def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)", "def semigroup_generators(self):\n return self.ambient().semigroup_generators().map(self.retract)", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n 
#print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def iter_svgs(self):\n for name in self.parent.layers:\n yield name, self.parent.layers[name]\n for elem in self.parent.elements:\n if isinstance(elem, SVG):\n yield None, elem", "def __iter__(self):\n term_v = yicespy.term_vector_t()\n yicespy.yices_init_term_vector(term_v)\n #todo here\n status = yicespy.yices_model_collect_defined_terms(self.yices_model, term_v)\n self._check_error(status)\n for d in term_v:\n try:\n pysmt_d = self.converter.back(d())\n yield pysmt_d, self.get_value(pysmt_d)\n except UndefinedSymbolError:\n # avoids problems with symbols generated by z3\n pass\n yicespy.yices_delete_term_vector(term_v)", "def vertex_iterator(self):\n for X in self.fe.L:\n for x in self.K.unit_group:\n yield (X, x)", "def __iter__(self):\n yield from self.gen", "def kmerIter(self):\n for kmer in self.kmers:\n yield kmer", "def iter_composition(self):\n array = self.data\n total = array.sum() or 1.\n return zip(self._phases, array/total)", "def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol" ]
[ "0.7373252", "0.69593436", "0.6485614", "0.6443177", "0.61216885", "0.5960025", "0.5958581", "0.59274405", "0.58617693", "0.5826482", "0.5809247", "0.58030605", "0.57921374", "0.5754389", "0.5730487", "0.57225156", "0.56956106", "0.56815445", "0.5673956", "0.5630475", "0.562162", "0.5611081", "0.56001246", "0.55966085", "0.5583944", "0.55710375", "0.5563978", "0.5557811", "0.55571485", "0.5519312" ]
0.8090784
0
List of names of all volume curves
def volume_curve_names(self):
    return list(self._volume_curves)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pump_curve_names(self):\n return list(self._pump_curves)", "def curve_name_list(self):\n return list(self._curve_reg.keys())", "def efficiency_curve_names(self):\n return list(self._efficiency_curves)", "def volume_curves(self):\n for key in self._volume_curves:\n yield key, self._data[key]", "def _curveNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.eccCurves]", "def untyped_curve_names(self):\n defined = set(self._data.keys())\n untyped = defined.difference(self._pump_curves, self._efficiency_curves, \n self._headloss_curves, self._volume_curves)\n return list(untyped)", "def headloss_curve_names(self):\n return list(self._headloss_curves)", "def curves(self):\n return self._curve_reg", "def darts(self):\r\n return self.alphas[0].keys()", "def getViewNames(self) -> list[float]:\n ...", "def getSolRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.solNames.keys()", "def get_curves(p):\n curve_list = []\n for well in p:\n curves = well.data.keys()\n for c in curves:\n curve_list.append(c)\n return sorted(set(curve_list))", "def figure_names(self) -> List[str]:\n return self._db_data.figure_names", "def names(self) -> list[str]:", "def gpv_names(self):\n return self._gpvs", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def get_volumeslice_volume_names( slice_name ):\n try:\n all_vs = models.VolumeSlice.objects.filter( slice_id__name = slice_name )\n volume_names = []\n for vs in all_vs:\n volume_names.append( vs.volume_id.name )\n \n return volume_names\n except Exception, e:\n logger.exception(e)\n logger.error(\"Failed to query datastore for volumes mounted in %s\" % slice_name)\n return None", "def getElementName(self):\n return _libsbml.Curve_getElementName(self)", "def names(self) -> List:\n ...", "def getLayerNames(self):\n\t\treturn self._fileSystem.getLayerNames()", "def name(self):\n return [o.name for o in self.obs]", "def get_currencies_names():\n names = [x for x in cur_dict]\n return names", "def getNames():\n imgs = Image.objects.raw({})\n ans = []\n for img in imgs:\n ans.append(img.name)\n ans.sort()\n return ans", "def psv_names(self):\n return self._psvs", "def keys(self):\n kys = []\n narrays = self.VTKObject.GetNumberOfArrays()\n for i in range(narrays):\n name = self.VTKObject.GetAbstractArray(i).GetName()\n if name:\n kys.append(name)\n return kys", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def compound_names(self) -> List[str]:\n return None", "def get_vgs() -> List[str]:\n p = subprocess.run(\n [\"vgs\", \"--reportformat\", \"json\"], check=True, capture_output=True\n )\n output = json.loads(p.stdout)\n return [vg[\"vg_name\"] for vg in output[\"report\"][0][\"vg\"]]", "def axesNames(self, data, info):\n return []" ]
[ "0.7351649", "0.7243365", "0.70367044", "0.6640773", "0.6593199", "0.65358555", "0.62840873", "0.6125034", "0.6002183", "0.5953892", "0.5941448", "0.5912211", "0.5876153", "0.5815371", "0.5799234", "0.579767", "0.57827914", "0.5756231", "0.5700889", "0.56664133", "0.5654718", "0.56361586", "0.5615296", "0.5613928", "0.56137043", "0.5562928", "0.55502146", "0.5549142", "0.55425024", "0.55040294" ]
0.8836443
0
Returns a generator to iterate over all nodes of a specific node type. If no node type is specified, the generator iterates over all nodes.
def __call__(self, node_type=None):
    if node_type==None:
        for node_name, node in self._data.items():
            yield node_name, node
    elif node_type==Junction:
        for node_name in self._junctions:
            yield node_name, self._data[node_name]
    elif node_type==Tank:
        for node_name in self._tanks:
            yield node_name, self._data[node_name]
    elif node_type==Reservoir:
        for node_name in self._reservoirs:
            yield node_name, self._data[node_name]
    else:
        raise RuntimeError('node_type, '+str(node_type)+', not recognized.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n", "def node_gen(self):\n for n in self.child_list:\n yield from n.node_gen\n yield self", "def get_nodes_by_type(self, node_type=None):\n target_nodes = []\n if node_type is not None:\n for node in self.nodes:\n if str(node_type).lower() == str(node.get('infos').get('type')).lower():\n target_nodes.append(node)\n return target_nodes", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def list_nodes(self, type_):\n raise NotImplementedError()", "def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node", "def __iter__(self) -> Iterable[Node]:", "def iterate_nodes(self, allowed_kinds):\n for node, node_data in self._execution_graph.nodes_iter(data=True):\n if node_data['kind'] in allowed_kinds:\n yield node", "def nodes_iter(topology):\n return topology.nodes_iter()", "def iter(self, indices=None, item_type=None):\n if item_type is not None and item_type != CUBA.NODE:\n raise ValueError(\"item_type must be CUDSItem.NODE\")\n\n return self._iter_nodes(indices)", "def generate(cls, rng, get_nodes, tree_type, depth, force_types=[],\n parent=None):\n\n # Determine allowed types\n force_types_ = [] # For children\n if force_types: # If there's forced type for this depth,\n types = force_types[0] # use it, and don't pass it on.\n force_types_ = force_types[1:]\n elif parent is not None: # Otherwise, inherit from parent.\n # Doesn't work with mutate, because\n this_i = len(parent.children)\n types = parent.child_type[this_i]\n else:\n raise ValueError('Types must be specified for each node, either '\n 'by the \"force_types\" kwarg (required for root, '\n 'optional for depths), or inherited from the '\n '\"child_type\" attribute of the parent Node.')\n\n # Decide whether terminal or function\n if depth == 0: # Always return a terminal at depth 0\n is_terminal = True\n elif (tree_type == 'g' and # For grow trees,\n 'terminal' in types and # if it's allowed,\n rng.choice([False, True])): # flip a coin.\n is_terminal = True\n else: # Otherwise, return a function\n is_terminal = False\n types = [t for t in types if t != 'terminal']\n\n # Generate a random node\n if is_terminal:\n node_data = rng.choice(get_nodes(['terminal', 'constant']))\n node = cls(node_data, tree_type, parent=parent)\n else:\n node_data = rng.choice(get_nodes(types, depth))\n node = cls(node_data, tree_type, parent=parent)\n # Generate children\n node.children = []\n for i in range(node.arity):\n node.children.append(cls.generate(\n rng, get_nodes, tree_type, depth-1, force_types_, node))\n return node", "def each_nest(nest_spec, type=None, post_order=False):\n if type is not None and type not in Nest.nest_types():\n raise RuntimeError(\"Unknown nest type '%s' in call to each_nest\" % type)\n\n for node, nest in _each_nest(nest_spec, parent_nest=Nest(), post_order=post_order):\n if type is None or (type == nest.type):\n yield nest", "def __call__(self, link_type=None):\n if link_type==None:\n for name, node in self._data.items():\n yield name, node\n elif link_type==Pipe:\n for name in self._pipes:\n yield name, self._data[name]\n elif link_type==Pump:\n for name in self._pumps:\n yield name, self._data[name]\n elif link_type==Valve:\n for name in self._valves:\n yield name, self._data[name]\n else:\n raise RuntimeError('link_type, '+str(link_type)+', not recognized.')", "def get_nodes(self) -> Iterable[RTreeNode[T]]:\n yield from self._get_nodes(self.root)", "def __iter__ (self, 
data=False):\n return self.network.nodes_iter(data=data)", "def get_nodes(self, type, query_args={}):\n endpoint = '/v3/educator/%ss' % (Node.TYPE_MAP[type])\n result = self.request(endpoint, query_args)\n\n nodes = []\n for data in result.response:\n node = Node.instance(type, data)\n nodes.append(node)\n\n return nodes", "def topological_nodes_generator(graph, reverse=...):\n ...", "def __iter__(self):\n for tree in self._tree.subTrees():\n yield self.__class__(tree)", "def all_nodes_as_iterable(self, include_metadata: bool = False) -> Generator:\n if include_metadata:\n return [\n (self._names.get_name(i), self._meta.get_node(self._names.get_name(i)))\n for i in self._nk_graph.iterNodes()\n ]\n return [self._names.get_name(i) for i in self._nk_graph.iterNodes()]", "def iterate_nodes(\n self,\n keys: istr = None,\n terms: istr = None,\n prefixes: istr = None,\n labels: istr = None,\n ) -> Iterable[Node]:", "def nodes(self, root=None, order=\"preorder\"):\n methods = {\n \"preorder\": self._preorder_traversal,\n \"inorder\": self._inorder_traversal,\n \"postorder\": self._postorder_traversal,\n \"levelorder\": self._levelorder_traversal,\n \"breadthfirst\": self._levelorder_traversal,\n \"timeasc\": self._timeasc_traversal,\n \"timedesc\": self._timedesc_traversal,\n \"minlex_postorder\": self._minlex_postorder_traversal,\n }\n try:\n iterator = methods[order]\n except KeyError:\n raise ValueError(f\"Traversal ordering '{order}' not supported\")\n\n root = -1 if root is None else root\n return iterator(root)", "def __iter__(self):\n return iter(self.node)", "def __iter__(self):\n for node in self.grammar.walk():\n yield node", "def iterate_types(self) -> Iterator[FakeAnnotation]:\n yield from self.client.iterate_types()\n if self.service_resource:\n yield from self.service_resource.iterate_types()\n for waiter in self.waiters:\n yield from waiter.iterate_types()\n for paginator in self.paginators:\n yield from paginator.iterate_types()", "def report(self, node_types: List[str] = None):\n for node_type, results in self.nodes.items():\n if node_types is not None and node_type in node_types:\n print(node_type)\n pprint(results)", "def iter_all(self):\n for i in range(self.num_nodes):\n self.iter_node(i)", "def __iter__(self):\n node = self.head\n while node is not None:\n yield node._data\n node = node._next", "def iter_nodes(self):", "def allNodeTypes(*args, includeAbstract: bool=True, **kwargs)->List[AnyStr]:\n pass", "def walk(node):\r\n from collections import deque\r\n todo = deque([node])\r\n while todo:\r\n node = todo.popleft()\r\n todo.extend(iter_child_nodes(node))\r\n yield node" ]
[ "0.6656627", "0.6592198", "0.64690566", "0.6432494", "0.6153438", "0.6133566", "0.60828686", "0.6074038", "0.60599685", "0.6046766", "0.60308146", "0.5862091", "0.58604354", "0.5841883", "0.58378714", "0.5809017", "0.57915753", "0.5784165", "0.5725038", "0.5722978", "0.5706924", "0.5691698", "0.56626034", "0.5655353", "0.5625635", "0.56184775", "0.5610323", "0.5591866", "0.557543", "0.5569712" ]
0.7499164
0
Adds a tank to the water network model.
def add_tank(self, name, elevation=0.0, init_level=3.048, min_level=0.0, max_level=6.096, diameter=15.24, min_vol=0.0, vol_curve=None, overflow=False, coordinates=None):
    assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, "name must be a string with less than 32 characters and contain no spaces"
    assert isinstance(elevation, (int, float)), "elevation must be a float"
    assert isinstance(init_level, (int, float)), "init_level must be a float"
    assert isinstance(min_level, (int, float)), "min_level must be a float"
    assert isinstance(max_level, (int, float)), "max_level must be a float"
    assert isinstance(diameter, (int, float)), "diameter must be a float"
    assert isinstance(min_vol, (int, float)), "min_vol must be a float"
    assert isinstance(vol_curve, (type(None), str)), "vol_curve must be a string"
    assert isinstance(overflow, (type(None), str, bool, int)), "overflow must be a bool, 'YES' or 'NO', or 0 or 1"
    assert isinstance(coordinates, (type(None), (tuple,list,))), "coordinates must be a tuple"
    elevation = float(elevation)
    init_level = float(init_level)
    min_level = float(min_level)
    max_level = float(max_level)
    diameter = float(diameter)
    min_vol = float(min_vol)
    if init_level < min_level:
        raise ValueError("Initial tank level must be greater than or equal to the tank minimum level.")
    if init_level > max_level:
        raise ValueError("Initial tank level must be less than or equal to the tank maximum level.")
    if vol_curve is not None and vol_curve != '*':
        if not isinstance(vol_curve, six.string_types):
            raise ValueError('Volume curve name must be a string')
        elif not vol_curve in self._curve_reg.volume_curve_names:
            raise ValueError('The volume curve ' + vol_curve + ' is not one of the curves in the ' +
                             'list of volume curves. Valid volume curves are:' + str(self._curve_reg.volume_curve_names))
        vcurve = np.array(self._curve_reg[vol_curve].points)
        if min_level < vcurve[0,0]:
            raise ValueError(('The volume curve ' + vol_curve + ' has a minimum value ({0:5.2f}) \n' +
                              'greater than the minimum level for tank "' + name + '" ({1:5.2f})\n' +
                              'please correct the user input.').format(vcurve[0,0],min_level))
        elif max_level > vcurve[-1,0]:
            raise ValueError(('The volume curve ' + vol_curve + ' has a maximum value ({0:5.2f}) \n' +
                              'less than the maximum level for tank "' + name + '" ({1:5.2f})\n' +
                              'please correct the user input.').format(vcurve[-1,0],max_level))
    tank = Tank(name, self)
    tank.elevation = elevation
    tank.init_level = init_level
    tank.min_level = min_level
    tank.max_level = max_level
    tank.diameter = diameter
    tank.min_vol = min_vol
    tank.vol_curve_name = vol_curve
    tank.overflow = overflow
    self[name] = tank
    if coordinates is not None:
        tank.coordinates = coordinates
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tank(self, name, elevation=0.0, init_level=3.048,\n min_level=0.0, max_level=6.096, diameter=15.24,\n min_vol=0.0, vol_curve=None, overflow=False, coordinates=None):\n self._node_reg.add_tank(name, elevation, init_level, min_level, \n max_level, diameter, min_vol, vol_curve, \n overflow, coordinates)", "def add_to_water_level(self, amount):\n LandCell.add_to_water_level(self, amount)\n if self.water_level > 0:\n self.reset_food_level()", "def add_fuel(self, amount):\n if (self.fuel_level + amount\n <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel.\")\n else:\n print(\"The tank won't hold that much.\")", "def add_to_water_level(self, amount):\n self.water_level += amount\n if self.water_level < 0:\n self.water_level = 0.0", "def _addTurtle(self,turt):\n assert (type(turt) == Turtle), \"Parameter %s is not a valid Turtle object\" % `turt`\n self._turtles.append(turt)", "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def add_inputt(self, name='T', control=False):\n inpt = InputT(name=name)\n self.nodes[name] = inpt\n self.rc.add_node(inpt)\n if control: # control input\n if name in self.inp.keys():\n raise Exception('Input temperature already defined')\n self.inp[name] = inpt\n else: # disturbance\n if name in self.dist.keys():\n raise Exception('Input temperature already defined')\n self.dist[name] = inpt", "def add(self, rank, birth_year, enlisting_year, shirt_color, name):\n # Your implementation here", "def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)", "def add_weight(self):\r\n\r\n # Get the csrf token\r\n csrf = self.extract_csrf('https://wger.de/en/weight/add/')\r\n # Adding referer to the headers\r\n self.headers['Referer'] = API.url_weight\r\n\r\n # Take the weight entires from TOML file\r\n entries = self.cfg.get('payload', {}).get('weight')\r\n # Check for valid entires\r\n if entries:\r\n for payload in entries:\r\n # Add csrf token to payload\r\n payload['csrfmiddlewaretoken'] = csrf\r\n # Test the entry with it's json schema\r\n check.check_entry(path='schemas/weight.json', test=payload)\r\n # Post request\r\n self.add_post(payload, API.url_weight, self.weights)\r\n \r\n # Eliminates the referer from the headers\r\n self.headers.pop('Referer')", "def fill_tank(self):\n print(\"This car has no fuel tank!\")", "def addTP(self, num=1):\n self.tp += num", "def add_pagerank(self):\n\n query = '''\n MATCH (c1:)-[r:INTERACTS]->(c2:)\n RETURN c1.name, c2.name, r.weight AS weight\n '''\n ig = IGraph.TupleList(self.graph.run(query), weights=True)\n\n pg = ig.pagerank()\n pgvs = []\n for p in zip(ig.vs, pg):\n print(p)\n pgvs.append({\"name\": p[0][\"name\"], \"pg\": p[1]})\n\n write_clusters_query = '''\n UNWIND {nodes} AS n\n MATCH (c:) WHERE c.name = n.name\n SET c.pagerank = n.pg\n '''\n\n self.graph.run(write_clusters_query, nodes=pgvs)", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def GachaCraftNodeExcelAddTier(builder, Tier):\n return AddTier(builder, Tier)", "def addTN(self, num=1):\n self.tn += num", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in range(kw.get(\"nodes\", 
1)):\n self.add_node(fleetid)", "def add_road(ccTremb):\n pass", "def fill_gas_tank(self):\n print(\"Filling the tank for\", self.get_descriptive_name())", "async def addTier(self, ctx, tier):\n server_dict = self.get_server_dict(ctx)\n tierList = server_dict.setdefault(\"Tiers\", [])\n \n try:\n tierList.append(tier)\n self.save_data()\n await self.bot.say(\":white_check_mark: {0} added to tier list\".format(tier))\n except:\n await self.bot.say(\":x: Error adding {0} to the tier list\".format(tier))", "def __add_boundary_contrib_prediction(self, bc, b_idx):\n if bc is not None:\n if bc.boundary_condition_type is configuration.BoundaryConditionType.DIRICHLET:\n self.ustar[b_idx] = bc.value(self.time)\n else:\n self.ustar[b_idx] += self.timestep * self.timestep * bc.value(self.time)", "def add_waypoint(self, waypoint):\n self.drone.add_waypoint(waypoint)", "def add_branch(self, branch):\n self.branch.append(branch)", "def add(self, obs_t, action, reward, obs_tp1, done):\n if random.uniform(0,1) < self.fifo_frac:\n self.fifo_buffer.add(obs_t, action, reward, obs_tp1, done)\n else:\n self.reservoir_buffer.add(obs_t, action, reward, obs_tp1, done)", "def add_tweet(self, tweet):\r\n self.tweets.append(tweet)", "def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def add_drink_order(self, chair_num, _drink):\n self.customers[chair_num].add_drink(_drink)", "def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)", "def add(self, layer):\n if len(self.layers) == 0:\n if not layer.n_inputs:\n raise Exception('Need to have n_inputs for layer.')\n else:\n layer.n_inputs = self.layers[-1].units\n self.layers.append(layer)", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node" ]
[ "0.63137543", "0.55525565", "0.5492671", "0.5472554", "0.5433488", "0.53404385", "0.5299355", "0.5268483", "0.5242195", "0.5218148", "0.51868045", "0.5135821", "0.51356757", "0.51308405", "0.511713", "0.5112451", "0.5082069", "0.5050836", "0.50369924", "0.5035343", "0.5015549", "0.5009186", "0.49932134", "0.49894917", "0.49544743", "0.49543563", "0.49350315", "0.4934302", "0.49175066", "0.49047953" ]
0.59282255
1
List of names of all junctions
def junction_names(self): return self._junctions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def junction_name_list(self):\n return list(self._node_reg.junction_names)", "def junctions(self):\n return self._node_reg.junctions", "def junctions(self):\n for node_name in self._junctions:\n yield node_name, self._data[node_name]", "def junction_char(self):\n ...", "def names(self) -> list[str]:", "def get_names(cat):\n res = []\n while cat:\n res.append(cat.name)\n cat = cat.parent_id\n return res", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def getNames(self) -> List[unicode]:\n ...", "def names(self) -> List:\n ...", "def names(self):\n return list(item.name for item in self.mechanisms)", "def getAllNames(self):\n result = []\n node = self\n while not node.isRoot():\n result.insert(0, node.getName())\n node = node.getParent()\n result.insert(0, node.getName())\n return result", "def link_name_list(self):\n return list(self._link_reg.keys())", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]", "def get_motoneurons_names(self):\n\t\treturn self._motoneuronsNames", "def names(self):\n return [da.name for da in self]", "def gapJunctions(self, recurse = True):\n \n junctions = []\n junctions += self._gapJunctions\n if recurse:\n for subNeurite in self._neurites:\n junctions += subNeurite.gapJunctions()\n return junctions", "def namelist(self):\n return []", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def _get_kdl_joint_names(self):\n num_links = self._urdf_chain.getNrOfSegments()\n num_joints = self._urdf_chain.getNrOfJoints()\n joint_names = []\n for i in range(num_links):\n link = self._urdf_chain.getSegment(i)\n joint = link.getJoint()\n joint_type = joint.getType()\n # JointType definition: [RotAxis,RotX,RotY,RotZ,TransAxis,\n # TransX,TransY,TransZ,None]\n if joint_type > 1:\n continue\n joint_names.append(joint.getName())\n assert num_joints == len(joint_names)\n return copy.deepcopy(joint_names)", "def neighbors(self):\n return [e.name for e in self.edges()]", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]", "def names(cls) -> List[str]:", "def external_terminologies(self):\n terms = set()\n for node_record in self.graph.run(\"MATCH (n) RETURN (n)\"):\n node = node_record[\"n\"]\n if \"links_to\" in node:\n terms.add(node[\"links_to\"])\n return terms", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def list(self):\n\n result = []\n for i in self.bots:\n result.append(i.name)\n return result", "def get_real_motoneurons_names(self):\n\t\treturn self._realMotoneuronsNames" ]
[ "0.85728174", "0.77329415", "0.7549278", "0.6560675", "0.62946075", "0.6288291", "0.62225926", "0.61713374", "0.6107669", "0.61055756", "0.6003041", "0.6001774", "0.5998919", "0.5970804", "0.59657425", "0.594276", "0.59211296", "0.58707106", "0.58707106", "0.5866867", "0.5866867", "0.583744", "0.583616", "0.5835369", "0.57885987", "0.57568294", "0.5739691", "0.5739691", "0.5737114", "0.5727189" ]
0.85988754
0
Generator to get all junctions Yields
def junctions(self): for node_name in self._junctions: yield node_name, self._data[node_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def junctions(self):\n return self._node_reg.junctions", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def __iter__(self):\n leaf_paths, leaf_vals = self._find_combinatorial_leaves()\n return self._combinations_generator(leaf_paths, leaf_vals)", "def iter_all_chains(self):\n for model in self.model_list:\n for chain in model.chain_list:\n yield chain", "def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y", "def junction_names(self):\n return self._junctions", "def __next__(self):\n for child in self.children:\n yield child", "def __iter__(self):\n for node in self.grammar.walk():\n yield node", "def __iter__(self):\n return iter(self.chain_list)", "def __iter__(self):\n from sage.misc.mrange import cartesian_product_iterator\n\n if self._cd._length == 1:\n if self._cd._degree == 1:\n yield self([[0]])\n return\n\n S = self._cd._sym\n profile = list(self._profile)[:-1]\n for p in cartesian_product_iterator([S.conjugacy_class(pi)\n for pi in profile]):\n if self._cd._connected and not perms_are_connected(p, self._cd._degree):\n continue\n c = self._cd(list(p) + [None], check=False)\n if c.profile() == self._profile:\n yield c", "def __iter__(self):\n return iter(self.parents)", "def traverse(self):\n yield self\n for k in self._kids:\n for kk in k.traverse():\n yield kk", "def __iter__(self):\n for child in self.children:\n yield child", "def __iter__(self):\n return iter(self.chain)", "def __iter__(self):\n for i in range(len(self.ks)):\n yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\\\n [self.ks[i]], self.iss", "def __iter__(self):\n\n for i in self._children:\n yield i", "def iter_chains(self):\n return iter(self.chain_list)", "def walk(self):\n yield (self,)\n for item in itertools.chain(\n self._cal_objs.values(),\n self._noise_objs.values()):\n if isinstance(item, Budget):\n for i in item.walk():\n yield (self,) + i\n else:\n yield (self, item)", "def __next__(self):\n for child in self.children:\n yield child\n return\n #self.parent.next()", "def __iter__(self):\n from itertools import product\n\n if self._length == 1:\n if self._degree == 1:\n yield self([[0]])\n return\n\n S = self._sym\n for p in product(S, repeat=self._length - 1):\n if self._connected and not perms_are_connected(p, self._degree):\n continue\n yield self(list(p) + [None], check=False)", "def iter_links(self):\n for site in self.iter_sites():\n for u in range(self.dim):\n yield tuple(list(site) + [u])", "def __iter__(self):\n return iter(self.adjacent)", "def __iter__(self):\n for plug in self.plugs:\n yield plug", "def __iter__(self):\n for id in self.order():\n inputs = [w for w in self.wires if w['target'][0] == id]\n yield id, inputs", "def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }", "def iter_links(self):", "def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(list())", "def cascade_iterator(self, type_, state, visited_instances=None,\n halt_on=None):\n\n return iter(())", "def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)", "def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce 
offspring\n p1, p2 = self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p" ]
[ "0.695294", "0.6529823", "0.6474808", "0.60850096", "0.6082278", "0.5956198", "0.5954295", "0.5935453", "0.5934156", "0.5906857", "0.5890785", "0.588711", "0.5857223", "0.58099526", "0.58058184", "0.57999444", "0.5799198", "0.5794269", "0.5787864", "0.5780796", "0.5733086", "0.5726463", "0.57221884", "0.57049984", "0.5698666", "0.5680135", "0.5671084", "0.5667402", "0.5660411", "0.5648771" ]
0.8599879
0
Generator to get all tanks Yields
def tanks(self): for node_name in self._tanks: yield node_name, self._data[node_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sequence(self):\n for tn in self._testnodes:\n yield tn", "def __iter__(self):\n for benchclass in sorted(self.classes.values()):\n yield benchclass", "def tanks(self):\n return self._node_reg.tanks", "def __iter__(self):\n for benchinst in sorted(self.instances.values()):\n yield benchinst", "def semigroup_generators(self):", "def __iter__(self):\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s", "def nodes(self):\n for node_set in self.itervalues():\n for node in node_set:\n yield node", "def __iter__(self):\n for block in sorted(self.blocks.values(), key=lambda b: b.ordinal):\n yield block", "def entries(self):\n entries = []\n for t in range(self.num_steps):\n for b in range(self.batch_size):\n entries.append((t, b))\n while True:\n random.shuffle(entries)\n for entry in entries:\n yield entry", "def tank_names(self):\n return self._tanks", "def generators(n):\n return [g for g in zn_star(n)\n if is_generator_in_zn(g, n)]", "def _generators(self):\n return self.free_group.generators", "def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1", "def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y", "def __iter__(self):\n for x in self.innings:\n yield x", "def _generators_for_H(self):\n if self.level() in [1, 2]:\n return []\n return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]", "def iterator(self):\n yield", "def numbers():\n for number in range(1, 76):\n yield number", "def __iter__(self):\n for x in self._order:\n yield x", "def _get_test_generator(self):\n for data_element in self.test:\n image, heatmap = self._generate_input_tuple(data_element)\n \n yield (image, heatmap)", "def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []", "def __iter__(self):\n for run in self.runs:\n yield run", "def generator(self):\n return [None, 1]", "def node_gen(self):\n for n in self.child_list:\n yield from n.node_gen\n yield self", "def __iter__(self):\n n = self.head\n for _ in range(len(self)):\n if n == self.capacity:\n n = 0\n yield self.lst[n]\n n += 1", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element", "def TankPositionGenerator(geometry):\n # for omkey, geo in geometry.omgeo:\n # if 60 < omkey.om < 65:\n # yield omkey, geo.position\n for station in geometry.stationgeo.values():\n for tank in station:\n yield tank.omkey_list[0], tank.position\n yield tank.omkey_list[1], tank.position", "def __iter__(self):\n for node in self.grammar.walk():\n yield node", "def __iter__(self):\n yield from self.gen", "def __iter__(self):\n for bucket in self._table:\n if bucket is not None:\n for key in bucket:\n yield key" ]
[ "0.6375296", "0.6296786", "0.6217762", "0.5907048", "0.5903566", "0.5794458", "0.5770278", "0.5761144", "0.5734883", "0.5724717", "0.57015395", "0.5697612", "0.567935", "0.5654618", "0.5636768", "0.56305444", "0.56232035", "0.56223494", "0.56080455", "0.5574875", "0.55670696", "0.5566274", "0.553566", "0.5524086", "0.5493678", "0.549036", "0.54783636", "0.54682094", "0.5464287", "0.5448429" ]
0.79621655
0
Generator to get all reservoirs Yields
def reservoirs(self): for node_name in self._reservoirs: yield node_name, self._data[node_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generators(self):\n return self.free_group.generators", "def __iter__(self):\n yield from self.gen", "def iterator(self):\n yield", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def iter_sequence(self):\n for res_name, fragment in self.sequence_fragment_list:\n yield res_name", "def generators(self):\n return self._generators", "def __iter__(self):\n for runspec in self.runspecs:\n yield runspec", "def generator(self):\n return [None, 1]", "def __iter__(self):\n for run in self.runs:\n yield run", "def reservoir_generator(\n number_of_reservoirs=None, sparsity_level=None, reservoir_dim=None\n):\n reservoir_computing = ReservoirComputing(\n reservoir_dim=reservoir_dim, sparsity_level=sparsity_level\n )\n\n return [reservoir_computing.create_reservoir() for n in range(number_of_reservoirs)]", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def __iter__(self):\n for x in self.innings:\n yield x", "def get_rings(self):\n return iter(self)", "def ticker_generator():\n return (v for v in load_equities().values)", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element", "def getGenerators(self) -> list:\n return self.state[GENERATORS]", "def __iter__(self):\n for transaction in self.transaction_list:\n yield transaction", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def items(self):\n for ene, row in zip(self.energies, self.yield_matrix):\n yield ene, FissionYield(self.products, row)", "def get_processes():\n yield from psutil.process_iter()", "def generators(n):\n return [g for g in zn_star(n)\n if is_generator_in_zn(g, n)]", "def __iter__(self):\n return self.new_generator()", "def yields ( self ) :\n return tuple ( [ i for i in self.alist2 ] )", "def yields ( self ) :\n return tuple ( [ i for i in self.alist2 ] )", "def stepregistry():\n registry = StepRegistry()\n yield registry", "def customer_generator(env, inventory_stock):\n for i in itertools.count():\n yield env.timeout(random.randint(*T_INTER))\n env.process(customer(env, inventory_stock, 'Customer_'+str(i+1)))", "def __iter__(self):\n for x in self.seq: yield x", "def yieldRPC(remoteYields): #Status: WIP\r\n pass", "def generator(gens):\n if len(gens) < 20:\n gens.append(rule_110(gens[-1], gens[-1].copy(), 1))\n generator(gens)\n return gens", "def iter_relocations(self):\n for i in range(self.num_relocations()):\n yield self.get_relocation(i)" ]
[ "0.6724822", "0.65291315", "0.6520068", "0.64708203", "0.64076495", "0.6373286", "0.6365344", "0.619911", "0.6189909", "0.6157703", "0.614191", "0.6108884", "0.610793", "0.60956895", "0.6089938", "0.60685253", "0.60475975", "0.603563", "0.6025875", "0.60146135", "0.6003946", "0.59809774", "0.59663063", "0.59663063", "0.59606606", "0.59565514", "0.5947439", "0.5946719", "0.59249794", "0.5918746" ]
0.73494977
0