query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Pad or truncate a list `x` with the values `pad_value` and `maxlen`. | def list_pad_or_truncate(x, maxlen, pad_value=None):
length = len(x)
if maxlen > length:
x += [pad_value] * (maxlen - length)
elif maxlen < length:
x = x[:maxlen]
return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list",
"def pad_tokens(x, max_length, pad_token_id,\n truncate_from=\"left\",\n pad_from=\"left\"):\n assert truncate_from in (\"left\", \"right\")\n assert pad_from in (\"left\", \"right\")\n if len(x) > max_length:\n if truncate_from == \"left\":\n return x[-max_length:]\n else:\n return x[:max_length]\n elif len(x) < max_length:\n padding = [pad_token_id] * (max_length - len(x))\n if pad_from == \"left\":\n return padding + x\n else:\n return x + padding\n else:\n return x",
"def padding(arr, max_len, pad_val):\n tmp = []\n if len(arr) <= max_len:\n tmp.extend([element for element in arr])\n tmp.extend([pad_val for _ in range(max_len - len(arr))])\n else:\n tmp.extend([element for element in arr[:max_len]])\n return tmp",
"def add_padding(x, maxlen=500):\n \n # May want to increase maxlen from 500! Not sure the total dist of chomragram lengths.\n\n for i in range(len(x)):\n x[i] = x[i][:,:maxlen]\n q = maxlen - x[i].shape[1]\n p = q//2\n# if q % 2 == 0:\n# x[i] = np.pad(x[i], ((p,p), (0,0)), 'constant', constant_values=(0,0))\n# else:\n# x[i] = np.pad(x[i], ((p,p+1), (0,0)), 'constant', constant_values=(0,0))\n\n print\n if q % 2 == 0:\n x[i] = np.pad(x[i], ((0,0), (p,p)), 'constant', constant_values=(0,0))\n else:\n x[i] = np.pad(x[i], ((0,0), (p,p+1)), 'constant', constant_values=(0,0))\n \n return x",
"def pad_sequence_to_length(sequence: List,\n desired_length: int,\n default_value: Callable[[], Any] = lambda: 0,\n padding_on_right: bool = True) -> List:\n # Truncates the sequence to the desired length.\n if padding_on_right:\n padded_sequence = sequence[:desired_length]\n else:\n padded_sequence = sequence[-desired_length:]\n # Continues to pad with default_value() until we reach the desired length.\n for _ in range(desired_length - len(padded_sequence)):\n if padding_on_right:\n padded_sequence.append(default_value())\n else:\n padded_sequence.insert(0, default_value())\n return padded_sequence",
"def pad_sequence_to_length(sequence: List,\n desired_length: int,\n default_value: Callable[[], Any] = lambda: 0,\n padding_on_right: bool = True) -> List:\n # Truncates the sequence to the desired length.\n if padding_on_right:\n padded_sequence = sequence[:desired_length]\n else:\n padded_sequence = sequence[-desired_length:]\n # Continues to pad with default_value() until we reach the desired length.\n for _ in range(desired_length - len(padded_sequence)):\n if padding_on_right:\n padded_sequence.append(default_value())\n else:\n padded_sequence.insert(0, default_value())\n return padded_sequence",
"def np_pad(x, list_thresh):\n x = np.array(x)\n x = np.pad(x, pad_width = ((0,0),(0,list_thresh-x.shape[1])), mode=\"constant\", constant_values=0)\n return x",
"def pad_sequences(sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret",
"def pad_list(xs, pad_value=0.0, pad_left=False):\n bs = len(xs)\n max_time = max(x.size(0) for x in xs)\n xs_pad = xs[0].new_zeros(bs, max_time, *xs[0].size()[1:]).fill_(pad_value)\n for b in range(bs):\n if len(xs[b]) == 0:\n continue\n if pad_left:\n xs_pad[b, -xs[b].size(0):] = xs[b]\n else:\n xs_pad[b, :xs[b].size(0)] = xs[b]\n return xs_pad",
"def pad_list(xs, pad_value=0.0, pad_left=False):\n bs = len(xs)\n max_time = max(x.size(0) for x in xs)\n xs_pad = xs[0].new_zeros(bs, max_time, * xs[0].size()[1:]).fill_(pad_value)\n for b in range(bs):\n if len(xs[b]) == 0:\n continue\n if pad_left:\n xs_pad[b, -xs[b].size(0):] = xs[b]\n else:\n xs_pad[b, :xs[b].size(0)] = xs[b]\n return xs_pad",
"def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result",
"def pad(lst, pad_size, filler):\n assert(isinstance(lst, list))\n assert(pad_size - len(lst) >= 0)\n lst.extend([filler] * (pad_size - len(lst)))",
"def pad_sequences(self,sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret",
"def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence",
"def _pad_or_cut_to_max_seq_len(x,\n max_seq_len):\n # Shape of x (n_crops, num_patches, c)\n assert len(tf.shape(x)) == 3\n # Padding makes sure that # patches > max_seq_length. Note that it also\n # makes the input mask zero for shorter input.\n paddings = tf.zeros([tf.shape(x)[0], max_seq_len,\n tf.shape(x)[-1]],\n dtype=x.dtype)\n x = tf.concat([x, paddings], axis=1)\n # Cuts to max_seq_len number of patches.\n x = x[:, :max_seq_len, :]\n return x",
"def pad_sequences(sequences, maxlen, nb_sequences, dtype='int32', value=-1):\n\n x = (numpy.ones((nb_sequences, maxlen)) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n trunc = s[:maxlen]\n\n x[idx, :len(trunc)] = trunc\n\n return x",
"def pad_with_border(x, n_pad):\n x_pad_list = [x[0:1]] * int(n_pad) + [x] + [x[-1:]] * int(n_pad)\n return np.concatenate(x_pad_list, axis=0)",
"def pad_sequences(sequences, maxlen=None, dtype='int32',\n padding='pre', truncating='pre', value=0.):\n if not hasattr(sequences, '__len__'):\n raise ValueError('`sequences` must be iterable.')\n lengths = []\n for x in sequences:\n if not hasattr(x, '__len__'):\n raise ValueError('`sequences` must be a list of iterables. '\n 'Found non-iterable: ' + str(x))\n lengths.append(len(x))\n\n num_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0:\n sample_shape = np.asarray(s).shape[1:]\n break\n\n is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)\n if isinstance(value, str) and dtype != object and not is_dtype_str:\n raise ValueError(\"`dtype` {} is not compatible with `value`'s type: {}\\n\"\n \"You should set `dtype=object` for variable length strings.\"\n .format(dtype, type(value)))\n\n x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n for idx, s in enumerate(sequences):\n if not len(s):\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" '\n 'not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s '\n 'is different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x",
"def pad(input_list: List[int], padding_size: int, padding_value: int) -> List[int]:\n return input_list + [padding_value] * abs((len(input_list) - padding_size))",
"def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)",
"def _pad(self, array, sentinel, max_len=None):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n if max_len is not None:\n maxlen = max(maxlen, max_len)\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens",
"def pad_trunc(data, maxlen):\n new_data = []\n\n # Create a vector of 0's the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = sample\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data",
"def add_padding(*data, value, maxlen=250, padding=\"post\"):\n return [keras.preprocessing.sequence.pad_sequences(\n d, value=value, padding=padding,\n maxlen=maxlen) for d in data]",
"def pad_trunc(data, maxlen):\n new_data = []\n # Create a vector of 0s the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n # Append the appropriate number 0 vectors to the list\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data",
"def pad_list(xs: torch.Tensor, pad_value: int):\n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)\n\n for i in range(n_batch):\n pad[i, : xs[i].size(0)] = xs[i]\n\n return pad",
"def pad_sequences_1d(sequences, max_len=None, padding='post', truncating='post', value=0.):\n return pad_sequences(sequences, maxlen=max_len, padding=padding, truncating=truncating,\n value=value)",
"def pad_data(d):\n max_len = set((len(i) for i in d))\n if len(max_len) == 1:\n return d\n else:\n max_len = max(max_len)\n return [i + [\"\"] * (max_len - len(i)) for i in d]",
"def pad_sequence(self, arr, max_length_tweet):\n # padding a list of indices with 0 until a maximum length (max_length_tweet)\n if max_length_tweet>len(arr):\n trailing_zeros = [0]*(max_length_tweet-len(arr))\n arr.extend(trailing_zeros)\n return arr[:max_length_tweet]",
"def pad_sequences(self, X):\n return pad_sequences(X, maxlen=self.pad_length)",
"def pad_digits(x, width):\n if pd.notnull(x):\n return '{0:0{1}d}'.format(int(x), width)\n else:\n return x"
] | [
"0.7232516",
"0.7194974",
"0.70571303",
"0.7005884",
"0.69063616",
"0.69063616",
"0.6899334",
"0.68993306",
"0.68805975",
"0.68804926",
"0.68802965",
"0.6851657",
"0.6702354",
"0.66523",
"0.6640961",
"0.65956634",
"0.65647626",
"0.65528095",
"0.6521586",
"0.6515963",
"0.6478317",
"0.64777493",
"0.64330524",
"0.6407414",
"0.63984144",
"0.6364932",
"0.62974817",
"0.6158804",
"0.61391807",
"0.6131111"
] | 0.8817511 | 0 |
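The positive document for this row is already self-contained. As a quick sanity check, a minimal usage sketch (simply re-declaring the function from the document cell) exercises both the padding and the truncation branches:

```python
def list_pad_or_truncate(x, maxlen, pad_value=None):
    # Pad x up to maxlen with pad_value, or cut it down to maxlen.
    length = len(x)
    if maxlen > length:
        x += [pad_value] * (maxlen - length)
    elif maxlen < length:
        x = x[:maxlen]
    return x

print(list_pad_or_truncate([1, 2, 3], 5))     # [1, 2, 3, None, None]
print(list_pad_or_truncate([1, 2, 3, 4], 2))  # [1, 2]
```

Note that the padding branch extends the input list in place via `+=`, while the truncation branch rebinds `x` to a new slice and leaves the caller's list unchanged.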
Return list of rain fall for previous year | def precipitation():
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
rain = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date > last_year).\
order_by(Measurement.date).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xbrl_years(self):\n return [year for year in self.years if year >= 2021]",
"def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n if i == 0:\n for draw in list(range(0, 1000)):\n current[f'draw_{draw}'] = 1\n else:\n prior = (data.loc[data.year == years[i - 1]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n current = 1 - ((current - prior) * 0.75 / current)\n current['year'] = years[i]\n final = pd.concat([final, current])\n final = final.reset_index().set_index([c for c in data.columns if 'draw' not in c]).sort_index()\n return final",
"def austral_year_daily(x, y):\n if isinstance(x, xr.DataArray):\n x = x.values\n \n jfmamj = x < 182.\n jasond = x >= 182.\n \n x_jasond = []\n y_jasond = []\n if any(jasond):\n x_jasond = x[jasond] - 181\n y_jasond = y[jasond]\n\n x_jfmamj = []\n y_jfmamj = []\n if any(jfmamj):\n x_jfmamj = x[jfmamj] + 184\n y_jfmamj = y[jfmamj]\n\n xout = np.concatenate([xi for xi in [x_jasond, x_jfmamj] if len(xi)])\n yout = np.concatenate([yi for yi in [y_jasond, y_jfmamj] if len(yi)])\n \n return xout, yout",
"def get_previous_yr(df, df2, years):\n # Get n+_ year\n df[\"season_n-{}_tmp\".format(years)] = df[\"season\"] - years\n df_merged = pd.merge(df, df2, how=\"left\", left_on=[\"player\", \"player_id\", \"season_n-{}_tmp\".format(years)],\n right_on=[\"player\", \"player_id\", \"season\"],\n suffixes=['', \"_n-{}\".format(years)])\n\n df_merged = df_merged.drop([\"season_n-{}_tmp\".format(years)], axis=1)\n\n return df_merged",
"def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]",
"def chance_of_rain(self):\r\n # Amount of yesterday's rain indicating chance of it occurring.\r\n NO_RAIN = 0.1\r\n LITTLE_RAIN = 3\r\n SOME_RAIN = 8\r\n # Chance of rain occurring.\r\n NONE = 0\r\n MILD = 40\r\n PROBABLE = 75\r\n LIKELY = 90\r\n\r\n if self._yesterdays_weather.get_rainfall() < NO_RAIN:\r\n chance_of_rain = NONE\r\n elif self._yesterdays_weather.get_rainfall() < LITTLE_RAIN:\r\n chance_of_rain = MILD\r\n elif self._yesterdays_weather.get_rainfall() < SOME_RAIN:\r\n chance_of_rain = PROBABLE\r\n else:\r\n chance_of_rain = LIKELY\r\n\r\n return chance_of_rain",
"def showPreviousYear(self):\n pass",
"def dbf_years(self):\n return [year for year in self.years if year <= 2020]",
"def get_sea_level_raw(start_year, end_year, path_out):\n c = cdsapi.Client()\n\n for year in range(start_year, end_year + 1):\n\n print(f\"Starting Year: {year}\")\n\n c.retrieve(\n \"satellite-sea-level-global\",\n {\n \"format\": \"tgz\",\n \"year\": [str(year)],\n \"month\": [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n ],\n \"day\": [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n \"20\",\n \"21\",\n \"22\",\n \"23\",\n \"24\",\n \"25\",\n \"26\",\n \"27\",\n \"28\",\n \"29\",\n \"30\",\n \"31\",\n ],\n },\n os.path.join(path_out, str(year) + \"_download.tar.gz\"),\n )",
"def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif 
self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n 
overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)",
"def get_past_game_dates(self, year=None):\n year = self._year if not year else year\n data = list(self._db.Games.aggregate([{'$project':\n {'_id' : 0,\n 'date' : 1}}]))\n dates = set([x['date'] for x in data\n if x['date'].split('-')[0] == year])\n return dates",
"def get_xc_races_by_year(self, year):\n try:\n results = self.__get_results('CALL GetXcRacesByYear({0})'.format(year))\n return [XcRace(result).get_json() for result in results] if results else []\n except Exception as e:\n logging.exception(e)\n raise",
"def _get_data_pre2007(date):\n #the data is obtained from one file for each year.\n\n url = '{}/Environmental_Data_{}/'.format(BASE_URL, date.year)\n print('Fetching online data for {} (full year)'.format(date.year))\n\n try:\n year_data = request.urlopen(url).read().decode(encoding='utf_8').split('\\n')\n except:\n raise ValueError(date)\n else:\n year_data.pop(0)\n\n for line in year_data:\n elements = line.split()\n yield dict(Date = elements[0],\n Time = elements[1],\n Status = 'COMPLETE',\n Air_Temp = elements[5],\n Barometric_Press = elements[7],\n Wind_Speed = elements[2])",
"def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc",
"def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")",
"def snowfall_for_period(resort_name, start_date, end_date):\n\n #yyyymmdd\n start_date_year = int(start_date[0:4])\n start_date_month = int(start_date[4:6])\n start_date_day = int(start_date[6:8])\n\n end_date_year = int(end_date[0:4])\n end_date_month = int(end_date[4:6])\n end_date_day = int(end_date[6:8])\n\n resort_table = resort_table_dict[resort_name]\n\n query = \"SELECT status_date FROM %s\" %(resort_table)\n connection = get_connection()\n\n period_date_list = []\n snowfall_list = []\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n #yyyymmdd\n row_year = int(row[0].strftime('%Y'))\n row_month = int(row[0].strftime('%m'))\n row_day = int(row[0].strftime('%d'))\n\n if row_year < start_date_year or row_year > end_date_year:\n continue\n if start_date_year == row_year:\n if start_date_month > row_month:\n continue\n if start_date_year == row_year:\n if start_date_month == row_month:\n if start_date_day > row_day:\n continue\n if end_date_year == row_year:\n if end_date_month < row_month:\n continue\n if end_date_year == row_year:\n if end_date_month == row_month:\n if end_date_day < row_day:\n continue\n date_to_append = (row[0].strftime('%Y') + row[0].strftime('%m') + row[0].strftime('%d'))\n period_date_list.append(date_to_append)\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n for date in period_date_list:\n snowfall_to_add = snowfall_for_date(resort_name, date)\n snowfall_list.append(snowfall_to_add)\n\n return json.dumps(snowfall_list)",
"def get_fixed_holidays(self, year):\n # 2021 exception.\n # Because May 1st is both International Workers' day and Easter\n self.include_labour_day = (year != 2021)\n\n # Unshifted days are here:\n days = super().get_fixed_holidays(year)\n days_to_inspect = copy(days)\n for day_shifted in self.get_shifted_holidays(days_to_inspect):\n days.append(day_shifted)\n\n # 2021 exception.\n # Because May 1st is both International Workers' day and Easter\n if year == 2021:\n days.append((date(2021, 5, 4), self.labour_day_label))\n return days",
"def rain(self, json):\n rain = str(json['forecast']['txt_forecast']['forecastday'][0]['pop'])\n return rain",
"def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1",
"def year_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_DEMAND[y]",
"def north_america_countries():\r\n north_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in north_america:\r\n north_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in north_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians",
"def years():\r\n # Use Pandas to perform the sql query\r\n results = db.session.query(Worldmapdata.year.distinct().label(\"year\"))\r\n year = [row.year for row in results.all()]\r\n # Return a list of the column names (sample names)\r\n return jsonify(list(year))",
"def get_player_games(self, year, use_local=True):",
"def get_years(met_fname, nyear_spinup):\n pre_indust = 1850\n\n ds = xr.open_dataset(met_fname)\n\n st_yr = pd.to_datetime(ds.time[0].values).year\n\n # PALS met files final year tag only has a single 30 min, so need to\n # end at the previous year, which is the real file end\n en_yr = pd.to_datetime(ds.time[-1].values).year - 1\n\n # length of met record\n nrec = en_yr - st_yr + 1\n\n # number of times met data is recycled during transient simulation\n nloop_transient = np.ceil((st_yr - 1 - pre_indust) / nrec) - 1\n\n # number of times met data is recycled with a spinup run of nyear_spinup\n nloop_spin = np.ceil(nyear_spinup / nrec)\n\n st_yr_transient = st_yr - 1 - nloop_transient * nrec + 1\n en_yr_transient = st_yr_transient + nloop_transient * nrec - 1\n\n en_yr_spin = st_yr_transient - 1\n st_yr_spin = en_yr_spin - nloop_spin * nrec + 1\n\n return (st_yr, en_yr, st_yr_transient, en_yr_transient,\n st_yr_spin, en_yr_spin)",
"def threat(year, clock):\n # find latest adjustment preceding or equal to the given year\n index = -1\n while clock[index][0] > year:\n index -= 1\n # return time set at latest adjustment\n return clock[index][1]",
"def get_season_list_BDEW(weather_data):\n season_list = []\n\n for j, date_obj in enumerate(weather_data.index):\n YEAR = date_obj.year\n\n winter_end = dt.datetime(YEAR, 3, 21, 00, 00, 00)\n winter_start = dt.datetime(YEAR, 10, 31, 00, 00, 00)\n summer_start = dt.datetime(YEAR, 5, 15, 00, 00, 00)\n summer_end = dt.datetime(YEAR, 9, 15, 00, 00, 00)\n\n if date_obj <= winter_end or date_obj > winter_start:\n season_list.append('Winter') # Winter\n\n elif date_obj > summer_start and date_obj <= summer_end:\n season_list.append('Sommer') # Summer\n\n else:\n season_list.append('Übergangszeit') # Transition\n\n return season_list",
"def get_model_years(make, model):\n api_url = 'http://api.edmunds.com/api/vehicle/v2/{}/{}/years?fmt=json&api_key={}'\\\n .format(make, model, API_KEY)\n r = requests.get(api_url).json()\n model_years = [model_year['year'] for model_year in r['years']]\n return model_years",
"def yearly(self):\r\n return RecordsYearly(self)",
"def parse_snowfall(regime, lines, data):\n for linenum, line in enumerate(lines):\n # skipme\n if len(line.strip()) < 14:\n continue\n tokens = make_tokens(regime, line)\n key = tokens[0].strip()\n if key.startswith(\"SNOW DEPTH\"):\n data[\"snowdepth\"] = get_number(tokens[1])\n continue\n key = convert_key(key)\n data[f\"snow_{key}\"] = get_number(tokens[1])\n data[f\"snow_{key}_record\"] = get_number(tokens[3])\n yeartest = get_number_year(tokens[4])\n if yeartest is not None:\n data[f\"snow_{key}_record_years\"] = [yeartest]\n data[f\"snow_{key}_normal\"] = get_number(tokens[5])\n data[f\"snow_{key}_departure\"] = get_number(tokens[6])\n data[f\"snow_{key}_last\"] = get_number(tokens[7])\n if (\n key == \"today\"\n and yeartest is not None\n and data[f\"snow_{key}_record_years\"][0] is not None\n ):\n while (linenum + 1) < len(lines) and len(\n lines[linenum + 1].strip()\n ) == 4:\n n = get_number_year(lines[linenum + 1])\n if n is not None:\n data.setdefault(\"snow_today_record_years\", []).append(n)\n linenum += 1",
"def highest_snowfall_for_year(resort_name, year):\n resort_table = resort_table_dict[resort_name]\n year = int(year)\n query = \"SELECT snowfall FROM %s WHERE CAST(EXTRACT(YEAR FROM status_date) AS INTEGER) = %d\" %(resort_table, year)\n connection = get_connection()\n\n snowfall_list = []\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n snowfall_list.append(row)\n except Exception as e:\n print(e, file=sys.stderr)\n connection.close()\n snowfall_list.sort(reverse=True)\n \"\"\"\n need to think about making our own sorter so we can break ties effectively\n \"\"\"\n highest_snowfall = snowfall_list[0]\n return json.dumps(highest_snowfall)"
] | [
"0.59791404",
"0.5774895",
"0.57245696",
"0.5675499",
"0.55384594",
"0.5491503",
"0.5471699",
"0.5403219",
"0.54018354",
"0.5384144",
"0.5367263",
"0.5363518",
"0.5355815",
"0.5304118",
"0.5281944",
"0.52812725",
"0.5260535",
"0.5254466",
"0.5250503",
"0.52311355",
"0.5220862",
"0.5216031",
"0.5213441",
"0.52083033",
"0.5200584",
"0.51998085",
"0.5188539",
"0.51834196",
"0.5181969",
"0.51736724"
] | 0.61494935 | 0 |
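The document cell for this row builds the SQLAlchemy query but never returns the result its docstring promises, and the computed `last_date` goes unused. A dependency-free sketch of the intended behaviour, using a hypothetical in-memory list of `(date, prcp)` pairs in place of the `session` and `Measurement` model, looks like:

```python
import datetime as dt

# Hypothetical stand-in for the Measurement table: (date, prcp) rows.
measurements = [
    (dt.date(2016, 5, 1), 1.2),
    (dt.date(2017, 8, 20), 0.1),
    (dt.date(2017, 8, 24), 0.4),
]

def precipitation(last_date=dt.date(2017, 8, 23)):
    # Return (date, prcp) pairs from the year preceding last_date, sorted by date.
    last_year = last_date - dt.timedelta(days=365)
    return sorted((d, p) for d, p in measurements if d > last_year)

print(precipitation())  # rows dated after 2016-08-23, in date order
```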
Create mode of given scale | def scale_to_mode(scale, transpose=0):
# find mode scheme based on original scale
l = scale[transpose:]
# create complete 16-elements list of steps
i = ceil((16 - len(l)) / 12)
l += scale * i
l = list(accumulate(l))
n = l[0]
l = list(map(lambda x: x - n, l))
return l[:16] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)",
"def setScalingMode(mode='down'):\n mdict = {'down':'DOWN','full':'FULL'}\n dislin.sclmod(mode)",
"def getScale(self, mode='ACC'):\t#good\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscaleSetting = (currentVal[4]*2) + (currentVal[3]*1) \r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\tscale = 2**(scaleSetting+1) \r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\tscale = (2**(scaleSetting+1))*125\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\treturn scale,scaleSetting",
"def random_scale(img_scales, mode='range'):\n num_scales = len(img_scales)\n if num_scales == 1: # fixed scale is specified\n img_scale = img_scales[0]\n elif num_scales == 2: # randomly sample a scale\n if mode == 'range':\n ratio=max(img_scales[0])/min(img_scales[0])\n img_scale_long = [max(s) for s in img_scales]\n img_scale_short = [min(s) for s in img_scales]\n long_edge = np.random.randint(\n min(img_scale_long),\n max(img_scale_long) + 1)\n \"\"\"\n short_edge = np.random.randint(\n min(img_scale_short),\n max(img_scale_short) + 1)\n \"\"\"\n short_edge = int(long_edge/ratio) \n img_scale = (long_edge, short_edge)\n elif mode == 'value':\n img_scale = img_scales[np.random.randint(num_scales)]\n else:\n if mode != 'value':\n raise ValueError('Only \"value\" mode supports more than 2 image scales')\n img_scale = img_scales[np.random.randint(num_scales)]\n return img_scale",
"def any_scale(scale):\n return scale",
"def get_scale():\r\n\r\n \r\n return 0.5",
"def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)",
"def get_resize(image, scale):\n if isinstance(scale, int):\n if image.shape[0] % scale != 0 or image.shape[1] % scale != 0:\n return 1, None\n if image.shape[0] < scale or image.shape[1] < scale:\n return 2, None\n\n arrays = []\n size = image.shape[0] // scale, image.shape[1] // scale\n for i in range(scale):\n for j in range(scale):\n arrays.append(image[i::scale, j::scale])\n\n result = mode(np.stack(arrays), axis=0).mode[0]\n else:\n size = int(image.shape[0] / scale), int(image.shape[1] / scale)\n result = []\n for i in range(size[0]):\n result.append([])\n for j in range(size[1]):\n result[-1].append(image[int(i * scale), int(j * scale)])\n\n result = np.uint8(result)\n\n return 0, result",
"def scale(self):",
"def setScaleMode(self, mode):\n if mode != self.__scale_mode and mode in (self.ScaleModeGlobal, self.ScaleModeLocal):\n self.__scale_mode = mode\n self.__scaled_datasets = None\n self.__axisDomains = None\n self.dataChanged.emit()",
"def resMode(mode): \n if mode==0:\n makeMesh(r0x, r0y)\n elif mode==1:\n makeMesh(r1x, r1y)\n elif (mode==2):\n makeMesh(r2x, r2y)",
"def get_scale_op(self):\n\t\treturn self.variables.get('scale')",
"def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)",
"def mode(self) -> int:",
"def imageScale(scale):\n\t\treturn max(1, int(scale * (InterfaceTools.getCanvasSize()[0] / height)))",
"def __init__(self,scale):\n self.scale = scale",
"def scale_to_16(scale, mode=0, base=0, length=16):\n pattern = deque(scale)\n pattern.rotate(mode)\n result = deque(islice(cycle(pattern), (length - 1)))\n\n result.appendleft(base)\n\n res_acc = list(accumulate(result))\n return res_acc",
"def default_scale(scale):\n return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2,\n 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))",
"def _random_scale(self, results):\n # For multi-scale training\n shuffle(self.img_scale)\n \n if self.multiscale_mode == 'range':\n scale, scale_idx = self.random_sample_ratio(\n self.img_scale[0], self.ratio_range, self.ratio_hr_lr)\n elif self.multiscale_mode == 'value':\n scale, scale_idx = self.random_select_ratio(self.img_scale[0], self.ratio_range, self.ratio_hr_lr)\n else:\n raise NotImplementedError\n\n results['scale'] = scale\n results['scale_idx'] = scale_idx",
"def GetScale(self):\n ...",
"def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self",
"def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res",
"def __init__(self, size=800, scale=(3. / 4, 5. / 2)):\n assert isinstance(size, int)\n assert isinstance(scale, float) or isinstance(scale, tuple)\n self.size = size\n self.scale = scale if isinstance(scale, tuple) \\\n else (1 - scale, 1 + scale)",
"def make_feature_scale_factors():\n X, y = make_X_and_y()\n sqm = make_sqm_X()\n scale_factors = {\n \"indoor_temp\": np.max(X[:,:,0]),\n \"outdoor_temp\": np.max(X[:,:,1]),\n \"gas_kwh\": np.max(X[:,:,2]),\n \"elec_kwh\": np.max(X[:,:,3]),\n \"floor_area\": np.max(sqm),\n \"htc\": np.max(y),\n }\n\n with open(os.path.join(_TRAINING_DATA_PATH, \"scalefactors.json\"), \"w+\") as f:\n json.dump(scale_factors, f)",
"def plane_scale(self, scale):\n cmd = '{}testPlaneScale {}'.format(self.console, scale)\n self.write_command(cmd)",
"def generate_scales(self, height, width):\n min_hw = min(height, width)\n m_scale = 12.0 / self.min_size\n min_hw = int(min_hw * m_scale)\n scales = []\n factor_count = 0\n while min_hw >= 50:\n scales.append(m_scale * pow(self.face_factor, factor_count))\n min_hw = int(min_hw * self.face_factor)\n factor_count += 1\n return scales",
"def setSurfaceColorScale(low,high):\n dislin.zscale(low,high)",
"def mode(s='bin2slow'):\n ms = s.strip().lower()\n print camera.SetMode(ms)\n camera.status.update()",
"def mode(v_o, Vcc):\n if v_o == Vcc:\n return \"positive saturation\"\n if v_o >= -Vcc and v_o <= Vcc:\n return \"linear region\"\n if v_o == -Vcc:\n return \"negative saturation\"",
"def _random_scale(self, results):\n\n if self.ratio_range is not None:\n scale, scale_idx = self.random_sample_ratio(\n self.img_scale[0], self.ratio_range)\n elif len(self.img_scale) == 1:\n scale, scale_idx = self.img_scale[0], 0\n elif self.multiscale_mode == 'range':\n scale, scale_idx = self.random_sample(self.img_scale)\n elif self.multiscale_mode == 'value':\n scale, scale_idx = self.random_select(self.img_scale)\n else:\n raise NotImplementedError\n\n results['scale'] = scale\n results['scale_idx'] = scale_idx"
] | [
"0.6633015",
"0.6360772",
"0.62977636",
"0.62056977",
"0.6008764",
"0.60017097",
"0.5995628",
"0.5878364",
"0.5820572",
"0.58104134",
"0.5709557",
"0.56858575",
"0.5675628",
"0.56720704",
"0.5600536",
"0.55899876",
"0.5563008",
"0.5555391",
"0.55215645",
"0.55069333",
"0.5503952",
"0.54974425",
"0.5485736",
"0.5466393",
"0.5444021",
"0.543925",
"0.54208314",
"0.53747356",
"0.5367086",
"0.5365005"
] | 0.6723477 | 0 |
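Assuming `ceil` comes from `math` and `accumulate` from `itertools` (the document cell does not show its imports), a small usage sketch makes the return value concrete: the function turns a list of scale step intervals into cumulative semitone offsets starting at 0. Note that for a 7-interval scale the `ceil((16 - len(l)) / 12)` factor extends the pattern to only 14 steps, so fewer than 16 offsets come back:

```python
from itertools import accumulate
from math import ceil

def scale_to_mode(scale, transpose=0):
    # Rotate the interval pattern, extend it, and accumulate into offsets.
    l = scale[transpose:]
    i = ceil((16 - len(l)) / 12)
    l += scale * i
    l = list(accumulate(l))
    n = l[0]
    l = list(map(lambda x: x - n, l))
    return l[:16]

major_steps = [2, 2, 1, 2, 2, 2, 1]  # whole/half-step pattern of a major scale
print(scale_to_mode(major_steps))
# [0, 2, 3, 5, 7, 9, 10, 12, 14, 15, 17, 19, 21, 22]
```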
This function is from the latest version of SCons to support older SCons version. Configure check for a specific program. Check whether program prog_name exists in path. If it is found, returns the path for it, otherwise returns None. | def CheckProg(context, prog_name):
context.Message("Checking whether %s program exists..." % prog_name)
path = context.env.WhereIs(prog_name)
context.Result(bool(path))
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_program(binary_name):\n pth = os.path.abspath(__file__)\n\n # Split off the name and the directory...\n pth, notused = os.path.split(pth)\n pth, notused = os.path.split(pth)\n pth = os.path.join(pth, \"programs\", binary_name)\n pth = os.path.normpath(pth)\n\n log.debug(\"Checking for program %s\", binary_name)\n if not os.path.exists(pth) or not os.path.isfile(pth):\n log.error(\"No such file: '%s'\", pth)\n raise PartitionFinderError\n log.debug(\"Found program %s at '%s'\", binary_name, pth)\n return pth",
"def _which(self, program):\n\n def is_exe(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n os.environ[\"PATH\"] += os.pathsep + '%s/bin/' % basedir\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n print 'ishakesumd not found, build it or place it in the PATH before using this tool.'\n exit(1)",
"def _Which(program, paths):\n if sys.platform == 'win32' and not program.lower().endswith('.exe'):\n program += '.exe'\n\n for path in paths:\n candidate = os.path.join(os.path.normpath(path), program)\n if os.path.isfile(candidate):\n return candidate\n\n return None",
"def which(program, program_name):\n fpath, fname = os.path.split(program)\n if fpath:\n if __is_exe__(program):\n return program\n elif (__is_script__(program)):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if __is_exe__(exe_file):\n return exe_file\n logger.error(program_name + \" path = \" + fpath +\n \" not locatable in the path of directory specified\")\n return None",
"def find_program(name):\r\n # See MSDN for the REAL search order.\r\n base, ext = os.path.splitext(name)\r\n if ext:\r\n exts = [ext]\r\n else:\r\n exts = ['.bat', '.exe']\r\n for directory in os.environ['PATH'].split(os.pathsep):\r\n for e in exts:\r\n fname = os.path.join(directory, base + e)\r\n if os.path.exists(fname):\r\n return fname\r\n return None",
"def find_program(ctx, names, paths=None, *, quieter=0):\n\n if paths is None:\n paths = os.environ['PATH'].split(os.pathsep)\n\n # If we're running on windows, we need to append '.exe' to the filenames\n # that we're searching for.\n if sys.platform == 'win32':\n new_names = []\n for name in names:\n if \\\n not name.endswith('.exe') or \\\n not name.endswith('.cmd') or \\\n not name.endswith('.bat'):\n new_names.append(name + '.exe')\n new_names.append(name + '.cmd')\n new_names.append(name + '.bat')\n new_names.append(name)\n names = new_names\n\n for name in names:\n ctx.logger.check('looking for program ' + name, verbose=quieter)\n\n filename = fbuild.path.Path(name)\n if filename.exists() and filename.isfile():\n ctx.logger.passed('ok %s' % filename, verbose=quieter)\n return fbuild.path.Path(name)\n else:\n for path in paths:\n filename = fbuild.path.Path(path, name)\n if filename.exists() and filename.isfile():\n ctx.logger.passed('ok %s' % filename, verbose=quieter)\n return fbuild.path.Path(filename)\n\n ctx.logger.failed(verbose=quieter)\n\n raise MissingProgram(names)",
"def is_exist(program):\n def is_exe(fpath):\n return path.isfile(fpath) and access(fpath, X_OK)\n\n fpath, _ = path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for mypath in environ[\"PATH\"].split(pathsep):\n exe_file = path.join(mypath, program)\n if is_exe(exe_file):\n return exe_file\n\n return None",
"def thepath = getProgramPath(theprog):\r\n\r\n theprog = lower(theprog);\r\n\r\n if strcmp(theprog,'POV-Ray')\r\n # install location for POV-Ray\r\n thepath = '/usr/local/bin';\r\n\r\n else if strcmp(theprog,'quietpov')\r\n # install location for the QuietPOV add-on\r\n thepath = 'C:\\Program Files\\POV-Ray for Windows v3.6\\guiext\\QuietPOV';\r\n\r\n else if strcmp(theprog,'imagemagick')\r\n # install location for ImageMagick\r\n thepath = '/home/kieran/Downloads/ImageMagick-6.8.5-8';\r\n\r\n else if strcmp(theprog,'ffmpeg')\r\n # install location for the ffmpeg library\r\n thepath = '/usr/bin/ffmpeg';\r\n\r\n else\r\n thepath = '';",
"def find_program(basename):\n names = [basename]\n if os.name == 'nt':\n # Windows platforms\n extensions = ('.exe', '.bat', '.cmd')\n if not basename.endswith(extensions):\n names = [basename+ext for ext in extensions]+[basename]\n for name in names:\n path = is_program_installed(name)\n if path:\n return path",
"def which(program):\r\n def is_exe(fpath):\r\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\r\n\r\n fpath, fname = os.path.split(program)\r\n if fpath:\r\n if is_exe(program):\r\n return program\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n exe_file = os.path.join(path, program)\r\n if is_exe(exe_file):\r\n return exe_file\r\n\r\n return None",
"def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n return None",
"def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None",
"def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None",
"def which(program):\r\n import os\r\n def is_exe(fpath):\r\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\r\n\r\n fpath, fname = os.path.split(program)\r\n if fpath:\r\n if is_exe(program):\r\n return program\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n path = path.strip('\"')\r\n exe_file = os.path.join(path, program)\r\n if is_exe(exe_file):\r\n return exe_file\r\n\r\n return None",
"def _which(program):\n # Borrowed from:\n # https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python\n # XXX May need more porting to handle .exe extensions on Windows\n\n fpath, _fname = os.path.split(program)\n if fpath:\n if _is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if _is_exe(exe_file):\n return exe_file\n\n return None",
"def which(program):\n\n def is_bin(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_bin(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n bin_file = os.path.join(path, program)\n if is_bin(bin_file):\n return bin_file\n\n return None",
"def FindEnv(progname):\n for path in os.environ['PATH'].split(':'):\n fullname = os.path.join(path, progname)\n if os.access(fullname, os.X_OK):\n return fullname\n raise AssertionError(\n \"Could not find an executable named '%s' in the system path\" % progname)",
"def which(program):\n import os\n def is_exe(fpath):\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None",
"def which(program):\n\n\tfpath, fname = os.path.split(program)\n\tif fpath:\n\t\tif is_exe(program):\n\t\t\treturn program\n\telse:\n\t\tfor path in os.environ[\"PATH\"].split(os.pathsep):\n\t\t\tpath = path.strip('\"')\n\t\t\texe_file = os.path.join(path, program)\n\t\t\tif is_exe(exe_file):\n\t\t\t\treturn exe_file\n\n\treturn None",
"def which(program):\n\t# requirements = os\n\tis_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))\n\tfor path in os.environ['PATH'].split(os.pathsep):\n\t\tpath = path.strip('\"')\n\t\texe_file = os.path.join(path, program)\n\t\tif is_exe(exe_file):\n\t\t\treturn exe_file\n\tif is_exe(program):\n\t\treturn os.path.abspath(program)\n\treturn None",
"def find_program(name):\r\n return name",
"def which(program):\n\n def is_exe(fpath):\n found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n if not found and sys.platform == \"win32\":\n fpath = fpath + \".exe\"\n found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n return found\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n logger.debug(\"Found executable: \" + str(program))\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = os.path.expandvars(os.path.expanduser(path)).strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n logger.debug(\"Found executable: \" + str(exe_file))\n return exe_file\n\n return None",
"def find_program(assembler_name, program, assembler_arg, option=True):\n if assembler_arg == assembler_name and option and not which(program):\n err = (textwrap.dedent(\"\"\"\n We could not find the \"{}\" program. You either need to\n install it or you need to adjust the PATH environment\n variable with the \"--path\" option so that aTRAM can\n find it.\"\"\")).format(program)\n sys.exit(err)",
"def which(program):\n\n def is_exe(fpath):\n \"\"\"\n Return True is the fpath exists and is executable. This is needed since\n executables are specifed in the JSON files, but not the path to them.\n The executables may be in different locations based on which PC is\n running this.\n \"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None",
"def program_exists(name):\n for path in os.environ['PATH'].split(os.path.pathsep):\n if path and os.path.exists(os.path.join(path, name)):\n return True\n return False",
"def find_program_file():\n value = sys.argv[0]\n msg = \"Failed to determine absolute pathname of program!\"\n if not os.path.isabs(value):\n candidates = which(value)\n if not candidates:\n raise Exception(msg)\n value = candidates[0]\n if not os.access(value, os.X_OK):\n raise Exception(msg)\n return value",
"def which(program):\n fpath, fname = os.path.split(program)\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n raise ExecutableNotFound(program)",
"def check_PATH_for_program(f):\n\n path = os.environ[\"PATH\"].split(\":\")\n\n for p in path:\n\n if os.path.isfile(os.path.join(p,f)):\n return True\n\n return False",
"def which(program):\n def is_exe(fpath):\n \"\"\"Determine wether file at given path is executable.\"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None",
"def getexe(self, exe_name):\n try:\n exe_path = super().getstr('config', exe_name)\n except NoOptionError as e:\n if self.logger:\n self.logger.error(e)\n else:\n print(e)\n\n return None\n\n full_exe_path = shutil.which(exe_path)\n if full_exe_path is None:\n msg = f'Executable {exe_name} does not exist at {exe_path}'\n if self.logger:\n self.logger.error(msg)\n else:\n print('ERROR: {}'.format(msg))\n return None\n\n # set config item to full path to exe and return full path\n self.set('config', exe_name, full_exe_path)\n return full_exe_path"
] | [
"0.67661804",
"0.66921204",
"0.65894896",
"0.6528314",
"0.64908946",
"0.6469822",
"0.641857",
"0.63612264",
"0.6318409",
"0.6250026",
"0.61892205",
"0.61833847",
"0.61833847",
"0.61687654",
"0.61340445",
"0.61195254",
"0.61188084",
"0.6089571",
"0.6089144",
"0.6088777",
"0.6067471",
"0.6052141",
"0.603533",
"0.60347813",
"0.60298467",
"0.6018274",
"0.6010809",
"0.5988568",
"0.5961038",
"0.59277034"
] | 0.7263758 | 0 |
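`CheckProg` is written as an SCons custom configure test, so the usual way to exercise it is to register it via `custom_tests` inside an `SConstruct` that defines (or imports) the function above. A minimal sketch, with `gcc` as an example program name:

```python
# SConstruct sketch -- Environment and Configure are provided by SCons itself.
env = Environment()
conf = Configure(env, custom_tests={'CheckProg': CheckProg})

gcc_path = conf.CheckProg('gcc')  # full path if found, otherwise None
if not gcc_path:
    print('gcc was not found in PATH')

env = conf.Finish()
```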
This function is from SCons but extended with additional flags, e.g. the extra_libs. Another (more sophisticated) test for a library. Checks, if library and header is available for language (may be 'C' or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'. As in CheckLib, we support library=None, to test if the call compiles without extra link flags. | def CheckLibWithHeader(context, libs, header, language,
call = None, extra_libs = None, autoadd = 1):
prog_prefix, dummy = \
SCons.SConf.createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, extra_libs = extra_libs,
autoadd = autoadd)
context.did_show_result = 1
return not res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_library(self, **kw):\n\tself.check(\n\t\tcompile_filename = [],\n\t\tfeatures = 'link_lib_test',\n\t\tmsg = 'Checking for libraries',\n\t\t)",
"def check_libraries(env):\n # Detect OS X python installation, and attempt to correct for it.\n if os.uname()[0] == 'Darwin':\n env.Replace(SHLINKFLAGS='$LINKFLAGS -bundle -flat_namespace -undefined suppress')\n env.Replace(SHLIBSUFFIX='.so')\n if os.path.isdir('/opt/local'):\n env.Append(\n LIBPATH=['/opt/local/lib'],\n CPPPATH=['/opt/local/include']\n )\n\n # Detect the presence of necessary dependencies.\n conf = Configure(env)\n\n if not conf.CheckLibWithHeader('m', 'math.h', 'c'):\n print \"Can't find standard math libraries.\"\n Exit(1)\n\n if not conf.CheckLibWithHeader('python%s' % python_version,\n 'Python.h', 'c'):\n print \"Can't find python %s.\" % python_version\n Exit(1)\n\n env = conf.Finish()\n\n return env",
"def checkLibraries(env):\n # Detect OS X python installation, and attempt to correct for it.\n if os.uname()[0] == 'Darwin':\n env.Replace(SHLINKFLAGS='$LINKFLAGS -bundle -flat_namespace -undefined suppress')\n env.Replace(SHLIBSUFFIX='.so')\n\n # Detect the presence of necessary dependencies.\n conf = Configure(env)\n\n if not conf.CheckLibWithHeader('m', 'math.h', 'c'):\n print \"Can't find standard math libraries.\"\n Exit(1)\n\n env = conf.Finish()\n\n return env",
"def examineLoadLibrary(lib):\n from PyJobTransformsCore.envutil import examine_library\n\n # turn module name into library name\n if not lib.startswith('lib') and not lib.endswith('.so'):\n lib = 'lib' + lib + '.so'\n print (\"Examining library \" + lib)\n diagLines = []\n errorAcronym = None\n missingSystemLibs = []\n missingOtherLibs = []\n misLibs = examine_library(lib)\n for l in misLibs:\n if systemLibsRE.search(l):\n missingSystemLibs.append(l)\n else:\n missingOtherLibs.append(l)\n if missingSystemLibs:\n if len(missingSystemLibs) == 1: libWord = 'library'\n else: libWord = 'libraries'\n diagLines.append( 'Site problem: Missing system %s: %s' % (libWord, ','.join(missingSystemLibs)) )\n if not errorAcronym: errorAcronym = \"ATH_SITE_SYSLIBS\"\n\n if missingOtherLibs:\n if len(missingOtherLibs) == 1: libWord = 'library'\n else: libWord = 'libraries'\n diagLines.append( 'Can not find %s: %s Please check software installation.' % (libWord,','.join(missingOtherLibs)) )\n if not errorAcronym: errorAcronym = \"ATH_SITE_LIBS\"\n return (errorAcronym,os.linesep.join(diagLines))",
"def checkArguments ( ) :\r\n\r\n if len( sys.argv ) <= 1 : return None\r\n\r\n\r\n # splits the arguments that contain quotes\r\n \r\n wordList = [ ]\r\n\r\n for argument in sys.argv :\r\n\r\n wordList.extend( argument.split( '\"' ) )\r\n\r\n\r\n # places all the arguments that start with \"--\" at the end, and joins the others into words\r\n\r\n noMinusList = [ ]\r\n\r\n minusList = [ ]\r\n\r\n argument = \"\"\r\n\r\n for word in wordList[ 1 : ] :\r\n\r\n # strips spaces and quotes\r\n \r\n word = word.strip( \" \\\"'\" ) \r\n\r\n if word.startswith( \"--\" ) :\r\n\r\n minusList.append( word )\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n argument = \"\"\r\n\r\n elif argument == \"\" :\r\n\r\n argument = word\r\n\r\n else :\r\n\r\n argument = argument + \" \" + word\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n\r\n # library = 1st argument of the form \"-- ... /\" that exists\r\n\r\n libraryPath = None\r\n\r\n for argument in minusList :\r\n\r\n if ( ( argument.endswith( os.sep ) ) and ( os.path.exists( argument.strip( \"- \" ) ) ) ) :\r\n\r\n libraryPath = argument.strip( \"-\" )\r\n\r\n break\r\n\r\n # recomposes the command line\r\n \r\n sys.argv = wordList[ : 1 ] + noMinusList + minusList \r\n\r\n return libraryPath",
"def check_linking(self):\n\n # This one checks if the linking command works out of the box or\n # if any specific flag is required. For example if the linker if the\n # Intel FORTRAN compiler, then the \"-nofor_main\" is usually required.\n # This function only checks if linker works but does not automatically\n # detect the required flags\n print 'Checking loader...',\n sys.stdout.flush()\n writefile('tmpf.f',\"\"\"\n subroutine fsub()\n write(*,*)'success'\n stop\n end\\n\"\"\")\n writefile('tmpc.c',\"\"\"\n #if defined ADD_\n #define fsub fsub_\n #elif defined NOCHANGE\n #define fsub fsub\n #elif defined fcIsF2C\n #define fsub fsub_\n #elif defined UPCASE\n #define fsub FSUB\n #endif\n void main(){\n fsub();}\\n\"\"\")\n\n ccomm = self.config.cc+' '+self.config.ccflags+' '+self.mangling+' -c -o tmpc.o tmpc.c'\n fcomm = self.config.fc+' '+self.config.fcflags+' -c -o tmpf.o tmpf.f'\n lcomm = self.config.fc+' '+self.config.ldflags_fc+' '+self.config.ld_fcmain+' -o lnk tmpf.o tmpc.o'\n\n (output, error, retz) = runShellCommand(ccomm)\n if retz:\n print '\\n\\nCOMMON: in check_linking: cannot compile'\n print 'command is: ',ccomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(fcomm)\n if retz:\n print '\\n\\nCOMMON: in check_linking: cannot compile'\n print 'command is: ',fcomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(lcomm)\n if retz:\n print \"\"\"\\n\\nCOMMON: in check_linking: cannot link\n Cannot link a C main program to a Fortran77 subroutine\n Make sure that the appropriate flags are passed to the linker.\"\"\"\n print 'command is: ',lcomm\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n\n killfiles(['lnk', 'tmpf.f', 'tmpf.o', 'tmpc.c', 'tmpc.o'])\n\n print 'works'\n return 1;",
"def find_python_library():\n python_library = sysconfig.get_config_var('LIBRARY')\n if (not python_library or os.path.splitext(python_library)[1][-2:] == '.a'):\n candidate_lib_prefixes = ['', 'lib']\n candidate_implementations = ['python']\n if hasattr(sys, \"pypy_version_info\"):\n candidate_implementations = ['pypy-c', 'pypy3-c']\n candidate_extensions = ['.lib', '.so', '.a']\n if sysconfig.get_config_var('WITH_DYLD'):\n candidate_extensions.insert(0, '.dylib')\n candidate_versions = []\n candidate_versions.append('')\n candidate_versions.insert(0, str(sys.version_info.major) +\n \".\" + str(sys.version_info.minor))\n abiflags = getattr(sys, 'abiflags', '')\n candidate_abiflags = [abiflags]\n if abiflags:\n candidate_abiflags.append('')\n # Ensure the value injected by virtualenv is\n # returned on windows.\n # Because calling `sysconfig.get_config_var('multiarchsubdir')`\n # returns an empty string on Linux, `du_sysconfig` is only used to\n # get the value of `LIBDIR`.\n libdir = du_sysconfig.get_config_var('LIBDIR')\n if sysconfig.get_config_var('MULTIARCH'):\n masd = sysconfig.get_config_var('multiarchsubdir')\n if masd:\n if masd.startswith(os.sep):\n masd = masd[len(os.sep):]\n libdir = os.path.join(libdir, masd)\n if libdir is None:\n libdir = os.path.abspath(os.path.join(\n sysconfig.get_config_var('LIBDEST'), \"..\", \"libs\"))\n no_valid_candidate = True\n for (pre, impl, ext, ver, abi) in itertools.product(candidate_lib_prefixes,\n candidate_implementations,\n candidate_extensions,\n candidate_versions,\n candidate_abiflags):\n candidate = os.path.join(libdir, ''.join((pre, impl, ver, abi, ext)))\n if os.path.exists(candidate):\n python_library = candidate\n no_valid_candidate = False\n break\n # If there is not valid candidate then set the python_library is empty\n if no_valid_candidate:\n python_library = \"\"\n return python_library",
"def _add_linking_libs(context, call):\n libs = getattr(call, \"libs\", ())\n if libs:\n context.add_linking_libs(libs)",
"def CustomCFlagCheck(context, flag, append=True):\n context.Message(\"Checking if C compiler supports \" + flag + \" flag \")\n ccflags = context.env[\"CCFLAGS\"]\n context.env.Append(CCFLAGS=flag)\n result = context.TryCompile(\"int main(int argc, char **argv) { return 0; }\", \".c\")\n context.Result(result)\n if not append or not result:\n context.env.Replace(CCFLAGS=ccflags)\n return result",
"def is_lib_available(library):\n try:\n __import__(library)\n return True\n except ImportError:\n return False",
"def CustomCompileCheck(context, message, source, extension=\".cc\"):\n context.Message(message)\n\n env = context.env\n if env.GetOption(\"clean\") or env.GetOption(\"help\") or env.GetOption(\"no_exec\"):\n result = True\n else:\n result = context.TryCompile(source, extension)\n\n context.Result(result)\n\n return result",
"def selected_libs(args: Namespace) -> List[str]:\n return args.lib or [\"python\", \"lkt\"]",
"def has_library(self, library_id, ignore_case=False, **kwargs): # lint-amnesty, pylint: disable=unused-argument\n if not isinstance(library_id, LibraryLocator):\n return None\n\n index = self.get_course_index(library_id, ignore_case)\n if index:\n return LibraryLocator(index['org'], index['course'], library_id.branch)\n return None",
"def has_flag(compiler, flag, ext=None):\n return try_compile(compiler, flags=[flag], ext=ext)",
"def is_static_library(lib_file):\n if sys.platform.startswith('aix'):\n # An AIX library could be both, but for simplicity assume it isn't.\n return not AIXDumpExtractor.is_shared_lib(lib_file)\n else:\n _, ext = os.path.splitext(lib_file)\n return ext == '.a'",
"def _c_optimizations_required():\n pure_env = os.environ.get('PURE_PYTHON')\n require_c = pure_env == \"0\"\n return require_c",
"def CustomCppFlagCheck(context, flag, append=True):\n context.Message(\"Checking if C++ compiler supports \" + flag + \" flag \")\n cxxflags = context.env[\"CXXFLAGS\"]\n context.env.Append(CXXFLAGS=flag)\n result = context.TryCompile(\"int main(int argc, char **argv) { return 0; }\", \".cc\")\n context.Result(result)\n if not append or not result:\n context.env.Replace(CXXFLAGS=cxxflags)\n return result",
"def try_add_flag(args, compiler, flag, ext=None):\n if try_compile(compiler, flags=args+[flag], ext=ext):\n args.append(flag)",
"def _get_linker_flags(\n target,\n fc,\n cc,\n syslibs,\n srcfiles,\n sharedobject=False,\n osname=None,\n verbose=False,\n):\n # get list of unique fortran and c/c++ file extensions\n fext = _get_fortran_files(srcfiles, extensions=True)\n\n # remove .exe extension of necessary\n if fc is not None:\n fc = _get_base_app_name(fc)\n if cc is not None:\n cc = _get_base_app_name(cc)\n\n # set linker compiler\n compiler = None\n if len(srcfiles) < 1:\n if fc is not None:\n compiler = fc\n else:\n if fext is not None:\n compiler = fc\n if compiler is None:\n compiler = cc\n\n # remove target .exe extension, if necessary\n target = _get_base_app_name(target)\n\n # get lower case OS string\n if osname is None:\n osname = _get_osname()\n\n # get - or / to prepend for compiler switches\n prepend = _get_prepend(compiler, osname)\n\n # set outgoing syslibs\n syslibs_out = []\n\n # add option to statically link intel provided libraries on osx and linux\n if sharedobject:\n if osname in (\n \"darwin\",\n \"linux\",\n ):\n if compiler == fc:\n if fc in (\n \"ifort\",\n \"mpiifort\",\n ):\n syslibs_out.append(\"static-intel\")\n\n # add linker switch for a shared object\n if sharedobject:\n gnu_compiler = True\n if compiler == fc:\n if fc in (\n \"ifort\",\n \"mpiifort\",\n ):\n gnu_compiler = False\n else:\n if cc in (\n \"icc\",\n \"mpiicc\",\n \"icl\",\n \"cl\",\n ):\n gnu_compiler = False\n if osname == \"win32\":\n if gnu_compiler:\n copt = \"shared\"\n else:\n copt = \"dll\"\n else:\n if osname == \"darwin\":\n copt = \"dynamiclib\"\n else:\n copt = \"shared\"\n syslibs_out.append(copt)\n # add static link flags for GNU compilers\n else:\n if \"shared\" in syslibs_out:\n syslibs_out.remove(\"shared\")\n if \"dynamiclib\" in syslibs_out:\n syslibs_out.remove(\"dynamiclib\")\n if \"dll\" in syslibs_out:\n syslibs_out.remove(\"dll\")\n isstatic = False\n isgfortran = False\n if osname == \"win32\":\n if compiler == fc and fc in (\"gfortran\",):\n isstatic = True\n isgfortran = True\n if not isstatic:\n if compiler == cc and cc in (\n \"gcc\",\n \"g++\",\n ):\n isstatic = True\n if isstatic:\n syslibs_out.append(\"static\")\n if isgfortran:\n syslibs_out.append(\"static-libgfortran\")\n syslibs_out.append(\"static-libgcc\")\n syslibs_out.append(\"static-libstdc++\")\n syslibs_out.append(\"lm\")\n\n # add -nologo switch for compiling on windows with intel compilers\n if osname == \"win32\":\n addswitch = False\n if compiler == fc:\n if fc in (\n \"ifort\",\n \"mpiifort\",\n ):\n addswitch = True\n else:\n if cc in (\n \"icl\",\n \"cl\",\n ):\n addswitch = True\n if addswitch:\n syslibs_out.append(\"nologo\")\n\n # process passed syslibs switches - check for switches with a space between\n # the switch and a setting\n for idx, flag in enumerate(syslibs[1:]):\n if flag[0] not in (\"/\", \"-\"):\n syslibs[idx] += \" {}\".format(flag)\n syslibs[idx + 1] = \"\"\n\n # add passed syslibs switches - assume that flags have - or / as the\n # first character.\n for switch in syslibs:\n if len(switch) < 1:\n continue\n if switch[1:] not in syslibs_out:\n syslibs_out.append(switch[1:])\n\n # add target specific linker (syslib) switches\n tlist = _set_syslibs(target, fc=fc, cc=cc, argv=False, osname=osname)\n if len(tlist) > 0:\n for switch in tlist:\n if switch[1:] not in syslibs_out:\n syslibs_out.append(switch[1:])\n\n # add prepend to syslibs flags\n for idx, switch in enumerate(syslibs_out):\n syslibs_out[idx] = prepend + switch\n\n return compiler, syslibs_out",
"def isLibSBMLCompiledWith(*args):\n return _libsbml.isLibSBMLCompiledWith(*args)",
"def check_prerequisites() -> None:\n # check black code formatter is installed\n if not is_installed(\"black\"):\n raise FileNotFoundError(\n \"Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html\"\n )\n\n # check isort code formatter is installed\n if not is_installed(\"isort\"):\n raise FileNotFoundError(\n \"Cannot find isort code formatter! To install, please follow this link: https://pycqa.github.io/isort/#installing-isort\"\n )\n\n # check protolint code formatter is installed\n if subprocess.call(f\"{base_protolint_command()} version\", shell=True) != 0: # nosec\n raise FileNotFoundError(\n \"Cannot find protolint protocol buffer schema file linter! To install, please follow this link: https://github.com/yoheimuta/protolint.\"\n )\n\n # check protocol buffer compiler is installed\n if not is_installed(\"protoc\"):\n raise FileNotFoundError(\n \"Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/\"\n )",
"def is_valid_language(args, skip=False):\n if (is_valid_file_and_directory(args) and is_valid_comments(args)) or skip:\n if args.language is not None:\n return True\n return False",
"def is_selection(cfg):\n if LIBRARIES in list(cfg.keys()):\n return True\n else:\n return False",
"def detect(self):\n GCCLike.detect(self)\n\n if self._platform != platforms.lumin.NAME:\n err = self.detect_version_on_path_or_env('CPP', 'cpp', False)\n if err:\n return err\n err = self.detect_version_on_path_or_env('CC', 'clang',\n needs_version=self._suffix != '',\n allow_unversioned=not self._suffix)\n if err:\n return err\n err = self.detect_version_on_path_or_env('CXX', 'clang++',\n needs_version=self._suffix != '',\n allow_unversioned=not self._suffix)\n if err:\n return err\n err = self.detect_version_on_path_or_env('AS', 'llvm-as',\n needs_version=self._suffix != '',\n allow_unversioned=not self._suffix)\n if err:\n err = self.detect_version_on_path_or_env('AS', 'as', False)\n if err:\n return err\n err = self.detect_version_on_path_or_env('AR', 'ar', False)\n if err:\n return err\n else:\n err = self.add_cross_toolchain_tool('CPP', 'cpp')\n if err:\n return err\n err = self.add_cross_toolchain_tool('CC', 'clang')\n if err:\n return err\n err = self.add_cross_toolchain_tool('CXX', 'clang++')\n if err:\n return err\n err = self.add_cross_toolchain_tool('AS', 'as')\n if err:\n return err\n err = self.add_cross_toolchain_tool('AR', 'gcc-ar')\n if err:\n return err\n err = self.add_cross_toolchain_tool('OBJCOPY', 'objcopy')\n if err:\n return err\n err = self.add_cross_toolchain_tool('STRIP', 'strip')\n if err:\n return err\n\n return None",
"def _cached_create_libspec(\n self,\n libname: str,\n is_builtin: bool,\n target_file: Optional[str],\n args: Optional[str],\n *,\n _internal_force_text=False, # Should only be set from within this function.\n ) -> Optional[str]:\n from robotframework_ls.impl import robot_constants\n\n if not is_builtin:\n if not target_file:\n is_builtin = libname in robot_constants.STDLIBS\n\n import time\n from robocorp_ls_core.subprocess_wrapper import subprocess\n from robocorp_ls_core.robotframework_log import get_log_level\n\n acquire_mutex = _timed_acquire_mutex_for_spec_filename\n\n if _internal_force_text:\n # In this case this is a recursive call and we already have the lock.\n acquire_mutex = NULL\n\n log_exception = log.exception\n if is_builtin and libname == \"Dialogs\" and get_log_level() < 1:\n # Dialogs may have dependencies that are not available, so, don't show\n # it unless verbose mode is enabled.\n log_exception = log.debug\n\n if not libname.replace(\".\", \"\").replace(\"/\", \"\").replace(\"\\\\\", \"\").strip():\n return f\"Unable to generate libspec for: {libname}\"\n\n additional_path = None\n additional_path_exists = False\n\n log_time = True\n cwd = None\n\n if target_file is not None:\n additional_path = os.path.dirname(target_file)\n if os.path.splitext(os.path.basename(target_file))[0] == \"__init__\":\n additional_path = os.path.dirname(additional_path)\n\n additional_path_exists = os.path.exists(additional_path)\n if additional_path and additional_path_exists:\n cwd = additional_path\n if libname.endswith((\"/\", \"\\\\\")):\n libname = libname[:-1]\n libname = os.path.basename(libname)\n if libname.lower().endswith((\".py\", \".class\", \".java\")):\n libname = os.path.splitext(libname)[0]\n\n curtime = time.time()\n\n try:\n try:\n call = [sys.executable]\n major_version = self.get_robot_major_version()\n if major_version < 4:\n call.extend(\"-m robot.libdoc --format XML\".split())\n else:\n call.extend(\n \"-m robot.libdoc --format XML --specdocformat RAW\".split()\n )\n\n if additional_path and additional_path_exists:\n call.extend([\"-P\", os.path.normpath(additional_path)])\n\n if _internal_force_text:\n call.append(\"--docformat\")\n call.append(\"text\")\n\n # Note: always set as a whole, so, iterate in generator is thread-safe.\n for entry in self._additional_pythonpath_folder_to_folder_info:\n if os.path.exists(entry):\n call.extend([\"-P\", os.path.normpath(entry)])\n\n if not args:\n call.append(libname)\n else:\n call.append(\"::\".join([libname, args]))\n\n libspec_filename = self._compute_libspec_filename(\n libname, is_builtin, target_file, args\n )\n\n log.debug(f\"Obtaining mutex to generate libspec: {libspec_filename}.\")\n with acquire_mutex(libspec_filename): # Could fail.\n log.debug(\n f\"Obtained mutex to generate libspec: {libspec_filename}.\"\n )\n call.append(libspec_filename)\n\n mtime: float = -1\n try:\n mtime = os.path.getmtime(libspec_filename)\n except:\n pass\n\n log.debug(\n \"Generating libspec for: %s.\\nCwd:%s\\nCommand line:\\n%s\",\n libname,\n cwd,\n \" \".join(call),\n )\n try:\n try:\n # Note: stdout is always subprocess.PIPE in this call.\n # Note: the env is always inherited (the process which has\n # the LibspecManager must be the target env already).\n self._subprocess_check_output(\n call,\n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE,\n cwd=cwd,\n )\n except OSError as e:\n log.exception(\"Error calling: %s\", call)\n # We may have something as: Ignore OSError: [WinError 6] The handle is invalid,\n # give the result 
based on whether the file changed on disk.\n try:\n if mtime != os.path.getmtime(libspec_filename):\n _dump_spec_filename_additional_info(\n self,\n libspec_filename,\n is_builtin=is_builtin,\n obtain_mutex=False,\n )\n return None\n except:\n pass\n\n log.debug(\"Not retrying after OSError failure.\")\n return str(e)\n\n except subprocess.CalledProcessError as e:\n if not _internal_force_text:\n if (\n b\"reST format requires 'docutils' module to be installed\"\n in e.output\n ):\n return self._cached_create_libspec(\n libname,\n is_builtin,\n target_file,\n args,\n _internal_force_text=True,\n )\n\n log_exception(\n \"Error creating libspec: %s.\\nReturn code: %s\\nOutput:\\n%s\",\n libname,\n e.returncode,\n e.output,\n )\n bytes_output = e.output\n output = bytes_output.decode(\"utf-8\", \"replace\")\n\n # Remove things we don't want to show.\n for s in (\"Try --help\", \"--help\", \"Traceback\"):\n index = output.find(s)\n if index >= 0:\n output = output[:index].strip()\n\n if output:\n return output\n return f\"Error creating libspec: {output}\"\n\n _dump_spec_filename_additional_info(\n self,\n libspec_filename,\n is_builtin=is_builtin,\n obtain_mutex=False,\n )\n return None\n except Exception as e:\n log_exception(\"Error creating libspec: %s\", libname)\n return str(e)\n finally:\n if log_time:\n delta = time.time() - curtime\n log.debug(\"Took: %.2fs to generate info for: %s\" % (delta, libname))",
"def test_add_library_cmd_line(self):\n\n lib_name = self.conf.options('libs')[0]\n cmd = ['pydroid', 'add', 'library', lib_name]\n subprocess.call(cmd)\n self.assertTrue(os.path.exists(os.path.join(project_libs_dir(),\n lib_name)))",
"def _get_ldflags() -> str:\n # windows gcc does not support linking with unresolved symbols\n if sys.platform == 'win32': # pragma: win32 cover\n libs = os.path.join(sys.base_prefix, 'libs')\n return f'-L{libs} -lpython{sys.version_info[0]}'\n else: # pragma: win32 no cover\n cc = subprocess.check_output(('go', 'env', 'CC')).decode().strip()\n\n with _tmpdir() as tmpdir:\n testf = os.path.join(tmpdir, 'test.c')\n with open(testf, 'w') as f:\n f.write('int f(int); int main(void) { return f(0); }\\n')\n\n for lflag in LFLAGS: # pragma: no cover (platform specific)\n try:\n subprocess.check_call((cc, testf, lflag), cwd=tmpdir)\n return lflag\n except subprocess.CalledProcessError:\n pass\n else: # pragma: no cover (platform specific)\n # wellp, none of them worked, fall back to gcc and they'll get\n # a hopefully reasonable error message\n return LFLAG_GCC",
"def detectExtensions(builder):\n print (\"Checking if C extensions can be compiled, don't be alarmed if \"\n \"a few compile errors are printed.\")\n\n if not builder._compile_helper(\"#define X 1\\n\"):\n print \"Compiler not found, skipping C extensions.\"\n return []\n\n # Extension modules to build.\n exts = [\n Extension(\"twisted.spread.cBanana\",\n [\"twisted/spread/cBanana.c\"],\n define_macros=builder.define_macros),\n ]\n\n # urllib.unquote accelerator\n exts.append( Extension(\"twisted.protocols._c_urlarg\",\n [\"twisted/protocols/_c_urlarg.c\"],\n define_macros=builder.define_macros) )\n\n if sys.platform == 'darwin':\n exts.append(\n Extension(\"twisted.internet.cfsupport\",\n [\"twisted/internet/cfsupport/cfsupport.c\"],\n extra_compile_args=['-w'],\n extra_link_args=['-framework','CoreFoundation',\n '-framework','CoreServices',\n '-framework','Carbon'],\n define_macros=builder.define_macros))\n\n if sys.platform == 'win32':\n exts.append( Extension(\"twisted.internet.iocpreactor._iocp\",\n [\"twisted/internet/iocpreactor/_iocp.c\"],\n libraries=[\"ws2_32\", \"mswsock\"],\n define_macros=builder.define_macros))\n\n return exts",
"def KengeLibrary(self, name, buildname=None, source = None, public_headers = None, **kargs):\n library_args = {}\n\n library_args[\"CPPPATH\"] = []\n\n if buildname is None:\n buildname = name\n\n if source is None:\n # User didn't provide any source files\n # explicitly, so we work out it form them\n # based on some hueristics.\n glob_list = []\n dirs = [\"include/interfaces/\", \"src/\", \"src/arch-%s/\" % env.arch]\n\t if self.test_lib == name:\n\t\tdirs.append(\"test/\")\n if self[\"BUILD_TESTS\"]:\n dirs += [\"test/\"]\n for src_ext in env.src_exts:\n for dir_ in dirs:\n glob_list.append(dir_ + \"*.\" + src_ext)\n else:\n glob_list = source\n\n libs = []\n \n if \"LIBS\" in kargs:\n if self[\"BUILD_TESTS\"]:\n kargs[\"LIBS\"].append(\"check\")\n for lib in kargs[\"LIBS\"]:\n libs.append(lib)\n if lib not in self.libs.keys():\n raise SCons.Errors.UserError, \"Library [%s] was looking for library [%s] but it doesn't exist \" \\\n \"in environment [%s]\\n This environment has: %s\" % (name, lib, self.name, self.libs.keys())\n\n del kargs[\"LIBS\"]\n\n # He we expand the glob to a list of files\n source_list = Flatten([src_glob(glob) for glob in glob_list])\n\n idl_files = [fn for fn in source_list if fn.endswith(\".idl4\")]\n reg_files = [fn for fn in source_list if fn.endswith(\".reg\")]\n\n # Now we go through everything in the kargs:\n for arg in kargs:\n if arg.startswith(\"EXTRA_\"):\n argname = arg[6:]\n library_args[argname] = self[argname] + kargs[arg]\n else:\n library_args[arg] = kargs[arg]\n\n # Generally this is the only public headers\n if public_headers is None:\n public_headers = [\"#libs/%s/include\" % name, \"#libs/%s/test\" % name]\n\n if len(idl_files) or len(reg_files):\n # Unless we have generated files\n public_headers.append(Dir(\".\").abspath + \"/include\")\n\n # Now if we are for real compiling stuff...\n cpp_path = copy.copy(self[\"CPPPATH\"])\n\tif self.test_lib:\n\t\tcpp_path.append(\"#libs/check/include\")\n \n # Make sure we include any of the libraries header file's\n for each in public_headers:\n cpp_path.append(each)\n\n # This ensure that any generated header files\n # Maybe move this somewhere else later though\n cpp_path.append(Dir('.').abspath + \"/src\") # Broken\n cpp_path.append(Dir('.').abspath + \"/include\")\n\n # Find any .idl4 files that should be generated\n for file in idl_files:\n gen_file = self.IDL4(file)\n\n # Generate any .reg files\n for file in reg_files:\n self.Reg(file)\n \n library_args[\"CPPPATH\"] += cpp_path + self.end_cpp_path # End cpp_path is a hack hack hack!\n \n # Now we just call the normal StaticLibrary with our simple defaults\n lib = self.StaticLibrary(buildname, source_list, **library_args)\n\n assert(len(lib) == 1)\n lib = lib[0]\n\n if self[\"FLINT_RUN\"]:\n for each in lib.children():\n if str(each).endswith(\".o\"):\n if str(each.children()[0]).endswith(\".c\") or \\\n str(each.children()[0]).endswith(\".cc\"):\n self.AddPreAction(each, \"$FLINTCOM\")\n\n # And construct our definition of the library\n # This should suck muhc much less... how about a class?\n lib = (public_headers, lib, \"#\" + os.path.dirname(lib.path), None, libs)\n return lib",
"def try_lib_load():\n # If we are building the documentation, then we abort the import\n rtd_build_environ = 'PYGORPHO_BUILD_READTHEDOCS'\n if rtd_build_environ in os.environ:\n import warnings\n warnings.warn('Environment variable {} exists - we assume '\n 'documentation is being built and are aborting the '\n 'import'.format(rtd_build_environ))\n return _DummyLib(), __file__\n\n path_candidates = []\n # If PYGORPHO_PATH was set we start looking there\n if os.getenv('PYGORPHO_PATH') is not None:\n path_candidates.append(os.path.abspath(os.getenv('PYGORPHO_PATH')))\n # Look in the dir. where this python file is placed\n path_candidates.append(os.path.dirname(__file__))\n # Look in dir. one level up from current file dir.\n path_candidates.append(os.path.dirname(path_candidates[-1]))\n # Start looking\n for path in path_candidates:\n try:\n if platform.system() == 'Windows':\n lib = ctl.load_library('pygorpho', path)\n else:\n lib = ctl.load_library('libpygorpho', path)\n # Load was successful, so return path and lib now\n return lib, path\n except OSError:\n # Lib was not here so move on...\n pass\n else:\n raise ImportError('could not find pygorpho dynamic library file '\n '(try setting PYGORPHO_PATH environment variable)')"
] | [
"0.70988935",
"0.6297976",
"0.60500425",
"0.56923157",
"0.55388916",
"0.5488028",
"0.53949934",
"0.5367634",
"0.532435",
"0.5322072",
"0.5235258",
"0.5161452",
"0.5156778",
"0.51427037",
"0.5137287",
"0.5064834",
"0.501241",
"0.4967916",
"0.49471545",
"0.49432126",
"0.4930325",
"0.4925288",
"0.4912353",
"0.49113977",
"0.48917145",
"0.48783392",
"0.48603076",
"0.4857446",
"0.48449332",
"0.48327386"
] | 0.71553355 | 0 |
Returns a playlist with a given name or raise NotFound. | def playlist(self, title): # noqa
for item in self.playlists():
if item.title == title:
return item
raise NotFound('Invalid playlist title: %s' % title) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getPlaylist(self,name):\n playlist = self.getAllPlaylists(name)\n return playlist[0] if playlist else None",
"def find_playlist(playlist_name):\n\n playlists = spotifyObject.user_playlists(config.USERNAME)\n\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n return playlist['id']\n \n raise PlaylistNotFoundException(f\"The playlist name: {playlist_name} was not found.\")",
"def find_playlist(playlist_name):\n\n playlists = spotifyObject.user_playlists(config.USERNAME)\n\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n return playlist['id']\n \n raise PlaylistNotFoundException(\"The given playlist name was not found.\")",
"def get_playlist(speaker, name):\n playlists = speaker.get_sonos_playlists(complete_result=True)\n # Strict match\n for playlist in playlists:\n if name == playlist.title:\n logging.info(\n \"Found playlist '{}' using strict match\".format(playlist.title)\n )\n return playlist\n # Fuzzy match\n name = name.lower()\n for playlist in playlists:\n if name in playlist.title.lower():\n logging.info(\"Found playlist '{}' using fuzzy match\".format(playlist.title))\n return playlist\n return None",
"def playlist(self):\n _LOGGER.debug(\"Fetching Playlist info\")\n parameters = {\n 'cmd': None,\n 'param3': 'playlist.json'\n }\n try:\n res = requests.get(url=self.url, headers=headers, params=parameters, timeout=self.timeout).json()\n except (ConnectionError, OSError) as e:\n _LOGGER.error(\"Fetching playlist info failed: %s\", e)\n res = None\n return res",
"def lookup(self, uri):\n uri_scheme = urllib.parse.urlparse(uri).scheme\n backend = self.backends.with_playlists.get(uri_scheme, None)\n if not backend:\n return None\n\n with _backend_error_handling(backend):\n playlist = backend.playlists.lookup(uri).get()\n playlist is None or validation.check_instance(playlist, Playlist)\n return playlist\n\n return None",
"def show_playlist(self, playlist_name):\n playlist_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n if playlist_exists:\n print(f\"Showing playlist: {playlist_name}\")\n if len(self.playlists[real_playlist_name]) == 0:\n print(\"\\tNo videos here yet\")\n else:\n for song in self.playlists[real_playlist_name]:\n video = self._video_library.get_video(song)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\"{video.title} ({video.video_id}) [{tags}]\")\n\n else:\n print(f\"\\tCannot show playlist {playlist_name}: Playlist does not exist\")\n\n # print(\"show_playlist needs implementation\")",
"def get_playlist_by_id(cls, id):\n try:\n return cls._playlists_by_id[id]\n except KeyError:\n return None",
"def lookup(self, uri: Uri) -> Optional[Playlist]:\n uri_scheme = UriScheme(urllib.parse.urlparse(uri).scheme)\n backend = self.backends.with_playlists.get(uri_scheme, None)\n if not backend:\n return None\n\n with _backend_error_handling(backend):\n playlist = backend.playlists.lookup(uri).get()\n if playlist is not None:\n validation.check_instance(playlist, Playlist)\n return playlist\n\n return None",
"async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))",
"def get_playlist_id(name):\n \n #search for the first playlist result given a drama name\n search_response = youtube.search().list(q=name,type=\"playlist\",part=\"id\",maxResults=1).execute()\n result = search_response.get(\"items\", [])\n playlist_id = result[0]['id']['playlistId']\n return playlist_id",
"def show_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n\n playlist = self.playlists.get(playlist_id)\n videos = playlist.videos\n\n if len(videos) == 0:\n print(f\"Showing playlist: {playlist_name}\")\n print(\"No videos here yet\")\n return\n\n print(f\"Showing playlist: {playlist_name}\")\n for video_id in videos:\n print(self._video_library.get_video(video_id))\n return",
"def show_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n playlist = self._playlists[playlist_name.lower()]\n print(f\"Showing playlist: {playlist_name}\")\n if not playlist.videos:\n print(\"No videos here yet\")\n for video in playlist.videos:\n print(video)",
"def _get_playlist_index_by_name(library_list, playlist_name):\n for playlist in library_list:\n if playlist['name'] == playlist_name:\n return library_list.index(playlist)\n return None",
"def get_playlist(cls, tag):\n try:\n return cls._playlists_by_tag[tag]\n except KeyError:\n return None",
"def get_playlist_id(self, username, playlist_name):\n playlist_id = ''\n playlists = self.spotify.user_playlists(username)\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n playlist_id = playlist['id']\n return playlist_id\n while playlists['next']: # If there are more playlists\n playlists = self.spotify.next(playlists)\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n playlist_id = playlist['id']\n return playlist_id\n return playlist_id",
"def get_playlist(self, playlist_uuid, *args):\n\n if playlist_uuid not in self.playlists:\n return rsp_codes[2]\n\n rsp = rsp_codes[0]\n rsp['playlist'] = self.playlists[playlist_uuid]\n return rsp",
"def create_playlist(self, name):\n\n user_id = self.get_current_user()\n endpoint = f\"/users/{user_id}/playlists\"\n headers = self.headers\n headers.update()\n response = self._send(\n endpoint,\n \"POST\",\n extra_headers={\"Content-Type\": \"application/json\"},\n data=json.dumps({\"name\": name, \"public\": False})\n )\n playlist_id = response.json()[\"id\"]\n return playlist_id",
"def get_playlist_by_id(self, request):\n pl = Playlist.find_by_id(request.pid)\n response = PlaylistResponse(pid=pl.key.id(),\n name=pl.name,\n songs=[])\n songs = Song.find_by_playlist(pl.key).fetch()\n for song in songs:\n response.songs.append(SongMessage(id=song.key.id(),\n spotify_id=song.spotify_id,\n name=song.name,\n vote_count=song.vote_count))\n return response",
"def from_id(id):\n response = settings.database.get_item(Key={'id': id})\n raise_for_response(response)\n if not \"Item\" in response.keys():\n raise NotFoundException(\"Playlist with id \" + str(id) + \" couldn't be found\")\n playlist = Playlist()\n playlist.init_from_body(response[\"Item\"])\n return playlist",
"def _mpd_get_playlist(position=None):\n \n if position != None:\n return _mpd_client.playlistinfo(position)\n else:\n return _mpd_client.playlistinfo()",
"async def get_playlist(self, part=\"snippet\", max_results=7, playlist_id=\"\", playlist_url=\"\"):\n\n url = self.url_api.get_playlist_url(playlist_id, part, max_results, playlist_url)\n\n response = await self.session.get(url)\n search_results = await response.json()\n return search_results",
"def get_playlists_for_user_by_name(self, request): \n user = Account.find_by_username(request.username)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)",
"def get_playlist(self, object_id):\n return self.get_object(\"playlist\", object_id)",
"def show_playlist(self, playlist_name):\n print(f\"Showing playlist: {playlist_name}\")\n print(\" No videos here yet\")",
"def create_playlist(self, playlist_name):\n if playlist_name.lower() in self._playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n print(f\"Successfully created new playlist: {playlist_name}\")\n self._playlists[playlist_name.lower()] = Playlist(playlist_name)",
"def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")",
"def playlist(self, playlist_id: str, fields: str = None,\n market: str = 'from_token'):\n return self._get('playlists/' + playlist_id,\n fields=fields, market=market)",
"def Playlist(self, type='audio'):\n self.logger.debug(\"Loading Playlist of type \" + type)\n xbmc = Server(self.url('/jsonrpc', True))\n if type == 'video':\n return xbmc.Playlist.GetItems(playlistid=1, properties=['year', 'showtitle', 'season', 'episode', 'runtime'])\n\n return xbmc.Playlist.GetItems(playlistid=0, properties=['artist', 'title', 'album', 'duration'])",
"def getAllPlaylists(self,name):\n return [p for p in self.playlists if p.title == name]"
] | [
"0.7941071",
"0.7861843",
"0.78293544",
"0.7535346",
"0.69100803",
"0.6823972",
"0.6806134",
"0.6756279",
"0.67378354",
"0.6722012",
"0.6667086",
"0.66420937",
"0.65845215",
"0.65714145",
"0.6559312",
"0.65355885",
"0.65215516",
"0.6419069",
"0.63462394",
"0.63388675",
"0.62996066",
"0.6279552",
"0.62776995",
"0.6266178",
"0.62398326",
"0.62068707",
"0.620335",
"0.62024647",
"0.61696726",
"0.6169007"
] | 0.79534113 | 0 |
List all active sessions. | def sessions(self):
return utils.listItems(self, '/status/sessions') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sessions(self):\n\n return self.all_sessions",
"def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def fusion_api_get_active_sessions(self):\n return self.loginsession.get_active_sessions()",
"def sessions(self):\n return list(Session.get_sessions(self))",
"def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body",
"def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()",
"def get_sessions(self):\n return self.current_sessions",
"def active_sessions(self):\n skey = self.r_key('active_sessions')\n sessions_to_expire = []\n for user_id in self.r_server.smembers(skey):\n ukey = self.r_key('session', user_id)\n if self.r_server.exists(ukey):\n yield user_id, self.load_session(user_id)\n else:\n sessions_to_expire.append(user_id)\n\n # clear empty ones\n for user_ids in sessions_to_expire:\n self.r_server.srem(skey, user_id)",
"def list(self, status: Optional[str] = None) -> SessionList:\n filter = {\"status\": status} if status else None\n return self._list(list_cls=SessionList, resource_cls=Session, method=\"GET\", filter=filter)",
"def list(self, request, *args, **kwargs):\n self.check_authentication(request)\n serializer = SessionSerializer(\n context={\"request\": request, \"view\": self},\n instance=[_Session(request)],\n many=True,\n )\n return Response(serializer.data)",
"def sessions(self):\n return self._sessions",
"def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str",
"def get_all_sessions(self) -> list:\n sessions = list()\n for stream_id in self.streams.keys():\n tcpsession, session_position, network_tuple = self.streams[stream_id]\n sessions.append(tcpsession.get_session(session_position - 1))\n return sessions",
"def sessions(self):\n return self.rpc.compatiblesessions(self.modulename)",
"def print_sessions(self):\n print(\"[Printing Sessions]\")\n for key in self.sessions.keys():\n print(f\"{key}:\\n\\t{self.sessions[key]}\")",
"def fusion_api_get_active_user_sessions(self, param='', api=None, headers=None):\n return self.usersessions.get(api=api, headers=headers, param=param)",
"def list():\n rino.login.list()",
"def sessions(self, *args, **kwargs):\r\n return self._get('Sessions', *args, **kwargs)",
"def _sessions(self):\n return self.__sessions",
"def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions",
"def sessions(self):\n for session_id in self.get_sessions(): \n session = Session(self.session_cache, self.sid, session_id)\n yield session",
"def get_current_users(self):\n active_sessions = Session.objects.filter(expire_date__gte=timezone.now())\n user_id_list = []\n for session in active_sessions:\n data = session.get_decoded()\n user_id_list.append(data.get('_auth_user_id', None))\n # Query all logged in users based on id list\n return self.filter(id__in=user_id_list)",
"def get_sessions(url: str, token: str) -> List[Session]:\n sessions_url = f'{url}api/sessions'\n response = requests.get(sessions_url, params={'token': token})\n assert(response.status_code == 200)\n sessions_raw = json.loads(response.text)\n sessions = []\n for session_raw in sessions_raw:\n session = Session(\n path = session_raw['path'],\n last_activity = dateutil.parser.isoparse(session_raw['kernel']['last_activity']),\n execution_state = session_raw['kernel']['execution_state']\n )\n assert(session['execution_state'] in valid_execution_states)\n sessions.append(session)\n\n sessions.sort(key=lambda session: session['last_activity'], reverse=True)\n return sessions",
"def list():\n cmd_output = None\n\n try:\n cmd_output = tmux_exec('ls')\n except CalledProcessError:\n return []\n\n sessions = cmd_output.strip().split('\\n')\n sessions = map(lambda session: session.split(':')[0], sessions)\n\n return sessions",
"def iter_sessions():\n return iter(_session_stack)",
"def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results",
"def filtered_sessions(self):\n return self.stage.filtered_sessions",
"def get(self):\n\n response = openvidu().list_sessions()\n\n if response.status_code == 200:\n return response.json()[\"content\"]\n abort(response)",
"def active():\n session = session_maker(\n app.config['MYSQL_USER'], app.config['MYSQL_PASS'], app.config['MYSQL_SERVER_PORT_3306_TCP_ADDR'],\n app.config['MYSQL_SERVER_PORT_3306_TCP_PORT'], app.config['DB'])\n\n\n print(\n tabulate(\n selection_list_active(session),\n headers=['number', 'sqlid', 'name', 'city', 'state']))",
"def get_users_list(self, session):\n\n users = session.query(User.chat_id).filter(User.is_admin==False).all()\n return users"
] | [
"0.75758445",
"0.757478",
"0.7396808",
"0.7384779",
"0.73801714",
"0.72702295",
"0.71300334",
"0.7045607",
"0.7029493",
"0.6845006",
"0.679199",
"0.6788236",
"0.6770604",
"0.6673959",
"0.6615374",
"0.65827996",
"0.6541172",
"0.65161306",
"0.6476787",
"0.6473066",
"0.647231",
"0.6468992",
"0.6457664",
"0.64194864",
"0.6338903",
"0.6320755",
"0.6255221",
"0.6226086",
"0.60916793",
"0.6060558"
] | 0.76642567 | 0 |
Update the use of a cache. | def _update_use(self, key):
if (self._replace_pol == Cache.LRU):
self.cache[key]= self.hashmap[key]
if (self._replace_pol == Cache.LRU_S):
self.cache[key] = self.hashmap[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_cache(self, val):\n pass",
"def update(self, cache_key):\r\n self._write_sha(cache_key)",
"def set_to_cache(self, url, data):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n MEM_CACHE[cache_key][cache_lookup] = (data, time.time())",
"def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)",
"def update_cache(self, repo=None, force=False):\n raise NotImplementedError(self.update_cache)",
"def _update_cache(self, cset):\n # If this changeset is already in the cache, remove it\n try:\n self._lru_cache.remove(cset)\n except ValueError:\n pass\n\n # Add the changeset at the end\n if len(self._lru_cache) >= Repository._LRU_CACHE_SIZE:\n del self._lru_cache[0]\n self._lru_cache.append(cset)",
"def _update_cachesize(self):\n san_res = self.san_interface\n _load = not self.san_interface.runmode\n if self.cachesize > 0:\n pvds = self._get_pvds()\n if len(pvds) < 1:\n # not suppposed to get here\n return (1,'Error no valid provider/path was found when setting cache')\n logger.eventlog.debug('in update cache for %s , cachedrdev: %s' % (str(self),str(self.cachedrdev)))\n # check if this is a single path case or replicated cache (multipath)\n if len(pvds) == 1 and len(self.cachepvds) < 2 and not self.cachedrdev:\n (e,pt) = ext2path(self,san_res.providers[pvds[0]])\n if e:\n return (e,'Error updating cache, '+pt)\n (e,r) = san_res.providers[pvds[0]].add_cache(pt,self.cachesize)\n if e:\n return (e,r)\n else:\n #\n # more than 1 path\n #\n\n # one path with cacheon and is running return ok\n for pt in self.paths():\n if pt.cacheon:\n if pt.state == ObjState.running:\n return (0,'Cache is ok')\n logger.eventlog.warning('cache for %s is ON but path is not running !' % str(self))\n\n # no running path with cache on\n self.cachepresent=False\n\n #\n cvolname=obj2volstr(self)\n cvolname=cvolname.replace(':',CACHESEP) # replace ':' with a legal volume char\n drname=CACHEPFX+cvolname\n cache_loadonly=False\n #\n\n # self.cachedrdev ?\n if self.san_interface.raids.has_key(drname):\n # found drbd dev for cache (fail-over or load??):\n # del tgt (old), remove cache (old), promote (new),\n # cache load (new), add targets (new)\n logger.eventlog.warning('Cache for %s is not on, while DR device is detected during update' % str(self))\n drdev = self.san_interface.raids[drname]\n if not drdev:\n logger.eventlog.error('cant update cache dr for %s , drdev not found' % (str(self)))\n return (1,'cant update Cache dr')\n if not drdev.provider:\n drdev.promote_one(checkluns=False)\n if not drdev.provider:\n logger.eventlog.error('cant update cache dr for %s , drdev provider not detected' % (str(self)))\n return (1,'cant update Cache dr')\n # debug\n #logger.eventlog.debug(\"cachepresent: %s\" % str(self.cachepresent))\n #for p in self.paths():\n # if p.provider==drdev.provider:\n # logger.eventlog.debug(\"p: %s\" % str(p))\n # logger.eventlog.debug(\"state: %s\" % str(p.state))\n # logger.eventlog.debug(\"cacheon: %s\" % str(p.cacheon))\n # end debug\n e,prim = ext2path(self,drdev.provider)\n if e:\n logger.eventlog.error('valid path not found for %s on %s in update' % (str(self),str(drdev.provider)))\n return (1,'valid path not found')\n #logger.eventlog.debug(\"prim: %s\" % str(prim))\n cache_loadonly=True\n else:\n if len(self.cachepvds)==1 or len(self.cachepvds)>2:\n # has only 1 cache LV (load, absent?) ?? 
or >2 (old ones redetected)\n logger.eventlog.error('Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n return (1,'Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n\n if len(self.cachepvds) == 2:\n # if has 2 cache LVs, no DR (load): create drbd, load cache\n (e1,path1) = ext2path(self,san_res.providers[self.cachepvds[0]])\n (e2,path2) = ext2path(self,san_res.providers[self.cachepvds[1]])\n print 'cache paths: ',str(path1),str(path2)\n if e1 or e2:\n logger.eventlog.error('valid paths not found for %s in update' % str(self))\n return (1,'valid path not found')\n vol1 = san_res.providers[self.cachepvds[0]].cachevg.volumes[cvolname]\n vol2 = san_res.providers[self.cachepvds[1]].cachevg.volumes[cvolname]\n cache_loadonly=True\n\n else:\n # else (new) : select 2 paths, create 2 LVs,\n # create & promote DRBD, Create cache on master\n\n e,path1,path2 = self._get_2_pvds_paths()\n if e:\n logger.eventlog.error(path1)\n return (1,path1)\n\n # create 2 cache LVs\n (e,vol1) = path1.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n tmp='cant create Cache LV1 for %s on %s in update: %s' % (self.name,path1.provider.name,vol1)\n logger.eventlog.error(tmp)\n return (1,tmp)\n (e,vol2) = path2.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n vol1.provider.cachevg.volumes.delete(vol1,force=True)\n tmp='cant create Cache LV2 for %s on %s in update: %s' % (self.name,path2.provider.name,vol2)\n logger.eventlog.error(tmp)\n return (1,tmp)\n #\n print 'cache vols: ',str(vol1),str(vol2)\n\n # create new drbd device\n drdev = san_res.raids.add(drname,SanRaidGrp(drname,None))\n if not drdev :\n logger.eventlog.error('failed to create/updare dr device for cache in %s' % str(self))\n return (1,'failed to create/updare dr device')\n drdev.raid=RaidLevel.dr\n drdev.iscachedr=True\n drdev.devices=[vol1,vol2]\n (e,txt)=drdev.update()\n print 'create dr device:',e,txt\n if e:\n logger.eventlog.error('cant create Cache dr for %s , %s' % (str(self),txt))\n return (1,'cant create Cache dr')\n if drdev.provider is path1.provider:\n prim=path1\n else:\n prim=path2\n\n logger.eventlog.debug('create cache on %s , loadonly: %s , drname: %s' % \\\n (drdev.provider.name, cache_loadonly, drname))\n #loadonly=(self.cachepvds<>[]) # check if we already had cache LVs\n\n # create CacheDev\n # on loadonly we also forcing devname update\n (e,r) = drdev.provider.create_cache(prim,drdev,cvolname,loadonly=cache_loadonly,force=cache_loadonly)\n logger.eventlog.debug('create cache response: %s %s' % (e,r))\n if e:\n return (e, 'error creating cache on %s: %s' % (drdev.provider.name,r))\n else:\n (e,r) = self._remove_cache()\n if e:\n return (e,'error removing cache on %s: %s' % (str(self),r))\n return (0,'')",
"def _cache_set(self, metric_name, metric):\n pass",
"def _update_cache(self):\n \n # Check if the model cache is full\n if self.__class__.cache_limit == len(self.__class__.model_cache):\n # Remove the oldest item from the cache if exceeding cache limit\n self.__class__.model_cache.popitem(last=False)\n \n # Remove the obsolete version of the model from the cache\n if self.model.name in self.__class__.model_cache:\n del self.__class__.model_cache[self.model.name]\n \n # Add the current model to the cache\n self.__class__.model_cache[self.model.name] = self.model\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(8)",
"def set_cache(self, key, value):\n self._cache[key] = (self.model.batch_number, value)",
"def _refresh_cache(self, data_dict):\r\n pass",
"def mark_if_cached(self, args):\n pass",
"def reload_cache(self):\n self.data = self.read_data_cache()",
"def _invalidate_http_cache(self):\n self._requests_cache = {}",
"def update_cache(self, line):\n self.inputcache.insert(0, line) # This copies the cache every time ... :-(\n if len(self.inputcache) >= self.CACHELENGTH:\n self.inputcache.pop() # This not :-)",
"def manage_image_cache(self, context, all_instances):\n self.image_cache_manager.update(context, all_instances)",
"def update_resources_for_this_host(cache, db):\n free_cpu, free_mem = get_resources()\n my_ip = cache[\"ip\"]\n\n logger.info(\"UPDATING\", extra = {\"cpu\": free_cpu, \"mem\": free_mem, \"ip\": my_ip})\n try:\n db.hset(my_ip, mapping={\"cpu\": free_cpu, \"mem\": free_mem})\n except Exception as e:\n logger.error(e)\n raise e",
"def use_cached_files(self, cache_key):\r\n pass",
"def update(self, key_path, content, t_mserver):\n#\t\tif key not in self.cache and len(self.cache) >= self.max_cache_size:\n#\t\t\tself.remove_oldest()\n\t\t\n\t\tcurrent_time = int(time.time())\n\t\tif key_path not in self.cache:\n\t\t\tself.add_cache(key_path, content, t_mserver)\n\t\t\t\t\n\t\telif current_time >= self.cache[key_path]['time_validated']:\n\t\t\tself.cache[key_path] = {'time_validated': int(time.time()),\n\t\t\t\t\t\t\t\t\t't_mclient': int(t_mserver),\n\t\t\t\t\t\t\t\t\t'content': content}\n\t\telse:\n\t\t\tprint(\"Content is not updates OR time_accessed went wrong!\")",
"def set_cache(self, key, value):\n self.r.set(key, value)\n self.r.expire(key, time=1500)",
"def recache_updates(self):\n ks = ['BooksUpdated', '%s'%self.key().id()]\n decaches(ks)",
"def update_commit_cache(commit, cache):\n\n cache.append(commit.id)",
"def _cache_set(self, metric_name, metric):\n with self._lock:\n self.__cache[metric_name] = metric",
"def cache_set(self, key: str, value: bytes) -> None:\n if self.cache is not None:\n self.cache.set(key, value)",
"def _update_cache(self, key, value, cache, decode_loop_step):\n # Combines cached keys and values with new keys and values.\n if decode_loop_step is not None:\n # TPU special case.\n key_seq_dim = cache[\"key\"].shape.as_list()[1]\n indices = tf.reshape(\n tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),\n [1, key_seq_dim, 1, 1])\n key = cache[\"key\"] + key * indices\n value_seq_dim = cache[\"value\"].shape.as_list()[1]\n indices = tf.reshape(\n tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),\n [1, value_seq_dim, 1, 1])\n value = cache[\"value\"] + value * indices\n else:\n key = tf.concat([tf.cast(cache[\"key\"], key.dtype), key], axis=1)\n value = tf.concat([tf.cast(cache[\"value\"], value.dtype), value], axis=1)\n\n # Update cache\n cache[\"key\"] = key\n cache[\"value\"] = value\n\n return key, value",
"def update_cached_instance(sender, instance, **kwargs):\n if not hasattr(instance, \"cache_instance\"):\n return\n sender.cache_instance(instance)",
"def set(key, value):\n return Cache.cache_connector.set(key, value)",
"def update(self, flags=''):\n _load = not self.san_interface.runmode\n self._update_params()\n if 'cachesize' in self._updatedattr or _load or 'f' in flags:\n (e,r) = self._update_cachesize()\n if e:\n if not _load: # reset cachesize on create but not on load\n self.cachesize=0\n return (e,r)\n## self._flush()\n return (0,'')",
"def library_caching(self, library_caching):\n\n self._library_caching = library_caching",
"async def refresh_cache(request: Request) -> Response:\n await request.state.canvas.sync_cache(request.state.db_conn, skip_check=True)\n\n return Response(status_code=204)"
] | [
"0.70355237",
"0.67454726",
"0.66589284",
"0.66395354",
"0.6594092",
"0.658877",
"0.655342",
"0.63988495",
"0.63722324",
"0.63371176",
"0.6319258",
"0.6313111",
"0.6270669",
"0.62608325",
"0.623213",
"0.6211307",
"0.6194371",
"0.61508423",
"0.61492276",
"0.61486644",
"0.6111692",
"0.60700923",
"0.6053279",
"0.6053",
"0.6001416",
"0.598955",
"0.59607416",
"0.5942355",
"0.59207505",
"0.58924943"
] | 0.7350366 | 0 |
Return the name, arguments, and return type of the first function definition found in code. Arguments are returned as [(type, name), ...]. | def parse_function_signature(code):
m = re.search("^\s*" + re_func_decl + "\s*{", code, re.M)
if m is None:
print(code)
raise Exception("Failed to parse function signature. "
"Full code is printed above.")
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
else:
args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]
return name, args, rtype | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_functions(code):\n regex = \"^\\s*\" + re_func_decl + \"\\s*{\"\n \n funcs = []\n while True:\n m = re.search(regex, code, re.M)\n if m is None:\n return funcs\n \n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n funcs.append((name, args, rtype))\n \n code = code[m.end():]",
"def get_parsed_declaration(self) -> str:\n args = self._get_arguments()\n\n func = self.node\n tu = func.tu\n\n # For functions the extent encompasses the return value, and the\n # location is the beginning of the functions name. So we can consume\n # all tokens in between.\n end = cindex.SourceLocation.from_offset(\n tu, func.location.file, func.location.offset - 1\n )\n extent = cindex.SourceRange.from_locations(func.extent.start, end)\n\n return_type = \" \".join(\n t.spelling for t in cindex.TokenGroup.get_tokens(tu, extent=extent)\n )\n\n return f\"{return_type} {func.spelling}({args})\"",
"def get_f_args(f_name, code):\n # get the function definition regex\n r='^[ ]*def[ ]+{}[ ]*\\((?P<args>.*)\\)[ ]*:[ ]*$'.format(f_name)\n r=\"[ ]*def[ ]+{}\\((?P<args>.*)\\)\".format(f_name)\n # for line in code\n a=[]\n for l in code.split(\"\\n\"):\n res=re.match(r, l)\n if res:\n test=res.groups(0)[0]\n cc=[]\n while len(test):\n tt,test=get_next_arg(test)\n cc.append(tt)\n a.append(ArgumentInfo([parse_arg(s) for s in cc]))\n if len(a)==0:\n return None\n if len(a)==1:\n return a[0]\n return a",
"def find_prototypes(code):\n\n prots = []\n lines = code.split('\\n')\n for line in lines:\n m = re.match(\"\\s*\" + re_func_prot, line)\n if m is not None:\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' '))\n for arg in args.split(',')]\n prots.append((name, args, rtype))\n\n return prots",
"def parseFunction(tokens: List[LEX_Type], last_token: LEX_Type, ast_main: AST_Program) -> (AST_Function, List[LEX_Type], LEX_Type, AST_Program):\n if last_token.value == \"recipe\":\n if tokens[0].type == \"Identifier\":\n ast_main.Functions[tokens[0].value] = AST_Function()\n func: AST_Function\n rest_tokens: List[LEX_Type]\n final_token: LEX_Type\n func, rest_tokens, final_token, ast_main = parseFunction(tokens[1:], tokens[0], ast_main)\n func.name = tokens[0].value\n if func.CodeSequence is None:\n print(\"no code in function, expected code after Bake:\")\n exit()\n return func, rest_tokens, final_token, ast_main\n else:\n throw_error_with_token(\"MissingIdentifier\", tokens[0])\n elif last_token.type == \"Identifier\":\n if tokens[0].value == \"->\":\n if tokens[1].type == \"Type\":\n func: AST_Function\n rest_tokens: List[LEX_Type]\n final_token: LEX_Type\n func, rest_tokens, final_token, ast_main = parseFunction(tokens[2:], tokens[1], ast_main)\n func.ReturnType = tokens[1].value\n return func, rest_tokens, final_token, ast_main\n else:\n throw_error_with_token(\"ExpectedReturnType\", tokens[1])\n else:\n throw_error_with_token(\"ExpectedArrow\", tokens[0])\n elif tokens[0].type == \"Keyword\":\n if tokens[0].value == \"prepare\":\n if last_token.type == \"LineEnd\":\n if tokens[1].value == \":\":\n arguments: List[AST_FunctionArgument]\n rest_tokens: List[LEX_Type]\n arguments, rest_tokens = getFunctionArguments(tokens[2:], last_token)\n if len(arguments) == 0:\n throw_error(\"ExpectedAfter\", tokens[0].file, tokens[0].line, \"FunctionArguments\", tokens[0].value)\n func, rest_tokens, final_token, ast_main = parseFunction(rest_tokens[1:], rest_tokens[0], ast_main)\n func.argumentList = arguments\n return func, rest_tokens, final_token, ast_main\n else:\n throw_error(\"ExpectedAfter\", tokens[0].file, tokens[0].line, \":\", tokens[0].value)\n else:\n throw_error(\"ExpectedBefore\", tokens[0].file, tokens[0].line, \"LineEnd\", tokens[0].value)\n elif tokens[0].value == \"bake\":\n if last_token.type == \"LineEnd\":\n if tokens[1].value == \":\":\n func: AST_Function = AST_Function()\n code: List[AST_Node]\n rest_tokens: List[LEX_Type]\n code, rest_tokens = createCodeBlock(tokens[2:], tokens[1], ast_main)\n func.CodeSequence = code\n if len(code) == 0:\n throw_error(\"CodeBlockEmpty\", tokens[1].file, tokens[1].line, tokens[1].value)\n return func, rest_tokens[1:], rest_tokens[0], ast_main\n else:\n throw_error(\"ExpectedAfter\", tokens[0].file, tokens[0].line, \":\", tokens[0].value)\n else:\n throw_error(\"ExpectedBefore\", tokens[0].file, tokens[0].line, \"LineEnd\", tokens[0].value)\n else:\n return parseFunction(tokens[1:], tokens[0], ast_main)",
"def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used",
"def GetFunctionName():\n return traceback.extract_stack(None, 2)[0][2]",
"def get_func_ast(obj : types.FunctionType):\n return get_ast(obj).body[0]",
"def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def num_41(func=None):\n def predicates(func):\n \"\"\" \"\"\"\n predicate = [['isfunction', ['__doc__', '__name__', '__code__', '__defaults__', '__globals__', '__kwdefaults__']],\n ['ismodule',[]], ['isroutine', []],\n ['ismethod'], []\n ]\n def demo_def():\n \"\"\"dummy...\n : Demonstrates retrieving and documenting module and function info.\n :\n \"\"\"\n def sub():\n \"\"\"sub in dummy\"\"\"\n pass\n return None\n import inspect\n if func is None:\n func = demo_def\n script = sys.argv[0] # a useful way to get a file's name\n lines, line_num = inspect.getsourcelines(func)\n code = \"\".join([\"{:4d} {}\".format(idx, line)\n for idx, line in enumerate(lines)])\n defs = [key for key, value in globals().items()\n if callable(value) and value.__module__ == __name__]\n args = [line_num, code,\n inspect.getcomments(func), inspect.isfunction(func),\n inspect.ismethod(func), inspect.getmoduleinfo(script),\n defs\n ]\n members = []\n funcs = []\n if inspect.ismodule(func): #ismodule, isfunction\n m_s = inspect.getmembers(func)\n for m in m_s:\n members.append(m[0])\n if inspect.isfunction(func):\n f_s = inspect.getmembers(func)\n for f in f_s:\n funcs.append(f[0])\n # **** work on this\n mem = [i[0] for i in inspect.getmembers(art)]\n frmt = \"\"\"\n :----------------------------------------------------------------------\n :Code for a function on line...{}...\n {}\n :Comments preceeding function\n {}\n :function?... {} ... or method? {}\n :Module info...\n {}\n :\n :Module functions...\n {} \n :----------------------------------------------------------------------\n \"\"\"\n print(dedent(frmt).format(*args))\n print(\"function member names\\n{}\".format(members))\n return None",
"def get_return_stmt(self, code, func_name, return_type):\n func = self.get_func(\"\"\"\n %s %s(void) {\n %s\n }\n \"\"\" % (return_type, func_name, code), func_name)\n return func.return_stmts[0]",
"def _parse_function(self):\n first_pos = self.start_pos\n token_type, fname = self.next()\n if token_type != tokenize.NAME:\n return None\n\n fname = pr.Name(self.module, [(fname, self.start_pos)], self.start_pos,\n self.end_pos)\n\n token_type, open = self.next()\n if open != '(':\n return None\n params = self._parse_parentheses()\n\n token_type, colon = self.next()\n annotation = None\n if colon in ['-', '->']:\n # parse annotations\n if colon == '-':\n # The Python 2 tokenizer doesn't understand this\n token_type, colon = self.next()\n if colon != '>':\n return None\n annotation, colon = self._parse_statement(added_breaks=[':'])\n\n if colon != ':':\n return None\n\n # because of 2 line func param definitions\n scope = pr.Function(self.module, fname, params, first_pos, annotation)\n if self.user_scope and scope != self.user_scope \\\n and self.user_position > first_pos:\n self.user_scope = scope\n return scope",
"def get_code(func):\n import inspect\n\n raw = \"\".join(inspect.getsource(func))\n found = re.findall(\"(k = .*)\", raw)\n\n if any(found):\n code = found[0]\n return code\n else:\n return \"\"",
"def _find_functions(whole):\n for res in function_start_regex.findall(whole):\n function_start, function_name, params = res\n params_split = [x.strip() for x in params.split(',')]\n stack, code, core_code = 1, function_start, ''\n start = whole.find(function_start) + len(code)\n while stack > 0:\n try:\n next_char = whole[start]\n except IndexError: # dont worry we will obfuscte the one we found\n return # sometimes fails to find all functions on big files\n core_code += next_char\n if next_char == '{':\n stack += 1\n elif next_char == '}':\n stack -= 1\n start += 1\n yield (params, params_split, core_code[:-1], function_start)",
"def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]",
"def FunctionDef(self):\n type = self.Type()\n key = self.currtok[0]\n self.functions[self.currtok[0]] = \"first_call\"\n id = self.primary()\n self.functions[key] = type\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n params = self.Params()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LCURLY\":\n self.currtok = next(self.tg)\n decs = self.Declarations()\n states = self.Statements()\n if self.currtok[1].name == \"RCURLY\":\n self.currtok = next(self.tg)\n return FunctionDef(type, id, params, decs, states)\n raise SLUCSyntaxError(\"ERROR: Missing Right Curly Brace on line {0}\"\n .format(str(self.currtok[2] - 1 - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing Left Curly Brace on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing Right Paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing Left Paren on line {0}\".format(str(self.currtok[2] - 1)))",
"def get_fun_name(line):\n match = re.match(r'(function|macro)\\s*\\((\\w+)', line)\n if not match:\n return\n return match.groups()[1]",
"def find_called_function(self, name, node):\n\n functions = self.context.get_functions(name)\n\n if len(functions) == 1:\n return functions[0]\n else:\n matching_functions = []\n\n for function in functions:\n if self.visit_call_params(function, node):\n matching_functions.append(function)\n\n if len(matching_functions) != 1:\n raise CompileError('ambigious function call', node)\n\n return matching_functions[0]",
"def inspect_fdef(node):\n if node.returns is not None:\n return [{\"name\": \"returns\", \"line\": node.returns.lineno - 1, \"end_line\": node.returns.end_lineno - 1,\n \"col_offset\": node.returns.col_offset, \"end_col_offset\": node.returns.end_col_offset}]\n else:\n return []",
"def getcodedesc(code_): # 3\n if not isinstance(code_,rescode): raise TypeError(\"Argument code has wrong type\")\n arr_symname = array.array(\"b\",[0]*(value.max_str_len))\n memview_arr_symname = memoryview(arr_symname)\n arr_str = array.array(\"b\",[0]*(value.max_str_len))\n memview_arr_str = memoryview(arr_str)\n res,resargs = _msk.Env.getcodedesc(code_,memview_arr_symname,memview_arr_str)\n if res != 0:\n raise Error(rescode(res),\"\")\n retarg_symname,retarg_str = resargs\n retarg_str = arr_str.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n retarg_symname = arr_symname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_symname,retarg_str",
"def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res",
"def function_names_to_evaluate_first_found(self): # % str(Function._function_names_to_evaluate_first_found)\n return Function._function_names_to_evaluate_first_found",
"def funcs_in_script(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n N = len(lines)\n funcs = []\n for n in range(N):\n line = lines[n]\n\n ###################################################\n # RETRIEVE FUNCTION NAME #\n ###################################################\n if not line[:4] == 'def ':\n continue\n if not '(' in line:\n continue\n end = line.index('(')\n name = line[4:end]\n\n ###################################################\n # RETRIEVE DOCSTRING HEADER #\n ###################################################\n header = ''\n for m in range(n, N - 1):\n line = lines[m]\n\n # this should not happen (when coded in python syntax, a closing\n # parenthesis must appear first)\n if m > n and line[:4] == 'def ':\n break\n\n # this marks the end of the function definition\n if '):' in line:\n hline = lines[m + 1] # potential docstring header line\n # if it exists, then here\n\n\n # remove leading white spaces:\n while hline[0] == ' ':\n hline = hline[1:]\n\n # check whether it is in fact (the start of) a docstring\n if hline[:3] not in ['\"\"\"', \"'''\"]:\n break\n\n # take the first line of this docstring\n header = hline[3:-1]\n\n # remove docstring closing:\n if header[-3:] in ['\"\"\"', \"'''\"]:\n header = header[:-3]\n\n # ignore outdated functions if labelled as such:\n if header.lower()[:10] == '[outdated]':\n name = None\n if header.lower()[:1] == '*':\n name = None\n break\n\n if name is None:\n continue\n\n funcs.append([name, header])\n\n return funcs",
"def get_parsed_declaration(self) -> str:\n parent_type = self.node.underlying_typedef_type.spelling\n\n # Function prototypes need to be handled different. When clang can't\n # successfully parse the file it falls back to naming the return type\n # as the display name.\n # Unfortunately some versions of clang behave a little differently, some\n # will return a `POINTER` while others will return `FUNCITONNOPROTO`. The\n # `POINTER`s are easy to derive the real type from, but the test\n # environment doesn't use that version of clang.\n type_ = self.node.underlying_typedef_type\n if type_.kind == cindex.TypeKind.POINTER: # pragma: no cover\n type_ = type_.get_pointee()\n\n if type_.kind in (\n cindex.TypeKind.FUNCTIONPROTO,\n cindex.TypeKind.FUNCTIONNOPROTO,\n ):\n ret_value, paren, signature = parent_type.partition(\")\")\n signature = \"\".join((ret_value, self.name, paren, signature))\n\n return f\"typedef {signature}\"\n\n return f\"typedef {parent_type} {self.name}\"",
"def get_definition(self, info):\r\n token = info.obj\r\n lines = info.lines\r\n source_code = info.source_code\r\n filename = info.filename\r\n\r\n line_nr = None\r\n if '.' in token:\r\n token = token.split('.')[-1]\r\n\r\n line_nr = get_definition_with_regex(source_code, token,\r\n len(lines))\r\n if line_nr is None:\r\n return\r\n line = info.line\r\n exts = python_like_exts()\r\n if not osp.splitext(filename)[-1] in exts:\r\n return filename, line_nr\r\n if line.startswith('import ') or line.startswith('from '):\r\n alt_path = osp.dirname(filename)\r\n source_file = python_like_mod_finder(line, alt_path=alt_path,\r\n stop_token=token)\r\n if (not source_file or\r\n not osp.splitext(source_file)[-1] in exts):\r\n line_nr = get_definition_with_regex(source_code, token,\r\n line_nr)\r\n return filename, line_nr\r\n mod_name = osp.basename(source_file).split('.')[0]\r\n if mod_name == token or mod_name == '__init__':\r\n return source_file, 1\r\n else:\r\n with open(filename, 'rb') as fid:\r\n code = fid.read()\r\n code = encoding.decode(code)[0]\r\n line_nr = get_definition_with_regex(code, token)\r\n\r\n return filename, line_nr",
"def _get_module_and_name(func: Callable) -> Tuple[str, str]:\n if not inspect.isfunction(func) and not inspect.isclass(func):\n raise ValueError('Expect a function or class, but got: {}'.format(func))\n return func.__module__, func.__name__",
"def findCaller(cls):\n f = currentframe()\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (co.co_filename, f.f_lineno, co.co_name)\n break\n return rv",
"def parse(cls, data):\r\n try:\r\n # Parse the function here\r\n result = cls.FuncDefn.parseString(data)\r\n result_list = result.asList()\r\n args = result_list[3:result_list.index(')')]\r\n # Return single line or multi-line function body\r\n fn_body = re.sub(r'[^\\{]+\\{', '', data, count=1)\r\n parts = fn_body.strip().split('\\n')\r\n fn_body = '\\n'.join(parts[0:-1])\r\n return cls.GroovyFunction(result[1], args, fn_body, data)\r\n except Exception, ex:\r\n return {}",
"def find(name: str):\n return _functions[name]",
"def generate(code):\n name, traits = parseCode(code)\n return globals()[name](**traits)"
] | [
"0.68184936",
"0.66357005",
"0.6627714",
"0.6380974",
"0.634481",
"0.6164433",
"0.59679073",
"0.5878357",
"0.58456427",
"0.58147675",
"0.57996273",
"0.5772736",
"0.57490474",
"0.57399726",
"0.56921184",
"0.56889397",
"0.56479543",
"0.56471384",
"0.5646925",
"0.55967027",
"0.55775833",
"0.55628705",
"0.5551035",
"0.5499953",
"0.5482307",
"0.54764414",
"0.54168874",
"0.54138374",
"0.54001623",
"0.540007"
] | 0.6752605 | 1 |
Return a list of (name, arguments, return type) for all function definitions found in code. Arguments are returned as [(type, name), ...]. | def find_functions(code):
regex = "^\s*" + re_func_decl + "\s*{"
funcs = []
while True:
m = re.search(regex, code, re.M)
if m is None:
return funcs
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
else:
args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]
funcs.append((name, args, rtype))
code = code[m.end():] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_prototypes(code):\n\n prots = []\n lines = code.split('\\n')\n for line in lines:\n m = re.match(\"\\s*\" + re_func_prot, line)\n if m is not None:\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' '))\n for arg in args.split(',')]\n prots.append((name, args, rtype))\n\n return prots",
"def inspect_fdef(node):\n if node.returns is not None:\n return [{\"name\": \"returns\", \"line\": node.returns.lineno - 1, \"end_line\": node.returns.end_lineno - 1,\n \"col_offset\": node.returns.col_offset, \"end_col_offset\": node.returns.end_col_offset}]\n else:\n return []",
"def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used",
"def get_f_args(f_name, code):\n # get the function definition regex\n r='^[ ]*def[ ]+{}[ ]*\\((?P<args>.*)\\)[ ]*:[ ]*$'.format(f_name)\n r=\"[ ]*def[ ]+{}\\((?P<args>.*)\\)\".format(f_name)\n # for line in code\n a=[]\n for l in code.split(\"\\n\"):\n res=re.match(r, l)\n if res:\n test=res.groups(0)[0]\n cc=[]\n while len(test):\n tt,test=get_next_arg(test)\n cc.append(tt)\n a.append(ArgumentInfo([parse_arg(s) for s in cc]))\n if len(a)==0:\n return None\n if len(a)==1:\n return a[0]\n return a",
"def getListOfFunctionDefinitions(self, *args):\n return _libsbml.Model_getListOfFunctionDefinitions(self, *args)",
"def get(self, *args):\n return _libsbml.ListOfFunctionDefinitions_get(self, *args)",
"def parse_function_signature(code):\n m = re.search(\"^\\s*\" + re_func_decl + \"\\s*{\", code, re.M)\n if m is None:\n print(code)\n raise Exception(\"Failed to parse function signature. \"\n \"Full code is printed above.\")\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n return name, args, rtype",
"def make_function_stubs(self):\n res = \"\"\n for node in self.description.declarations() + self.description.definitions():\n if isinstance(node.type,pdl.TypeFunction):\n res += \"def {}({}):\\n pass\".format(node.name, \", \".join(map(\n lambda t: \"{}\".format(t.name), node.type.args)) )\n\n return res",
"def make_cpp_func_bodies(self):\n\t\tfor name, body in self.func_bodies.iteritems():\n\t\t\tt = Lexer(body).get_tokens()\t\t\t\n\t\t\tS = [] #Stack\n\t\t\tx = 0\n\t\t\twhile x < len(t):\n\t\t\t\tif t[x] == '(': #function call begins\n\t\t\t\t\tx += 1\n\t\t\t\t\tS.append(self.FUNCS_DICT.get(t[x], t[x]) + '(')\n\t\t\t\telif t[x] == ')': #function call ends\n\t\t\t\t\tacc = ''\n\t\t\t\t\twhile S[-1][-1] != '(':\n\t\t\t\t\t\t#pop off params until function call is reached\n\t\t\t\t\t\tacc = S.pop() + ',' + acc\n\t\t\t\t\t# [:-1] to strip off comma at the end\n\t\t\t\t\tS.append(S.pop() + acc[:-1] + ')') #S.pop() gives function\n\t\t\t\telse:\n\t\t\t\t\tS.append(self.convert_atom(t[x]))\n\t\t\t\tx += 1\n\t\t\tself.cpp_func_bodies[name] = S[0]",
"def funcs_in_script(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n N = len(lines)\n funcs = []\n for n in range(N):\n line = lines[n]\n\n ###################################################\n # RETRIEVE FUNCTION NAME #\n ###################################################\n if not line[:4] == 'def ':\n continue\n if not '(' in line:\n continue\n end = line.index('(')\n name = line[4:end]\n\n ###################################################\n # RETRIEVE DOCSTRING HEADER #\n ###################################################\n header = ''\n for m in range(n, N - 1):\n line = lines[m]\n\n # this should not happen (when coded in python syntax, a closing\n # parenthesis must appear first)\n if m > n and line[:4] == 'def ':\n break\n\n # this marks the end of the function definition\n if '):' in line:\n hline = lines[m + 1] # potential docstring header line\n # if it exists, then here\n\n\n # remove leading white spaces:\n while hline[0] == ' ':\n hline = hline[1:]\n\n # check whether it is in fact (the start of) a docstring\n if hline[:3] not in ['\"\"\"', \"'''\"]:\n break\n\n # take the first line of this docstring\n header = hline[3:-1]\n\n # remove docstring closing:\n if header[-3:] in ['\"\"\"', \"'''\"]:\n header = header[:-3]\n\n # ignore outdated functions if labelled as such:\n if header.lower()[:10] == '[outdated]':\n name = None\n if header.lower()[:1] == '*':\n name = None\n break\n\n if name is None:\n continue\n\n funcs.append([name, header])\n\n return funcs",
"def get_function_definition(self, file, i):\n\n # Run super definition\n definition, params = super().get_function_definition(file, i)\n\n # Parse function definition\n return_type, func_name, params, decorator = \\\n self.parse_function_definition(file, i, definition, params)\n\n # Define access modifier\n is_private = func_name.startswith(\"__\") and func_name.count(\"__\") < 2\n access_modifier = \"private\" if is_private else \"public\"\n\n # Create start and end for function call\n start = []\n end = [] + decorator\n\n # Return all variables of function definition\n return access_modifier, return_type, func_name, params, start, end",
"def get_rewards():\n this = modules[__name__]\n names, funcs = [], []\n for name, func in inspect.getmembers(this):\n\n # Is a definition a function\n if inspect.isfunction(func):\n # Is defined in this module\n if inspect.getmodule(func) == this:\n names.append(name)\n funcs.append(func)\n\n return tuple(names), tuple(funcs)",
"def getFunctionArguments(tokens: List[LEX_Type], last_token: LEX_Type) -> (List[AST_FunctionArgument], List[LEX_Type]):\n if tokens[0].type == \"LineEnd\":\n if tokens[1].type == \"Keyword\":\n return [], tokens\n return getFunctionArguments(tokens[1:], last_token)\n if tokens[0].type == \"ItemLister\":\n if tokens[1].type == \"Type\":\n if tokens[2].type == \"Identifier\":\n arguments: List[AST_FunctionArgument]\n last: List[LEX_Type]\n arguments, last = getFunctionArguments(tokens[3:], last_token)\n return [AST_FunctionArgument(tokens[1].value, tokens[2].value)] + arguments, last",
"def make_def_function_types(self):\n res = \"\"\n for node in self.description.definitions():\n if isinstance(node.type, pdl.TypeFunction):\n res += \"{} = {}\\n\".format(self.python_madz_deftypes + self.mangled_namespace + \"___\" + node.name, self.gen_type_string(node.type))\n\n return res",
"def functions(self):\n return functions(self.startEA, self.endEA)",
"def _parse_functions(self, locals: dict):\n functions_dict = dict(filter(self._isfunction, locals.items()))\n functions = []\n if not self.args:\n functions.append(next(iter(functions_dict.values())))\n else:\n for i in range(len(self.args)):\n if functions_dict.get(self.args[0]):\n functions.append(functions_dict[self.args.pop(0)])\n else:\n if not functions:\n msg = f'ezmake command args: {self.args} did not ' + \\\n 'match any functions defined in Makefile.py: %s' %\\\n list(functions_dict.keys())\n raise TypeError(msg)\n break\n self.functions = functions",
"def _build_functions_list():\n return {\"ec2-sg\": _build_ec2_mapping_from_sg,\n \"ec2-resources\": _build_ec2_mapping_from_resources,\n \"rds-sg\": _build_rds_mapping_from_sg,\n \"rds-resources\": _build_rds_mapping_from_resources,\n \"elbv2-sg\": _build_elbv2_mapping_from_sg,\n \"elbv2-resources\": _build_elbv2_mapping_from_resources}",
"def get_function_list_from_modlist(self):\n function_list = []\n function_name_list = []\n for module in self.module_list:\n for name, obj in inspect.getmembers(module, inspect.isfunction):\n if inspect.getmodule(obj) == module:\n function_list.append(obj)\n function_name_list.append(name)\n return function_list",
"def _method_calls(fn):\n return [x[1] for x in re.findall(METHOD, getsource(fn))]",
"def match_function(self, tokens):\n items = []\n\n def add_ref(item, unused_start, unused_end, unused_value):\n if isinstance(item, logic.Description):\n items.append(logic.expr(item))\n else:\n items.append(item)\n\n self.cp_parser.reference_callback = add_ref\n self.cp_parser.parse_tokens(tokens, debug=self.debug)\n return items",
"def make_func_bodies(self):\n\t\tfor name in self.func_dict:\n\t\t\ttok = Lexer(self.func_dict[name]).get_tokens()\n\t\t\tend = tok.match_paren(0)\n\t\t\theader_end = tok.match_paren(2)\n\n\t\t\tif tok[header_end+2] == '(': #Function returns another function\n\t\t\t\tstart = body.match_paren(header_end+2)+3\n\t\t\telse: #Function returns a concrete type\n\t\t\t\tstart = header_end+3\n\t\t\t\n\t\t\tself.func_bodies[name] = tok[start:end].get_joined()\n\n\t\tself.func_bodies['main'] = self.main",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText",
"def make_function_callbacks(self):\n res = \"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frags={\n \"name\": node.name,\n \"nameupper\": self.python_madz_deftypes + \"___\" + node.name,\n \"sanitize\": \"_sanitize_python_callback\" if isinstance(node.type.return_type.get_type(), pdl.TypePointer) else \"_python_callback\"\n }\n res += \\\n\"\"\"\n temp = cast({sanitize}(user_code_module.{name}, {nameupper}), {nameupper})\n keepers['{nameupper}'] = temp\n _plugin.contents.{name} = temp\n\"\"\".format(**frags)\n return res",
"def get_rdkit_descriptor_functions():\n ret = [\n (name, f)\n for name, f in inspect.getmembers(Descriptors)\n if inspect.isfunction(f) and not name.startswith(\"_\")\n ]\n # some which are not in the official Descriptors module we need to add manually\n ret.extend([(\"FormalCharge\", Chem.GetFormalCharge), (\"SSSR\", Chem.GetSSSR)])\n ret.sort()\n return ret",
"def getcodedesc(code_): # 3\n if not isinstance(code_,rescode): raise TypeError(\"Argument code has wrong type\")\n arr_symname = array.array(\"b\",[0]*(value.max_str_len))\n memview_arr_symname = memoryview(arr_symname)\n arr_str = array.array(\"b\",[0]*(value.max_str_len))\n memview_arr_str = memoryview(arr_str)\n res,resargs = _msk.Env.getcodedesc(code_,memview_arr_symname,memview_arr_str)\n if res != 0:\n raise Error(rescode(res),\"\")\n retarg_symname,retarg_str = resargs\n retarg_str = arr_str.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n retarg_symname = arr_symname.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_symname,retarg_str",
"def getGlobalFunctions(self, name: unicode) -> List[ghidra.program.model.listing.Function]:\n ...",
"def getGroupFuncs(self):\n\n funcs = []\n for p in self.Parameters:\n if p.arg_name[0:8] == \"Function\" and p.arg_value:\n fct, attr = p.arg_value.split(':')\n if fct and attr:\n funcs.append((fct, attr))\n if not funcs:\n funcs.append(('count', '*'))\n return funcs",
"def _find_functions(whole):\n for res in function_start_regex.findall(whole):\n function_start, function_name, params = res\n params_split = [x.strip() for x in params.split(',')]\n stack, code, core_code = 1, function_start, ''\n start = whole.find(function_start) + len(code)\n while stack > 0:\n try:\n next_char = whole[start]\n except IndexError: # dont worry we will obfuscte the one we found\n return # sometimes fails to find all functions on big files\n core_code += next_char\n if next_char == '{':\n stack += 1\n elif next_char == '}':\n stack -= 1\n start += 1\n yield (params, params_split, core_code[:-1], function_start)",
"def get_explorer_toolbox() -> List[Tuple[str, str, str]]:\n explorer_toolbox = list(_explorer_toolbox)\n explorer_toolbox.extend(\n (func_name, title, description)\n for func_name, title, description in _bio2bel_functions\n if _function_is_registered(func_name)\n )\n return explorer_toolbox"
] | [
"0.6066096",
"0.6015459",
"0.5959938",
"0.59203005",
"0.583354",
"0.56550306",
"0.56340224",
"0.5607439",
"0.557955",
"0.554569",
"0.5541941",
"0.5537403",
"0.5534576",
"0.5530362",
"0.5520808",
"0.5512901",
"0.54985154",
"0.5478151",
"0.54779917",
"0.54722935",
"0.5433578",
"0.5405825",
"0.5405825",
"0.53922826",
"0.5358211",
"0.5356389",
"0.53420347",
"0.5338142",
"0.53197753",
"0.53147763"
] | 0.6613337 | 0 |
Return a list of signatures for each function prototype declared in code. Format is [(name, [args], rtype), ...]. | def find_prototypes(code):
prots = []
lines = code.split('\n')
for line in lines:
m = re.match("\s*" + re_func_prot, line)
if m is not None:
rtype, name, args = m.groups()[:3]
if args == 'void' or args.strip() == '':
args = []
else:
args = [tuple(arg.strip().split(' '))
for arg in args.split(',')]
prots.append((name, args, rtype))
return prots | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_functions(code):\n regex = \"^\\s*\" + re_func_decl + \"\\s*{\"\n \n funcs = []\n while True:\n m = re.search(regex, code, re.M)\n if m is None:\n return funcs\n \n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n funcs.append((name, args, rtype))\n \n code = code[m.end():]",
"def parse_function_signature(code):\n m = re.search(\"^\\s*\" + re_func_decl + \"\\s*{\", code, re.M)\n if m is None:\n print(code)\n raise Exception(\"Failed to parse function signature. \"\n \"Full code is printed above.\")\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n return name, args, rtype",
"def signature(cls) -> List[Term]:\n el = []\n for term in cls.__dict__.values():\n if not isinstance(term, (Constant, Function)):\n continue\n el.append(deepcopy(term))\n return el",
"def fix_function_signatures(code):\n pat = r\"\"\"^[ \\t]*function[ \\t.\\n]* # keyword (function)\n (\\[?[\\w, \\t.\\n]*\\]?) # outputs: group(1)\n [ \\t.\\n]*=[ \\t.\\n]* # punctuation (eq)\n (\\w+)[ \\t.\\n]* # name: group(2)\n \\(?([\\w, \\t.\\n]*)\\)?\"\"\" # args: group(3)\n pat = re.compile(pat, re.X | re.MULTILINE) # search start of every line\n\n # replacement function\n def repl(m):\n retv = m.group(0)\n # if no args and doesn't end with parentheses, append \"()\"\n if not (m.group(3) or m.group(0).endswith(\"()\")):\n retv = retv.replace(m.group(2), m.group(2) + \"()\")\n return retv\n\n code = pat.sub(repl, code) # search for functions and apply replacement\n\n return code",
"def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used",
"def getGroupFuncs(self):\n\n funcs = []\n for p in self.Parameters:\n if p.arg_name[0:8] == \"Function\" and p.arg_value:\n fct, attr = p.arg_value.split(':')\n if fct and attr:\n funcs.append((fct, attr))\n if not funcs:\n funcs.append(('count', '*'))\n return funcs",
"def signature(function):\n\tdesc = inspect.getargspec(function)\n\tif desc[3]:\n\t\tldefault = len(desc[3])\n\t\tdefault = desc[3]\n\t\tsign = ','.join(desc[0][:-ldefault])\n\telse:\n\t\tldefault = 0\n\t\tdefault=[]\n\t\tsign = ','.join(desc[0])\t\n\tfor n,v in zip(desc[0][-ldefault:],default):\n\t\tsign += ','+n+\"=\"+str(v)\t\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign",
"def getListOfFunctionDefinitions(self, *args):\n return _libsbml.Model_getListOfFunctionDefinitions(self, *args)",
"def methodSignature(self, req, method):\n p = self.get_method(method)\n return [','.join([RPC_TYPES[x] for x in sig]) for sig in p.xmlrpc_signatures()]",
"def extract_method_signature(code, line):\n line += 5\n method_signature = []\n offset = get_offset(code, line, \"catch(\")\n param_pattern = re.compile(rf\"{R_VAR}\\(.*, ?.*, ?(.*)\\)\\);\")\n\n for _ in range(int((offset - 2) / 2)):\n parameter = parse_parameter(code, re.findall(param_pattern, code[line])[0])\n\n # If List type found, assume ArrayList implementation of Strings\n if parameter.startswith(COMPLEX_TYPES[\"LIST\"]):\n parameter += f\"<{COMPLEX_TYPES['ARRAY']}/4159755760\"\n parameter += f\"<{COMPLEX_TYPES['STRING']}/2004016611>>\"\n\n # If specific List implementation found, assume it is of Strings\n elif re.match(r\"java\\.util\\.[A-Za-z]+List/.*\", parameter):\n parameter += f\"<{COMPLEX_TYPES['STRING']}/2004016611>\"\n\n method_signature.append(parameter)\n\n line += 1\n\n return method_signature",
"def make_cpp_func_bodies(self):\n\t\tfor name, body in self.func_bodies.iteritems():\n\t\t\tt = Lexer(body).get_tokens()\t\t\t\n\t\t\tS = [] #Stack\n\t\t\tx = 0\n\t\t\twhile x < len(t):\n\t\t\t\tif t[x] == '(': #function call begins\n\t\t\t\t\tx += 1\n\t\t\t\t\tS.append(self.FUNCS_DICT.get(t[x], t[x]) + '(')\n\t\t\t\telif t[x] == ')': #function call ends\n\t\t\t\t\tacc = ''\n\t\t\t\t\twhile S[-1][-1] != '(':\n\t\t\t\t\t\t#pop off params until function call is reached\n\t\t\t\t\t\tacc = S.pop() + ',' + acc\n\t\t\t\t\t# [:-1] to strip off comma at the end\n\t\t\t\t\tS.append(S.pop() + acc[:-1] + ')') #S.pop() gives function\n\t\t\t\telse:\n\t\t\t\t\tS.append(self.convert_atom(t[x]))\n\t\t\t\tx += 1\n\t\t\tself.cpp_func_bodies[name] = S[0]",
"def signature(function):\n pass",
"def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args",
"def parseNames(self, compiledCode):\n res = []\n if not compiledCode is None:\n res = compiledCode.co_names\n for co in compiledCode.co_consts:\n if not co is None and isinstance(co, CodeType):\n res += co.co_names\n return res",
"def get_test_functions():\r\n\r\n test_funcs = [obj for name,obj in inspect.getmembers(sys.modules[__name__])\r\n if (inspect.isfunction(obj) and name.startswith('test'))]\r\n src = inspect.getsource(sys.modules[__name__])\r\n lines = src.split('\\n')\r\n\r\n # Create a dictionary with key=function name and value is 0-based order\r\n # in the module\r\n ordered_func_names = dict()\r\n ordered_funcs = list()\r\n func_index = 0\r\n for line in lines:\r\n if line.find(\"def test\") > -1 and not line.find('line.find') > -1:\r\n func_name = line.split(\"(\")[0].split()[1]\r\n ordered_func_names[func_name] = func_index\r\n # Create an empty list with sampe number of elements as test\r\n # functions\r\n ordered_funcs.append('')\r\n func_index += 1\r\n for test_func in test_funcs:\r\n index = ordered_func_names[test_func.__name__]\r\n ordered_funcs[index] = test_func\r\n return ordered_funcs",
"def make_function_stubs(self):\n res = \"\"\n for node in self.description.declarations() + self.description.definitions():\n if isinstance(node.type,pdl.TypeFunction):\n res += \"def {}({}):\\n pass\".format(node.name, \", \".join(map(\n lambda t: \"{}\".format(t.name), node.type.args)) )\n\n return res",
"def arguments_from_funccode(f):\n fc = fc_or_c(f)\n vnames = fc.co_varnames\n nargs = fc.co_argcount\n # bound method and fake function will be None\n args = vnames[1 if is_bound(f) else 0:nargs]\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return list(args)",
"def _extract_methods_signatures(self):\n return {\n 'erc20': {\n 'totalSupply': self._extract_first_bytes('totalSupply()'),\n 'balanceOf': self._extract_first_bytes('balanceOf(address)'),\n 'allowance': self._extract_first_bytes('allowance(address,address)'),\n 'transfer': self._extract_first_bytes('transfer(address,uint256)'),\n 'transferFrom': self._extract_first_bytes('transferFrom(address,address,uint256)'),\n 'approve': self._extract_first_bytes('approve(address,uint256)'),\n },\n 'erc223': {\n 'tokenFallback': self._extract_first_bytes('tokenFallback(address,uint256,bytes)')\n },\n 'bancor_converter': {\n 'convert': self._extract_first_bytes('convert(address,address,uint256,uint256)')\n }\n }",
"def get_rdkit_descriptor_functions():\n ret = [\n (name, f)\n for name, f in inspect.getmembers(Descriptors)\n if inspect.isfunction(f) and not name.startswith(\"_\")\n ]\n # some which are not in the official Descriptors module we need to add manually\n ret.extend([(\"FormalCharge\", Chem.GetFormalCharge), (\"SSSR\", Chem.GetSSSR)])\n ret.sort()\n return ret",
"def _method_calls(fn):\n return [x[1] for x in re.findall(METHOD, getsource(fn))]",
"def parse_prototype(text):\n m = re_symbol.match(text)\n if not m:\n raise ValueError(\"Invalid function name for export prototype\")\n s = m.start(0)\n e = m.end(0)\n symbol = text[s:e]\n functype = text[e + 1:]\n return symbol, functype",
"def methodSignature(self, name):\r\n methods = self._listMethods()\r\n for method in methods:\r\n if method == name:\r\n rtype = None\r\n ptypes = []\r\n parsed = gettags(methods[method])\r\n for thing in parsed:\r\n if thing[1] == 'return': # tag name\r\n rtype = thing[2] # datatype\r\n elif thing[1] == 'param': # tag name\r\n ptypes.append(thing[2]) # datatype\r\n if rtype is None:\r\n raise RPCError(Faults.SIGNATURE_UNSUPPORTED)\r\n return [rtype] + ptypes\r\n raise RPCError(Faults.SIGNATURE_UNSUPPORTED)",
"def getSignature(self):\n listOfSignatures = []\n listOfSignatures += self.keyExpression.getSignature()\n for i, e in self.dictOfExpressions.items():\n listOfSignatures += e.getSignature()\n signature = '<{}>'.format(self.getClassName())\n signature += '{{{}}}'.format(id(self))\n signature += '({})'.format(len(self.dictOfExpressions))\n signature += ',{}'.format(id(self.keyExpression))\n for i, e in self.dictOfExpressions.items():\n signature += f',{i},{id(e)}'\n listOfSignatures += [signature.encode()]\n return listOfSignatures",
"def _func_calls(fn):\n funcs = []\n bytecode = dis.Bytecode(fn)\n for itr in bytecode:\n if itr.opname in [\"LOAD_GLOBAL\", \"LOAD_METHOD\"]:\n funcs.append(itr.argval)\n return funcs",
"def allFunctions(self):\n\t\tmodulos=sublime.decode_value(open(RutasPython.funciones()).read())\n\t\tlista=[]\n\t\tfor modulo in modulos:\n\t\t\tlista+=[ (funcion+\"\\t•\"+modulo, self.ponerCursor(modulo+\".\"+funcion)) for funcion in modulos[modulo]]\n\t\treturn sorted(lista)",
"def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result",
"def signature(function: model.Function) -> str:\n return str(function.signature)",
"def getSignature(self):\n listOfSignatures = []\n for e in self.children:\n listOfSignatures += e.getSignature()\n mysignature = f'<{self.getClassName()}>'\n mysignature += f'{{{id(self)}}}'\n mysignature += f'({len(self.children)})'\n for e in self.children:\n mysignature += f',{id(e)}'\n listOfSignatures += [mysignature.encode()]\n return listOfSignatures",
"def callsignature(function):\n\tdesc = inspect.getargspec(function)\n\tsign = ','.join(desc[0])\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign",
"def getSignature(self):\n listOfSignatures = []\n for e in self.children:\n listOfSignatures += e.getSignature()\n signature = f'<{self.getClassName()}>'\n signature += f'{{{id(self)}}}'\n signature += f'({len(self.util)})'\n signature += f',{id(self.choice)}'\n for i, e in self.util.items():\n signature += f',{i},{id(e)},{id(self.av[i])}'\n listOfSignatures += [signature.encode()]\n return listOfSignatures"
] | [
"0.6960925",
"0.6853837",
"0.6183662",
"0.6137309",
"0.61293304",
"0.585127",
"0.58011335",
"0.5792403",
"0.5768999",
"0.5726607",
"0.571727",
"0.5692678",
"0.56545895",
"0.5620403",
"0.55659837",
"0.5563249",
"0.55443704",
"0.5544288",
"0.5539026",
"0.55336374",
"0.55096585",
"0.5494622",
"0.54686856",
"0.54186183",
"0.5408511",
"0.5407519",
"0.5379801",
"0.5378179",
"0.53543603",
"0.53466547"
] | 0.76626337 | 0 |
Return a list of template variables found in code. | def find_template_variables(code):
return re.findall(re_template_var, code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def variables(self):\r\n return self.get_field('variable')",
"def get_variables_list(self):\n variables = self.variables.values()\n # handle reference variables\n for variable in variables:\n name = variable['name']\n if name in self.references:\n variable['data'] = self.references[name]\n return variables",
"def get_variables(self):\n\t\treturn self.variables",
"def variables(self):\n return self._.vars",
"def GetVariableAttributes(template_src, env=None):\n env = env or jinja2.Environment()\n abstract_syntax_tree = env.parse(template_src)\n node_visitor = _GetattrNodeVisitor()\n node_visitor.visit(abstract_syntax_tree)\n\n output = set()\n undeclared_variables = meta.find_undeclared_variables(abstract_syntax_tree)\n used_variables = set()\n for node in node_visitor.getattr_nodes:\n attr_list = _GetAttributeList(node)\n if attr_list[0] in undeclared_variables:\n used_variables.add(attr_list[0])\n output.add('.'.join(attr_list))\n return output | (undeclared_variables - used_variables)",
"def get_variables(self):\n local_variables = self._design.GetVariables(\n )+self._design.GetPostProcessingVariables()\n return {lv: self.get_variable_value(lv) for lv in local_variables}",
"def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }",
"def __get_vars_list(self, template_idx, log):\n template = self.templates[template_idx].split()\n log = log.split()\n variables = []\n pt = pl = 0\n while pt < len(template) and pl < len(log):\n if template[pt] == log[pl]:\n pt += 1\n pl += 1\n continue\n elif template[pt] == '*':\n # found a variable\n while pt < len(template) and template[pt] == '*':\n # in case there are many variables together\n pt += 1\n if pt >= len(template):\n # it's the end of the template\n variables.append(' '.join(log[pl:]))\n break\n else:\n variable_tokens = []\n while pl < len(log) and log[pl] != template[pt]:\n variable_tokens.append(log[pl])\n pl += 1\n # it duplicates when many variables together for a correct output\n variables.append(' '.join(variable_tokens))\n else:\n # it is a variable not covered by the template asterisks\n # we move on on the log but stay on the template token\n pl += 1\n return variables",
"def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]",
"def get_all_variables(self):\n return []",
"def __setVarNames(self):\n result = set()\n\n # detecting variables\n for templatePart in self.inputString().split(\"{\"):\n if templatePart is '' or \"}\" not in templatePart:\n continue\n\n endIndex = templatePart.find('}')\n result.add(templatePart[:endIndex])\n\n self.__varNames = list(result)",
"def retrieve_variables(content):\n variables = []\n in_var_section = False\n for line in content.splitlines():\n #print line\n if in_var_section:\n var_def = re.split(' +', line)\n if len(var_def) > 1:\n #print var_def[0], ':', var_def[1]\n var_name = var_def[0]\n def_value = var_def[1]\n if not def_value.startswith('%'): #not environment variable which would be directly passed to robot\n variables.append([var_name.strip('${').strip('}'), def_value])\n if '*** Variables ***' in line:\n in_var_section = True\n elif in_var_section and '*** ' in line:\n #end of Variables section\n break\n return variables",
"def variables(self):\n return [term.variable for term in self.terms]",
"def variables_used (self) :\r\n\t\treturn []",
"def get_all_variables(self):\n return [self.item]",
"def list_variables(self):\n return list(self._properties.keys())",
"def get_variable_names(self):\n return [VariableString(s) for s in\n self._design.GetVariables()+self._design.GetPostProcessingVariables()]",
"def get_variables(self):\n return self.variables",
"def get_variables(self):\n return {VariableString(s): self.get_variable_value(s) for s in self._project.GetVariables()}",
"def getVariables(self)->Dict[str,str]:\n pass",
"def dump_var_map(self):\n vars = []\n vars.append(\"--------- VARIABLES ---------\")\n for var in self.var_map:\n vars.append(var)\n\n return vars",
"def get_variable_names(text):\n names = []\n if '@@' in text:\n matches = _property_pattern.findall(text)\n for token, key in matches:\n names.append(key)\n\n return names",
"def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]",
"def list_variables(self, request, context):\n response = ListVariablesResponse()\n for variable in self._delegator.list_variables(request.component):\n response.variables.append(variable)\n return response",
"def variables(self):\n return self._variablesDef",
"def getSelectedVariables(self):\r\n\r\n variables = []\r\n\r\n if self.ui.variablesStackedWidget.currentWidget() == self.ui.treePage:\r\n for index in self.ui.treeView.selectionModel().selectedRows():\r\n sourceIndex = self.treeFilterModel.mapToSource(index)\r\n treeItem = sourceIndex.internalPointer()\r\n if treeItem.variable is not None:\r\n variables.append(treeItem.variable)\r\n else:\r\n for index in self.ui.tableView.selectionModel().selectedRows():\r\n sourceIndex = self.tableFilterModel.mapToSource(index)\r\n variable = sourceIndex.internalPointer()\r\n variables.append(variable)\r\n\r\n return variables",
"def regex_findall_variables(raw_string: Text) -> List[Text]:\n try:\n match_start_position = raw_string.index(\"$\", 0)\n except ValueError:\n return []\n\n vars_list = []\n while match_start_position < len(raw_string):\n\n # Notice: notation priority\n # $$ > $var\n\n # search $$\n dollar_match = dolloar_regex_compile.match(raw_string, match_start_position)\n if dollar_match:\n match_start_position = dollar_match.end()\n continue\n\n # search variable like ${var} or $var\n var_match = variable_regex_compile.match(raw_string, match_start_position)\n if var_match:\n var_name = var_match.group(1) or var_match.group(2)\n vars_list.append(var_name)\n match_start_position = var_match.end()\n continue\n\n curr_position = match_start_position\n try:\n # find next $ location\n match_start_position = raw_string.index(\"$\", curr_position + 1)\n except ValueError:\n # break while loop\n break\n\n return vars_list"
] | [
"0.66962886",
"0.65876555",
"0.6326123",
"0.6295308",
"0.62385863",
"0.62311065",
"0.62209594",
"0.6211561",
"0.61806494",
"0.61253965",
"0.61250657",
"0.6060957",
"0.6031869",
"0.6000928",
"0.59701294",
"0.5965443",
"0.5964786",
"0.5951391",
"0.59493124",
"0.592245",
"0.5907057",
"0.5891997",
"0.5865592",
"0.58622086",
"0.5847081",
"0.58281034",
"0.582591",
"0.58208275",
"0.5816272",
"0.5792535"
] | 0.8810326 | 0 |
Returns a function for generating trials for a model op. Infers the Python main module for the operation and returns the `gen_trials` function defined for that module. Raises `TypeError` if the operation does not use a Python main module (either explicitly with the `main` attribute or implicitly in the `exec` attribute). | def optimizer_trial_generator(model, op_name):
try:
module_name = _model_op_main(model, op_name)
except ValueError as e:
raise TypeError(
f"could not get main module for {model.name}{op_name}: {e}"
) from None
else:
try:
main_mod = importlib.import_module(module_name)
except ImportError:
raise TypeError(
f"could not import main module {module_name} for "
f"{model.name}:{op_name}"
) from None
else:
try:
return main_mod.gen_trials
except AttributeError:
raise TypeError(
f"{main_mod.__name__} optimizer module does not "
"implement gen_trials"
) from None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_test_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.Dataset,\n argparse.Namespace,\n torch.nn.Module,\n Progress,\n TaskID,\n ],\n Tuple[Dict[str, float], pd.DataFrame],\n ]:\n pass",
"def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))",
"def first_time_chief_generate(self, features, input_layer_fn, trial_mode,\n shared_input_tensor, shared_lengths,\n logits_dimension, hparams, run_config,\n is_training, trials):\n my_id = architecture_utils.DirectoryHandler.get_trial_id(\n run_config.model_dir, self._phoenix_spec)\n\n prior_build_args = dict(\n features=features,\n input_layer_fn=input_layer_fn,\n shared_input_tensor=shared_input_tensor,\n shared_lengths=shared_lengths,\n is_training=is_training,\n trials=trials,\n logits_dimension=logits_dimension,\n my_id=my_id,\n my_model_dir=run_config.model_dir)\n\n if trial_mode == trial_utils.TrialMode.DISTILLATION:\n return self.build_priors_distillation(**prior_build_args)\n\n if trial_utils.is_nonadaptive_ensemble_search(\n self._phoenix_spec.ensemble_spec):\n return self.build_priors_nonadaptively(**prior_build_args)\n\n if trial_utils.is_adaptive_ensemble_search(\n self._phoenix_spec.ensemble_spec):\n return self.build_priors_adaptively(**prior_build_args)\n\n if trial_utils.is_residual_ensemble_search(\n self._phoenix_spec.ensemble_spec):\n return self.build_priors_adaptively(**prior_build_args)\n\n if trial_utils.is_intermixed_ensemble_search(\n self._phoenix_spec.ensemble_spec):\n return self.build_priors_intermixed(**prior_build_args)\n\n # No ensemble spec or distillation spec was specified.\n architecture_utils.set_number_of_towers(self.generator_name(), 0)\n return [], []",
"def worker(module_name,\n operator_class,\n occurrence,\n test_runner):\n try:\n with preserve_modules():\n module = importlib.import_module(module_name)\n module_source_file = inspect.getsourcefile(module)\n module_ast = get_ast(module)\n module_source = astunparse.unparse(module_ast)\n\n core = MutatingCore(occurrence)\n operator = operator_class(core)\n # note: after this step module_ast and modified_ast\n # appear to be the same\n modified_ast = operator.visit(module_ast)\n modified_source = astunparse.unparse(modified_ast)\n\n if not core.activation_record:\n return WorkItem(\n worker_outcome=WorkerOutcome.NO_TEST)\n\n # generate a source diff to visualize how the mutation\n # operator has changed the code\n module_diff = [\"--- mutation diff ---\"]\n for line in difflib.unified_diff(module_source.split('\\n'),\n modified_source.split('\\n'),\n fromfile=\"a\" + module_source_file,\n tofile=\"b\" + module_source_file,\n lineterm=\"\"):\n module_diff.append(line)\n\n with using_ast(module_name, module_ast):\n rec = test_runner()\n\n rec.update({\n 'diff': module_diff,\n 'worker_outcome': WorkerOutcome.NORMAL\n })\n rec.update(core.activation_record)\n return rec\n\n except Exception: # noqa # pylint: disable=broad-except\n return WorkItem(\n data=traceback.format_exception(*sys.exc_info()),\n test_outcome=TestOutcome.INCOMPETENT,\n worker_outcome=WorkerOutcome.EXCEPTION)",
"def main():\n flags = PARSER.parse_args()\n\n if flags.to == 'savedmodel':\n to_savedmodel(input_shape=flags.input_shape,\n model_fn=unet_fn,\n src_dir=flags.checkpoint_dir,\n dst_dir='./saved_model',\n input_names=['IteratorGetNext'],\n output_names=['total_loss_ref'],\n use_amp=flags.use_amp,\n use_xla=flags.use_xla,\n compress=flags.compress)\n if flags.to == 'tensorrt':\n ds = Dataset(data_dir=flags.data_dir,\n batch_size=1,\n augment=False,\n gpu_id=0,\n num_gpus=1,\n seed=42)\n iterator = ds.test_fn(count=1).make_one_shot_iterator()\n features = iterator.get_next()\n\n sess = tf.Session()\n\n def input_data():\n return {'input_tensor:0': sess.run(features)}\n\n to_tensorrt(src_dir=flags.savedmodel_dir,\n dst_dir='./tf_trt_model',\n precision=flags.precision,\n feed_dict_fn=input_data,\n num_runs=1,\n output_tensor_names=['Softmax:0'],\n compress=flags.compress)\n if flags.to == 'onnx':\n to_onnx(src_dir=flags.savedmodel_dir,\n dst_dir='./onnx_model',\n compress=flags.compress)",
"def get_function(model_or_function, preprocess_function=None):\n from dianna.utils.onnx_runner import SimpleModelRunner # pylint: disable=import-outside-toplevel\n\n if isinstance(model_or_function, Path):\n model_or_function = str(model_or_function)\n\n if isinstance(model_or_function, (str, bytes, Path)):\n runner = SimpleModelRunner(model_or_function,\n preprocess_function=preprocess_function)\n elif callable(model_or_function):\n if preprocess_function is None:\n runner = model_or_function\n else:\n\n def runner(input_data):\n return model_or_function(preprocess_function(input_data))\n else:\n raise TypeError(\n 'model_or_function argument must be string (path to model), '\n 'bytes (serialized onnx model), or function')\n return runner",
"def get_adv_test_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.DataLoader,\n argparse.Namespace,\n torch.nn.Module,\n torch.optim.Optimizer,\n Progress,\n TaskID,\n ],\n None,\n ]:\n pass",
"def run_test(test, fw):\n\n test_path = f\"tests.{test}\"[:-3]\n print(test_path)\n __import__(test_path)\n test_module = sys.modules[test_path]\n analysis_id = test_module.main(fw)\n print(f\"analysis_id = {analysis_id}\")\n return analysis_id",
"def model_fn_builder(config):\n init_checkpoint = config.init_checkpoint\n coref_model = CorefQAModel(config)\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions, total_loss = coref_model.forward(features, is_training)\n doc_idx, subtoken_map, top_span_starts, top_span_ends, antecedent_starts, antecedent_ends, antecedent_scores = predictions\n tvars = tf.trainable_variables()\n initialized_variables = {}\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, initialized_variables = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if config.use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \", *INIT_FROM_CKPT*\" if var.name in initialized_variables else \"\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = create_custom_optimizer(total_loss, config)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(loss):\n return {\"eval_loss\": tf.metrics.mean(loss)}\n\n eval_metrics = (metric_fn, [total_loss])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"doc_idx\": doc_idx, \"subtoken_map\": subtoken_map,\n \"top_span_starts\": top_span_starts, \"top_span_ends\": top_span_ends,\n \"antecedent_starts\": antecedent_starts, \"antecedent_ends\": antecedent_ends,\n \"antecedent_scores\": antecedent_scores, \"loss\": total_loss},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn",
"def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits\n ))\n\n def metric_fn(labels, logits):\n labels = tf.cast(labels, tf.int64)\n return {\n \"recall@1\": tf.metrics.recall_at_k(labels, logits, 1),\n \"recall@5\": tf.metrics.recall_at_k(labels, logits, 5)\n }\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n elif mode == tf_estimator.ModeKeys.TRAIN:\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=params[\"learning_rate\"], beta1=params[\"beta1\"],\n beta2=params[\"beta2\"], epsilon=params[\"epsilon\"])\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n ))\n\n train_op = optimizer.minimize(loss, tf.train.get_global_step())\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n\n else:\n raise NotImplementedError\n return _model_fn",
"def gen(\n file: str,\n infer: bool = typer.Option(\n True, help=\"Whether to run type inference on code examples.\"\n ),\n exec: bool = typer.Option(\n False, help=\"Whether to attempt to execute doctring code.\"\n ),\n experimental: bool = typer.Option(False, help=\"Use experimental Ts parsing\"),\n debug: bool = False,\n dummy_progress: bool = typer.Option(False, help=\"Disable rich progress bar\"),\n):\n _intro()\n from papyri.gen import gen_main\n\n gen_main(\n infer=infer,\n exec_=exec,\n target_file=file,\n experimental=experimental,\n debug=debug,\n dummy_progress=dummy_progress,\n )",
"def construct_model_fn(problem, optimizer_class, base_optimizer_class,\n eval_weights=None, eval_num_samples=10,\n training_params_class=None,\n training_params_conditioning_class=None,\n base_optimizer_conditioning_class=None):\n def model_fn(features, mode, params):\n \"\"\"Returns a TPU estimator spec for the task at hand.\"\"\"\n problem.initialize_model()\n optimizer = optimizer_class(problem, batch_size=params[\"batch_size\"])\n training_params = training_params_class()\n learning_rate_normal = get_learning_rate(training_params)\n separate_conditioning_optimizer = (\n training_params_conditioning_class and base_optimizer_conditioning_class\n and isinstance(optimizer,\n optimizers.MultiLossOptimizerWithConditioning))\n if not separate_conditioning_optimizer and (\n training_params_conditioning_class\n or base_optimizer_conditioning_class):\n raise ValueError(\"training_params_conditioning_class and \"\n \"base_optimizer_conditioning_class should be provided \"\n \"together and only when the optimizer is \"\n \"MultiLossOptimizerWithConditioning.\")\n\n tf.logging.info(\"separate_conditioning_optimizer: %s\",\n separate_conditioning_optimizer)\n\n if separate_conditioning_optimizer:\n training_params_conditioning = training_params_conditioning_class()\n learning_rate_conditioning = get_learning_rate(\n training_params_conditioning)\n\n if mode == tf_estimator.ModeKeys.TRAIN:\n\n base_optimizer = get_optimizer(base_optimizer_class, learning_rate_normal,\n params[\"use_tpu\"])\n if separate_conditioning_optimizer:\n base_optimizer_conditioning = get_optimizer(\n base_optimizer_conditioning_class, learning_rate_conditioning,\n params[\"use_tpu\"])\n loss, opt_step = optimizer.compute_train_loss_and_update_op(\n features, base_optimizer, base_optimizer_conditioning)\n all_vars_str = \"\\n\".join([str(v) for v in optimizer.all_vars])\n normal_vars_str = \"\\n\".join([str(v) for v in optimizer.normal_vars])\n conditioning_vars_str = \"\\n\".join([str(v) for\n v in optimizer.conditioning_vars])\n tf.logging.info(\"\\n\\nall_vars\\n %s\", all_vars_str)\n tf.logging.info(\"\\n\\nnormal_vars\\n %s\", normal_vars_str)\n tf.logging.info(\"\\n\\nconditioning_vars\\n %s\", conditioning_vars_str)\n else:\n loss, opt_step = optimizer.compute_train_loss_and_update_op(\n features, base_optimizer)\n\n # weight decay op\n decay_op = get_decay_op(training_params.weight_decay,\n learning_rate_normal, opt_step,\n vars_to_decay=optimizer.normal_vars)\n if separate_conditioning_optimizer:\n decay_op_conditioning = get_decay_op(\n training_params_conditioning.weight_decay,\n learning_rate_conditioning,\n opt_step, vars_to_decay=optimizer.conditioning_vars)\n decay_op = tf.group([decay_op, decay_op_conditioning])\n # batch norm update ops\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n train_op = tf.group([opt_step, decay_op] + update_ops)\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n elif mode == tf_estimator.ModeKeys.EVAL:\n def unstack_metrics(**metrics):\n \"\"\"Unstack separate metrics from one big aggregate tensor.\n\n This is needed because otherwise evaluation on TPU with many metrics\n gets horribly slow. Concatenating all metrics into one tensor makes\n things much better.\n\n Args:\n **metrics: Dict[ Str: tf.Tensor ]. Dictionary with one element, for\n which the key the concatenation of all metric names separated by \"!\"\n and the value are all metric values stacked along axis 1.\n\n Returns:\n metrics_dict: Dict[ Str: tf.Tensor ]. 
Dictionary mapping metrics names\n to tensors with their per-sample values.\n \"\"\"\n if len(metrics) != 1:\n raise ValueError(\"Stacked metrics dict should have one element, got \"\n \"{}\".format(len(metrics)))\n names_stacked = list(metrics.keys())[0]\n values_stacked = metrics[names_stacked]\n names = names_stacked.split(\"!\")\n values = tf.unstack(values_stacked, axis=1)\n return {name: tf.metrics.mean(value) for name, value in\n zip(names, values)}\n\n loss = optimizer.compute_eval_loss(features)\n\n if isinstance(optimizer, optimizers.MultiLossOptimizerWithConditioning):\n sampled_weights = distributions.get_samples_as_dicts(\n eval_weights, num_samples=eval_num_samples,\n names=problem.losses_keys, seed=17)\n all_metrics = {}\n for idx, weights in enumerate(sampled_weights):\n with tf.variable_scope(\"\", reuse=tf.AUTO_REUSE):\n losses_id, metrics_id = \\\n optimizer.compute_eval_losses_and_metrics_for_weights(features,\n weights)\n all_metrics.update({\"{}/{}\".format(key, idx): value\n for key, value in losses_id.items()})\n all_metrics.update({\"{}/{}\".format(key, idx): value\n for key, value in metrics_id.items()})\n full_loss = 0.\n for loss_name in losses_id.keys():\n full_loss += weights[loss_name] * losses_id[loss_name]\n all_metrics.update({\"full_loss/{}\".format(idx): full_loss})\n else:\n with tf.variable_scope(\"\", reuse=tf.AUTO_REUSE):\n losses, metrics = problem.losses_and_metrics(features, training=False)\n all_metrics = losses\n all_metrics.update(metrics)\n metrics_shape_out = all_metrics[list(all_metrics.keys())[0]].get_shape()\n # Need this broadcasting because on TPU all output tensors should have\n # the same shape\n all_metrics.update(\n {\"learning_rate_normal\": tf.broadcast_to(\n learning_rate_normal, metrics_shape_out)})\n if separate_conditioning_optimizer:\n all_metrics.update(\n {\"learning_rate_conditioning\": tf.broadcast_to(\n learning_rate_conditioning, metrics_shape_out)})\n # Stacking all metrics for efficiency (otherwise eval is horribly slow)\n sorted_keys = sorted(all_metrics.keys())\n sorted_values = [all_metrics[key] for key in sorted_keys]\n metrics_stacked = {\"!\".join(sorted_keys): tf.stack(sorted_values, axis=1)}\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(unstack_metrics, metrics_stacked))\n else:\n raise ValueError(\"Unknown mode: {}\".format(mode))\n\n return model_fn",
"def multi_backend_test(globals_dict,\n relative_module_name,\n backends=('jax', 'tensorflow', 'numpy'),\n test_case=None):\n if test_case is None:\n return lambda test_case: multi_backend_test( # pylint: disable=g-long-lambda\n globals_dict=globals_dict,\n relative_module_name=relative_module_name,\n test_case=test_case)\n\n if BACKEND is not None:\n return test_case\n\n if relative_module_name == '__main__':\n raise ValueError(\n 'module_name should be written out manually, not by passing __name__.')\n\n # This assumes `test_util` is 2 levels deep inside of `inference_gym`. If we\n # move it, we'd change the `-2` to equal the (negative) nesting level.\n root_name_comps = __name__.split('.')[:-2]\n relative_module_name_comps = relative_module_name.split('.')\n\n # Register the rewrite hooks.\n importlib.import_module('.'.join(root_name_comps + ['backends', 'rewrite']))\n\n new_test_case_names = []\n for backend in backends:\n new_module_name_comps = (\n root_name_comps + ['dynamic', 'backend_{}'.format(backend)] +\n relative_module_name_comps)\n # Rewrite the module.\n new_module = importlib.import_module('.'.join(new_module_name_comps))\n\n # Subclass the test case so that we can rename it (absl uses the class name\n # in its UI).\n base_new_test = getattr(new_module, test_case.__name__)\n new_test = type('{}_{}'.format(test_case.__name__, backend),\n (base_new_test,), {})\n new_test_case_names.append(new_test.__name__)\n globals_dict[new_test.__name__] = new_test\n\n # We deliberately return None to delete the original test case from the\n # original module.",
"def relay_to_tir(name, func):\n return GenerateTIR(name).generate_tir(func)",
"def main():\n tng.api.runner()",
"def gen_examples_worker(program):\n print(\"\\rGenerating examples... %d\\\\%d (remaining programs: %d)\" %\n (progress_counter.value, num_programs, valid_counter.value), end=\"\")\n\n input_output_examples = constraint.get_input_output_examples(program, num_examples=num_examples,\n num_tries=num_example_tries)\n\n progress_counter.value += 1\n if input_output_examples:\n return input_output_examples\n else:\n valid_counter.value -= 1\n return None",
"def get_train_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.DataLoader,\n torch.utils.data.DataLoader,\n argparse.Namespace,\n torch.nn.Module,\n torch.optim.Optimizer,\n torch.optim.Optimizer,\n Progress,\n TaskID,\n ],\n None,\n ]:\n pass",
"def gen_program_worker(input_types):\n def helper(functions, program, programs):\n random.shuffle(functions)\n if progress_counter.value >= num_programs:\n return True\n\n if len(program) >= program_len:\n if get_unused_indices(program) or program in programs:\n return False\n else:\n programs.add(program)\n progress_counter.value += 1\n print(\"\\rGenerating programs... %d\\\\%d\" % (progress_counter.value, num_programs), end=\"\")\n return True\n\n type_to_vars = collections.defaultdict(list)\n for i, typ in enumerate(program.var_types):\n type_to_vars[typ].insert(0, i)\n\n # Move free indices to the front\n free_indxs = get_free_indices(program, program_len)\n for typ in program.var_types:\n for var in type_to_vars[typ]:\n if var in free_indxs:\n type_to_vars[typ].remove(var)\n type_to_vars[typ].insert(0, var)\n\n for func in LAMBDAS:\n type_to_vars[func.type].append(func)\n\n used = set(program.statements)\n for function in functions:\n for args in iterate_inputs(function, type_to_vars):\n if len([arg for arg in args if arg in free_indxs]) == 0:\n continue\n statement = Statement(function, args)\n if statement in used:\n continue\n\n next_program = Program(program.input_types,\n program.statements + [statement])\n if helper(functions, next_program, programs):\n return True\n\n program_base = Program(input_types, [])\n res = set()\n while progress_counter.value < num_programs:\n helper(ALL_FUNCTIONS, program_base, res)\n return res",
"def test_gen():\n tpot_obj = TPOTClassifier()\n\n pipeline = tpot_obj._gen_grow_safe(tpot_obj._pset, 1, 3)\n\n assert len(pipeline) > 1\n assert pipeline[0].ret == Output_DF",
"def main():\n model = Calculator()",
"def generate_model_fn(mode_feature_cols_map):\n def model_fn(features, labels, mode, params=None, config=None):\n if params is None:\n params = tf.contrib.training.HParams(learning_rate=0.01)\n\n # Extract the id tensor from the input features if it exists in the\n # feature_columns\n id_tensor = None\n if 'id' in features:\n id_tensor = features.pop('id')\n\n # Feature columns for given mode\n feature_cols = mode_feature_cols_map[mode]\n\n # Tensor of logits formed from input features\n logits = tf.feature_column.linear_model(features, feature_cols)\n\n # Apply the logistic function to the logits defined above\n # This is our classifier\n logistic = tf.sigmoid(logits, name='logistic')\n\n classifier_output = {\n 'clicked': logistic\n }\n\n if id_tensor is not None:\n classifier_output['id'] = tf.identity(id_tensor)\n\n loss = None\n train_op = None\n\n if mode in (MODES.TRAIN, MODES.EVAL):\n loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=labels, name='loss')\n )\n\n if mode == MODES.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n train_op = tf.train.GradientDescentOptimizer(\n learning_rate=params.learning_rate\n ).minimize(loss, global_step=global_step)\n\n eval_metric_ops = None\n\n if mode == MODES.EVAL:\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels, logistic)}\n\n # Define serving signatures\n prediction_output = tf.estimator.export.PredictOutput(\n classifier_output)\n\n export_outputs = {\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n prediction_output\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=classifier_output,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs\n )\n\n return model_fn",
"def main():\n load()\n\n print(generate())",
"def task(self):\n return import_path_to_callable(self.func)",
"def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric",
"def main():\n args = get_args()\n\n src_dir = args.input\n\n if os.path.exists(args.output):\n print(\"output directory already exists\")\n sys.exit(1)\n os.makedirs(args.output)\n copy_submission_dir(args.input, args.output, args.submitter)\n src_dir = args.output\n\n config = checker.Config(\n args.version,\n args.extra_model_benchmark_map)\n\n if not args.nodelete_empty_dirs:\n delete_empty_dirs(os.path.join(src_dir))\n\n os.chdir(src_dir)\n\n infer_scenario_results(args.submitter, args.noinfer_low_accuracy_results, config)\n\n return 0",
"def test():\n return _make_modules(is_train=False)",
"def _create_test_func(nb_name, nb_path, clearoutput=True):\n\n nb_func = f'\\ndef test_{nb_name}():\\n'\\\n f' fpath_rel = {nb_path.split(os.sep)[1:]}\\n'\\\n ' fname = os.path.join(nb_dir, *fpath_rel)\\n'\\\n ' tf.run_notebook(fname, clearoutput=False)\\n'\\\n ' return 0\\n'\n\n return nb_func",
"def run_from_generator(\n model, input_func=None, input_func_dict=None,\n eval_func_dict=None, nb_epochs=10, optimizer=None, model_dir=None):\n\n # 1. Create optimizer and compile model if optimizer is None\n if (optimizer is None):\n optimizer = tf.keras.optimizers.SGD(\n lr=1e-3, decay=1e-5, momentum=0.9, nesterov=True)\n\n # 2. compile the model\n model.compile(\n optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # 3. create estimator\n dir_path = os.path.join(os.getcwd(), model_dir)\n print(\"Model path chosen : \", dir_path)\n if (not os.path.exists(dir_path)):\n os.mkdir(dir_path)\n\n print(\"Creating estimator...\")\n est = tf.keras.estimator.model_to_estimator(\n keras_model=model, model_dir=dir_path)\n\n # 4. Train and Evaluate the model\n print(\"Training...\")\n\n # training spec\n train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_func(input_func_dict),\n max_steps=500)\n\n # evaluation spec\n eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_func(eval_func_dict))\n\n # Run the training\n model_est = tf.estimator.train_and_evaluate(est, train_spec, eval_spec)\n #est.train(input_fn=lambda: input_func(input_func_dict),\n # steps=None)\n #\n #est.evalute(input_fn=lambda: input_func(eval_func_dict))\n\n return est",
"def gen_workloads(lower_idx, upper_idx, target=\"llvm\"):\n return [LorienTestWorkload(target, idx) for idx in range(lower_idx, upper_idx)]",
"def main(args=sys.argv[1:]):\n\n # Parse arguments\n args = create_parser().parse_args(args)\n\n execution = API.create_execution(\n args.script, {'inputs': [['dataset-id', args.dataset]]})\n\n execution_id = execution['resource']\n e_id = re.split('execution/', execution_id)[1]\n execution_status = 0\n\n while execution_status != FINISHED:\n execution_resource = API.get_execution(execution_id)\n execution_status = execution_resource['object']['status']['code']\n number_of_models = API.list_models(\n \"execution_id=%s\" % e_id)['meta']['total_count']\n print \"models: %s\" % number_of_models\n return"
] | [
"0.52469647",
"0.50867206",
"0.49812433",
"0.49300626",
"0.48819524",
"0.48743096",
"0.48729882",
"0.48613867",
"0.4854776",
"0.48491868",
"0.48481944",
"0.48190248",
"0.48174357",
"0.47987285",
"0.47749686",
"0.47592515",
"0.47589567",
"0.47463167",
"0.4741073",
"0.4720266",
"0.47054622",
"0.47050485",
"0.469281",
"0.46909595",
"0.46787012",
"0.4649152",
"0.46463305",
"0.46366015",
"0.46340734",
"0.46256533"
] | 0.75061655 | 0 |
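A minimal usage sketch for the pattern in the record above: import an operation's main module and require that it expose `gen_trials`, converting import or attribute failures into `TypeError`. The `load_gen_trials` helper and the use of the stdlib `json` module to trigger a failure are assumptions for illustration only; the real code resolves the module name from the model op's exec spec.

```python
import importlib


def load_gen_trials(module_name):
    # Import the main module and require a gen_trials callable,
    # converting failures to TypeError as in the record above.
    try:
        main_mod = importlib.import_module(module_name)
    except ImportError:
        raise TypeError(f"could not import main module {module_name}") from None
    try:
        return main_mod.gen_trials
    except AttributeError:
        raise TypeError(
            f"{main_mod.__name__} optimizer module does not implement gen_trials"
        ) from None


try:
    load_gen_trials("json")  # stdlib module with no gen_trials attribute
except TypeError as e:
    print(e)  # -> json optimizer module does not implement gen_trials
```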
Looks for main module in exec spec for model op. Raises `ValueError` if exec spec is empty or not in the expected format. | def _op_main_for_exec(exec_):
if not exec_:
raise ValueError("exec spec not specified")
m = re.search(r"-u?m ([^ ]+)", exec_)
if not m:
raise ValueError(f"unexpected exec spec: {exec_!r}")
return m.group(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None",
"def search_executable(op, description = None):\n checked = []\n ret = None\n if isinstance(op, (list, tuple)):\n for ii in op:\n if not ii in checked:\n if check_executable(ii):\n ret = ii\n break\n else:\n checked += [ii]\n elif isinstance(op, str):\n if not op in checked:\n if check_executable(op):\n ret = op\n checked += [op]\n else:\n raise RuntimeError(\"weird argument given to executable search: %s\" % (str(op)))\n if description and is_verbose():\n output_message = \"Looking for '%s' executable... \" % (description)\n if ret:\n print(\"%s'%s'\" % (output_message, ret))\n else:\n print(\"%snot found\" % (output_message))\n return ret",
"def check_exec_cmd(config, modname, xsectname, xsectdict, indent=''):\n\n cnts = [0] * NUMCNTS\n\n # check that each exec section has execname (required)\n if pfwdefs.SW_EXECNAME not in xsectdict:\n error(indent, \"module %s, %s - missing %s\" % \\\n (modname, xsectname, pfwdefs.SW_EXECNAME))\n cnts[ERRCNT_POS] += 1\n elif '/' in xsectdict[pfwdefs.SW_EXECNAME]:\n warning(indent, \"module %s, %s - hardcoded path in %s (%s)\" % \\\n (modname, xsectname,\n pfwdefs.SW_EXECNAME, xsectdict[pfwdefs.SW_EXECNAME]))\n cnts[WARNCNT_POS] += 1\n\n # almost all production cases would need to have command line arguments\n if pfwdefs.SW_CMDARGS not in xsectdict:\n warning(indent, \"module %s, %s - missing %s\" % \\\n (modname, xsectname, pfwdefs.SW_CMDARGS))\n cnts[WARNCNT_POS] += 1\n else:\n moddict = config[pfwdefs.SW_MODULESECT][modname]\n argvars = pfwutils.search_wcl_for_variables(xsectdict[pfwdefs.SW_CMDARGS])\n for var in argvars:\n if var.endswith('.fullname'):\n var2 = var[0:-(len('.fullname'))]\n (sect, name, subname) = parse_wcl_objname(var2)\n if sect not in moddict or name not in moddict[sect]:\n error(indent, \"module %s, %s, %s - Undefined variable (%s)\" % \\\n (modname, xsectname, pfwdefs.SW_CMDARGS, var))\n cnts[ERRCNT_POS] += 1\n\n if subname and subname not in moddict[pfwdefs.SW_FILESECT]:\n error(indent, \"module %s, %s, %s - Undefined variable (%s)\" % \\\n (modname, xsectname, pfwdefs.SW_CMDARGS, var))\n cnts[ERRCNT_POS] += 1\n else:\n curvals = {'curr_module': modname}\n (_, _) = config.search(var, {pfwdefs.PF_CURRVALS: curvals,\n 'searchobj': xsectdict,\n 'required':False,\n intgdefs.REPLACE_VARS: True})\n\n # check that all values in args exist?/\n # check for value names that look like file/list names but are missing file/list in front\n # check that all file/list entries in args appears in inputs/outputs : err\n return cnts",
"def find_main_module(self):\n\n if self.type == 'passthrough':\n return None\n directory, basename = os.path.split(self.main_module)\n module, ext = os.path.splitext(basename)\n if ext:\n # if the module include the extension, just return its absolute\n # path\n return os.path.join(self.code_dir, self.main_module)\n\n # Otherwise, try to find the proper module, by assuming that there\n # is only one file with such name. Note that this may fail if\n # there are other files such as byte-compiled binaries, etc.\n found = glob.glob(os.path.join(self.code_dir, directory, module+'.*'))\n if not found:\n raise APIException('module not found: {}'\n .format(self.main_module), 400)\n\n return found[0]",
"def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function",
"def _parse_module_name(program_param):\n if program_param and program_param.endswith(\".py\"):\n return program_param[:-3]\n return program_param",
"def is_openconfig_validatable_module(mod):\n if re.match(r\"[a-z0-9]+\\-.*\", mod.lower()):\n # Avoid parsing IETF and IANA modules which are currently\n # included by OpenConfig, and avoid parsing the extension\n # module itself.\n modname_parts = mod.split(\"-\")\n if modname_parts[0] in [u\"ietf\", u\"iana\"]:\n return ModuleType.NONOC\n elif modname_parts[1] == \"extensions\":\n return ModuleType.OCINFRA\n return ModuleType.OC\n return ModuleType.NONOC",
"def find_spec(self, fullname, path, target=None):\n if not path:\n path = [os.getcwd()]\n if \".\" in fullname:\n name = fullname.split(\".\")[-1]\n else:\n name = fullname\n for entry in path:\n if os.path.isdir(os.path.join(entry, name)):\n # this module has child modules\n filename = os.path.join(entry, name, \"__init__.py\")\n submodule_locations = [os.path.join(entry, name)]\n else:\n filename = os.path.join(entry, name + \".\" + config.FILE_EXT)\n submodule_locations = None\n\n if not os.path.exists(filename):\n continue\n\n return spec_from_file_location(\n fullname,\n filename,\n loader=ExtensionLoader(filename),\n submodule_search_locations=submodule_locations,\n )\n return None # we don't know how to import this",
"def which(module, mode, exename):\n return _which(exename)",
"def check_exec_inputs(config, modname, dataobjs, xsectname, xsectdict, indent=''):\n\n cnts = [0] * NUMCNTS\n moddict = config[pfwdefs.SW_MODULESECT][modname]\n\n if pfwdefs.SW_INPUTS in xsectdict:\n print \"%sChecking %s %s...\" % (indent, xsectname, pfwdefs.SW_INPUTS)\n indent += ' '\n #print \"%sxsectdict[pfwdefs.SW_INPUTS] = %s\" % (indent, xsectdict[pfwdefs.SW_INPUTS])\n # for each entry in inputs\n for objname in miscutils.fwsplit(xsectdict[pfwdefs.SW_INPUTS], ','):\n objname = objname.lower()\n\n (sect, name, subname) = parse_wcl_objname(objname)\n\n if sect is None:\n error(indent+' ', \"module %s, %s, %s - Invalid entry (%s). Missing section label\" % (modname, xsectname, pfwdefs.SW_INPUTS, objname))\n cnts[ERRCNT_POS] += 1\n else:\n # check that appears in [file/list]sect : error\n bad = False\n if sect not in moddict or name not in moddict[sect]:\n found = False\n if 'loopobj' in moddict and moddict['loopobj'].startswith(sect) and sect in moddict:\n temp = moddict['loopobj'].split('.')[1:]\n d = moddict[sect]\n for t in temp:\n if t in d:\n d = d[t]\n if name in d:\n found = True\n else:\n if 'div_list_by_col' in d:\n if name in d['div_list_by_col']:\n found = True\n moddict[sect][name] = d['div_list_by_col'][name]\n\n if not found:\n bad = True\n error(indent+' ', \"module %s, %s, %s - Invalid entry (%s). Cannot find definition.\" % (modname, xsectname, pfwdefs.SW_INPUTS, objname))\n cnts[ERRCNT_POS] += 1\n\n if not bad:\n if subname is None: # file\n dataobjs[pfwdefs.SW_INPUTS][objname] = True\n elif sect != pfwdefs.SW_LISTSECT: # only lists can have subname\n error(indent+' ', \"module %s, %s, %s, %s - Too many sections/periods for a %s.\" % (modname, xsectname, pfwdefs.SW_INPUTS, objname, sect))\n cnts[ERRCNT_POS] += 1\n elif subname not in moddict[pfwdefs.SW_FILESECT]:\n error(indent+' ', \"module %s, %s, %s, %s - Cannot find definition for %s\" % (modname, xsectname, pfwdefs.SW_INPUTS, objname, subname))\n cnts[ERRCNT_POS] += 1\n else:\n dataobjs[pfwdefs.SW_INPUTS][\"%s.%s\" % (pfwdefs.SW_LISTSECT, name)] = True\n dataobjs[pfwdefs.SW_INPUTS][\"%s.%s\" % (pfwdefs.SW_FILESECT, subname)] = True\n dataobjs[pfwdefs.SW_INPUTS][objname] = True\n fdict = moddict[pfwdefs.SW_FILESECT][subname]\n if ('listonly' not in fdict or not miscutils.convertBool(fdict['listonly'])):\n warning(indent, \"module %s, %s, %s, %s - File in list does not have listonly=True\" % (modname, xsectname, pfwdefs.SW_INPUTS, objname))\n cnts[WARNCNT_POS] += 1\n\n return cnts",
"def determine_k8s_mod_class(cd: ClassDescriptor, op: Operation = None) -> \\\n Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:\n method_name = make_method_name(op, cd)\n search_args = [(cd.group, cd.version, cd.kind),\n (op.group, op.version, op.kind),\n ('core', op.version, op.kind),\n ('apps', op.version, op.kind)]\n pkg = mod = cls = meth = None\n for group, version, kind in search_args:\n details = _search_for_method(group, version, kind, method_name)\n if details is not None:\n pkg, mod, cls, meth = details\n break\n else:\n print(f\"Can't find p/m/c/m for {method_name} in {cd.group} or {op.group}\")\n return pkg, mod, cls, meth",
"def split_specstring_into_ops_and_versions(spec):\n specset = pip._vendor.packaging.specifiers.SpecifierSet(spec)\n ops_and_versions = []\n\n for spec in specset._specs:\n ops_and_versions.append([spec.operator, spec.version])\n \n return ops_and_versions",
"def check_versioning(ctx, stmt):\n\n # Don't perform this check for modules that are not OpenConfig\n # or are OpenConfig infrastructure (e.g., extensions)\n if (OCLintFunctions.is_openconfig_validatable_module(stmt.arg) in\n [ModuleType.NONOC, ModuleType.OCINFRA]):\n return\n\n version = None\n for substmt in stmt.substmts:\n # pyang uses a keyword tuple when the element is from\n # an external extension rather than a built-in, check for\n # this before checking the argument. Assumption is made\n # that openconfig-version is unique across all extension\n # modules.\n if (isinstance(substmt.keyword, tuple) and\n substmt.keyword[1] == \"openconfig-version\"):\n version = substmt\n\n if version is None:\n err_add(ctx.errors, stmt.pos, \"OC_MODULE_MISSING_VERSION\",\n stmt.arg)\n return\n\n if not re.match(r\"^[0-9]+\\.[0-9]+\\.[0-9]+$\", version.arg):\n err_add(ctx.errors, stmt.pos, \"OC_INVALID_SEMVER\",\n version.arg)\n\n # Check that there\n match_revision = False\n for revision_stmt in stmt.search(\"revision\"):\n reference_stmt = revision_stmt.search_one(\"reference\")\n if reference_stmt is not None and reference_stmt.arg == version.arg:\n match_revision = True\n\n if match_revision is False:\n err_add(ctx.errors, stmt.pos, \"OC_MISSING_SEMVER_REVISION\",\n version.arg)",
"def interpret_argument(worker, name):\n if os.path.isdir(name):\n return _discover_enclosing_packages(name, [])\n\n if os.path.isfile(name):\n base, extension = os.path.splitext(name)\n if extension != '.py':\n print('Error - test file lacks .py extension: {0}'.format(name))\n return\n directory, name = os.path.split(base)\n return _discover_enclosing_packages(directory, [name])\n\n with worker:\n worker.call(import_modules, [name])\n module_paths = dict(worker.call(list_module_paths))\n if name in module_paths:\n return None, name\n\n print('Error - can neither open nor import: {0}'.format(name))\n exit(1)",
"def find_model_information_in_edi(parsed_edi_output, model_id, model_version=None):\r\n founded = [info for info in parsed_edi_output if info[0] == model_id and model_version in (None, info[2])]\r\n if not founded:\r\n raise Exception('Info about model {!r} v {!r} not found'.format(model_id, model_version))\r\n\r\n return founded[0]",
"def test_main_modular(tmpdir_factory: TempdirFactory) -> None:\n\n output_directory = Path(tmpdir_factory.mktemp('output'))\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n output_path = output_directory / 'model'\n\n with freeze_time(TIMESTAMP):\n main(['--input', str(input_filename), '--output', str(output_path)])\n main_modular_dir = EXPECTED_MAIN_PATH / 'main_modular'\n for path in main_modular_dir.rglob('*.py'):\n result = output_path.joinpath(path.relative_to(main_modular_dir)).read_text()\n assert result == path.read_text()",
"def test_main_modular_filename(tmpdir_factory: TempdirFactory) -> None:\n\n output_directory = Path(tmpdir_factory.mktemp('output'))\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n output_filename = output_directory / 'model.py'\n\n assert (\n main(['--input', str(input_filename), '--output', str(output_filename)])\n == Exit.ERROR\n )",
"def identify_algorithm(model_initializer):\n # FLAG: Will need different way to handle neural network libraries (keras, pytorch, skorch)\n\n try:\n if isinstance(model_initializer, partial):\n algorithm_name = model_initializer.func.__name__\n else:\n algorithm_name = model_initializer.__name__\n except AttributeError:\n algorithm_name = type(model_initializer).__name__\n\n try:\n module_name = model_initializer.__module__.split(\".\")[0]\n except AttributeError:\n module_name = model_initializer.func.__module__.split(\".\")[0]\n\n return algorithm_name, module_name",
"def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())",
"def test_main_modular_no_file() -> None:\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n\n assert main(['--input', str(input_filename)]) == Exit.ERROR",
"def prepare_executable_cmd(args: dict):\n return [str(args[\"executable\"].resolve(strict=True)),\n \"-m\", str(args[\"model\"].resolve(strict=True)),\n \"-d\", args[\"device\"]]",
"def test_get_module_qualname_from_path_with_dot(self):\n\n name = b_utils.get_module_qualname_from_path(\n os.path.join(\".\", \"__init__.py\")\n )\n\n self.assertEqual(\"__init__\", name)",
"def parse(self, prog):\n return self.imp_parser.parse(prog)",
"def import_model(command):\n namespace = app.main(command)\n assert namespace.command == 'im' or namespace.command == \"importmodel\"\n assert namespace.modelpath == \"test1\"\n assert namespace.convertpath == \"test2\"",
"def find_model(config, obj, mods):\n for mod in mods:\n if mod[0] != config:\n continue\n\n if len(mod) == 2:\n return mod[1]\n\n if len(mod) == 3 and mod[1] in obj:\n return mod[2]\n\n return None",
"def find_model(config, obj, mods):\n for mod in mods:\n if mod[0] != config:\n continue\n\n if len(mod) == 2:\n return mod[1]\n\n if len(mod) == 3 and mod[1] in obj:\n return mod[2]\n\n return None",
"def conf_load_run_specification(fin):\n err_msg = \"Unknown specification. Excpected RUN:'name'.\"\n spec = fin.readline().strip().split(':')\n if len(spec) != 2 or spec[0] != 'RUN':\n raise EnvironmentError(err_msg)\n name = spec[1].strip()\n if len(name) == 0:\n raise EnvironmentError(\"Excpected non empty name for RUN(RUN:'name').\")\n return name",
"def validate_required_model(program_name, arg_map):\n _method_name = 'validate_required_model'\n\n if CommandLineArgUtil.MODEL_FILE_SWITCH in arg_map:\n model_file_value = arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH]\n model_files = cla_utils.get_model_files(model_file_value)\n\n for model_file in model_files:\n try:\n FileUtils.validateExistingFile(model_file)\n except IllegalArgumentException, iae:\n ex = exception_helper.create_cla_exception(ExitCode.ARG_VALIDATION_ERROR, 'WLSDPLY-20006', program_name,\n model_file, iae.getLocalizedMessage(), error=iae)\n __logger.throwing(ex, class_name=_class_name, method_name=_method_name)\n raise ex\n else:\n ex = exception_helper.create_cla_exception(ExitCode.USAGE_ERROR, 'WLSDPLY-20015', program_name,\n CommandLineArgUtil.MODEL_FILE_SWITCH)\n __logger.throwing(ex, class_name=_class_name, method_name=_method_name)\n raise ex",
"def find_spec(cls, fullname: str, path: Optional[List[str]] = None,\n target: Optional[str] = None) -> Optional[ModuleSpec]:\n if path is None:\n path = sys.path\n\n sorocospec = None\n\n for p in path:\n sorocospec = XPYCEFileFinder(p).find_spec(fullname, target)\n\n if sorocospec is None:\n continue\n if sorocospec.origin is None:\n sorocospec = None\n break\n\n # This line is important for Python's internal libraries (like\n # warnings) to work. Setting has_location to True can break\n # introspection because Python will assume the entire source code\n # is there, but it is encrypted\n sorocospec.has_location = False\n\n if sorocospec is not None:\n break\n return sorocospec",
"def do_one_mod(self, names: List[str], infer: bool, exec_: bool, conf: dict):\n\n p = lambda: Progress(\n TextColumn(\"[progress.description]{task.description}\", justify=\"right\"),\n BarColumn(bar_width=None),\n \"[progress.percentage]{task.percentage:>3.1f}%\",\n \"[progress.completed]{task.completed} / {task.total}\",\n TimeElapsedColumn(),\n )\n # step one collect all the modules instances we want to analyse.\n\n modules = []\n for name in names:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # step 2 try to guess the version number from the top module.\n version = getattr(modules[0], \"__version__\", \"???\")\n\n root = names[0].split(\".\")[0]\n module_conf = conf.get(root, {})\n examples_folder = module_conf.get(\"examples_folder\", None)\n print(\"EF\", examples_folder)\n if examples_folder is not None:\n examples_folder = Path(examples_folder).expanduser()\n examples_data = self.collect_examples(examples_folder)\n for edoc, figs in examples_data:\n self.examples.update(\n {k: json.dumps(v.to_json()) for k, v in edoc.items()}\n )\n for name, data in figs:\n print(\"put one fig\", name)\n self.put_raw(name, data)\n print(\"Configuration:\", json.dumps(module_conf, indent=2))\n self.root = root\n self.version = version\n subs = module_conf.get(\"submodules\", [])\n extra_from_conf = [root + \".\" + s for s in subs]\n for name in extra_from_conf:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # print(modules)\n\n collector = DFSCollector(modules[0], modules[1:])\n collected: Dict[str, Any] = collector.items()\n\n # collect all items we want to document.\n for qa, item in collected.items():\n if (nqa := full_qual(item)) != qa:\n print(\"after import qa differs : {qa} -> {nqa}\")\n if collected[nqa] == item:\n print(\"present twice\")\n del collected[nqa]\n else:\n print(\"differs: {item} != {other}\")\n\n for target in module_conf.get(\"exclude\", []):\n print(\"exclude tgt:\", target)\n del collected[target]\n # p = nullcontext\n with p() as p2:\n\n # just nice display of progression.\n taskp = p2.add_task(description=\"parsing\", total=len(collected))\n\n for qa, target_item in collected.items():\n short_description = (qa[:19] + \"..\") if len(qa) > 21 else qa\n p2.update(taskp, description=short_description.ljust(17))\n p2.advance(taskp)\n item_docstring = target_item.__doc__\n\n # TODO: we may not want tosip items as they may have children\n # right now keep modules, but we may want to keep classes if\n # they have documented descendants.\n\n if item_docstring is None and not isinstance(target_item, ModuleType):\n continue\n elif item_docstring is None and isinstance(target_item, ModuleType):\n item_docstring = \"\"\"This module has no documentation\"\"\"\n\n # progress.console.print(qa)\n try:\n if tsparse is None:\n print(\n \"please see how to install Tree-sitter in the readme to parse complex RST documents\"\n )\n arbitrary = tsparse(dedent_but_first(item_docstring).encode())\n except Exception as e:\n print(f\"TS could not parse: {qa}\")\n raise ValueError(f\"from {qa}\") from e\n arbitrary = []\n # raise\n try:\n ndoc = NumpyDocString(dedent_but_first(item_docstring))\n except Exception:\n if not isinstance(target_item, ModuleType):\n p2.console.print(\n \"Unexpected error parsing\",\n target_item,\n target_item.__name__,\n )\n if isinstance(target_item, ModuleType):\n # from .take2 import main\n # main(item_docstring)\n ndoc = 
NumpyDocString(\n f\"Was not able to parse docstring for {qa}\"\n )\n else:\n continue\n if not isinstance(target_item, ModuleType):\n arbitrary = []\n execute_exclude_patterns = module_conf.get(\n \"execute_exclude_patterns\", None\n )\n ex = exec_\n if execute_exclude_patterns and exec_:\n for pat in execute_exclude_patterns:\n if qa.startswith(pat):\n ex = False\n break\n # else:\n # print(\"will run\", qa)\n\n try:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, ex, qa, config=module_conf\n )\n doc_blob.arbitrary = arbitrary\n except Exception:\n raise\n if module_conf.get(\"exec_failure\", None) == \"fallback\":\n print(\"Re-analysing \", qa, \"without execution\")\n # debug:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, False, qa, config=module_conf\n )\n doc_blob.aliases = collector.aliases[qa]\n\n # processing....\n doc_blob.signature = doc_blob.content.pop(\"Signature\")\n try:\n for section in [\"Extended Summary\", \"Summary\", \"Notes\", \"Warnings\"]:\n if section in doc_blob.content:\n if data := doc_blob.content[section]:\n PX = P2(data)\n doc_blob.content[section] = Section(PX)\n else:\n doc_blob.content[section] = Section()\n except Exception as e:\n raise type(e)(f\"during {qa}\")\n\n doc_blob.references = doc_blob.content.pop(\"References\")\n if isinstance(doc_blob.references, str):\n if doc_blob.references == \"\":\n doc_blob.references = None\n else:\n assert False\n doc_blob.references = list(doc_blob.references)\n assert (\n isinstance(doc_blob.references, list) or doc_blob.references is None\n )\n del doc_blob.content[\"Examples\"]\n del doc_blob.content[\"index\"]\n sections_ = [\n \"Parameters\",\n \"Returns\",\n \"Raises\",\n \"Yields\",\n \"Attributes\",\n \"Other Parameters\",\n \"Warns\",\n ##\"Warnings\",\n \"Methods\",\n # \"Summary\",\n \"Receives\",\n ]\n from .take2 import Param\n\n # new_doc_blob._content[\"Parameters\"] = [\n # Parameter(a, b, c)\n # for (a, b, c) in new_doc_blob._content.get(\"Parameters\", [])\n # ]\n\n for s in sections_:\n if s in doc_blob.content:\n assert isinstance(\n doc_blob.content[s], list\n ), f\"{s}, {doc_blob.content[s]} \"\n new_content = Section()\n for param, type_, desc in doc_blob.content[s]:\n assert isinstance(desc, list)\n items = []\n if desc:\n items = P2(desc)\n new_content.append(Param(param, type_, items))\n doc_blob.content[s] = new_content\n\n doc_blob.see_also = []\n if see_also := doc_blob.content.get(\"See Also\", None):\n for nts, d0 in see_also:\n try:\n d = d0\n for (name, type_or_description) in nts:\n if type_or_description and not d:\n desc = type_or_description\n if isinstance(desc, str):\n desc = [desc]\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n type_ = None\n else:\n desc = d0\n type_ = type_or_description\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n\n sai = SeeAlsoItem(Ref(name, None, None), desc, type_)\n doc_blob.see_also.append(sai)\n del desc\n del type_\n except Exception as e:\n raise ValueError(\n f\"Error {qa}: {see_also=} | {nts=} | {d0=}\"\n ) from e\n del doc_blob.content[\"See Also\"]\n\n for k, v in doc_blob.content.items():\n assert isinstance(v, Section), f\"{k} is not a section {v}\"\n # end processing\n\n self.put(qa, json.dumps(doc_blob.to_json(), indent=2))\n for name, data in figs:\n self.put_raw(name, data)\n\n found = {}\n not_found = []\n for k, v in collector.aliases.items():\n if [item for item in v if item != k]:\n if shorter := find_cannonical(k, v):\n found[k] = shorter\n else:\n not_found.append((k, 
v))\n\n if logo := module_conf.get(\"logo\", None):\n self.put_raw(\"logo.png\", Path(logo).read_bytes())\n self.metadata = {\n \"version\": version,\n \"logo\": \"logo.png\",\n \"aliases\": found,\n \"module\": root,\n }"
] | [
"0.53562915",
"0.53267694",
"0.5284103",
"0.5240484",
"0.51123273",
"0.50640464",
"0.5061494",
"0.50039196",
"0.4992714",
"0.4973761",
"0.49309054",
"0.49113435",
"0.49091592",
"0.49087858",
"0.49011794",
"0.48769084",
"0.4876754",
"0.48592058",
"0.48583668",
"0.48241246",
"0.4818992",
"0.48112157",
"0.4802496",
"0.48019683",
"0.4800632",
"0.4800632",
"0.47921017",
"0.47458082",
"0.47435784",
"0.47282"
] | 0.6107568 | 0 |
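A small, self-contained sketch of the exec-spec parsing shown in the record above: the regex `-u?m ([^ ]+)` picks out the module name that follows `-m` or `-um`. The example command strings are assumptions for illustration.

```python
import re


def op_main_for_exec(exec_):
    # Extract the Python main module named after -m / -um in an exec spec.
    if not exec_:
        raise ValueError("exec spec not specified")
    m = re.search(r"-u?m ([^ ]+)", exec_)
    if not m:
        raise ValueError(f"unexpected exec spec: {exec_!r}")
    return m.group(1)


print(op_main_for_exec("python -um train --epochs 10"))  # -> train
print(op_main_for_exec("python -m pkg.main --lr 0.1"))   # -> pkg.main
```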
Return a vignette for the package | def getVignette(self, packageUrl):
cat = getToolByName(self.context, 'portal_catalog')
results = cat.searchResults(portal_type='Vignette',
path={'query': packageUrl})
if results:
return results[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def for_slug(slug):\n vig = Vignette.objects.filter(slug=slug).first()\n if not vig:\n vig = Vignette(slug=slug, content=json.dumps({'data': [\n {'type': 'text', 'data': {\n 'text': 'Missing Vignette `' + slug + '`'}}]}))\n return vig",
"def _provision_package(self):",
"def get(self, bento_name, bento_version):",
"def virtual(**kwds):\n # get the virtual filesystem factory\n from .Filesystem import Filesystem\n\n # make one and return it\n return Filesystem(**kwds)",
"def package():\n pass",
"def characterise_vignette(\n image: ArrayLike,\n method: Literal[\"2D Function\", \"Bivariate Spline\", \"RBF\"] | str = \"RBF\",\n **kwargs,\n) -> DataVignetteCharacterisation:\n\n method = validate_method(\n method, tuple(VIGNETTE_CHARACTERISATION_METHODS.keys())\n )\n\n return VIGNETTE_CHARACTERISATION_METHODS[method](image, **kwargs)",
"def _get_version(self):",
"def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")",
"def __virtual__():\n if HAS_SHADE:\n return __virtualname__\n return (\n False,\n \"The glanceng execution module failed to load: shade python module is not\"\n \" available\",\n )",
"def get_version(self):\n return arbwave_version()",
"def version(self):",
"def get_frozen_vgg(path: str = None):\n\n if path is None:\n original_vgg = models.vgg16(pretrained=True)\n else:\n state_dict = torch.load(path)\n original_vgg = models.vgg16()\n original_vgg.load_state_dict(state_dict)\n\n for param in original_vgg.parameters():\n param.requires_grad = False\n\n # We don't want the last 7 modules.\n return original_vgg",
"def toVegaLite(self) -> dict:\t\t\n\t\tfrom lux.vizLib.altair.AltairRenderer import AltairRenderer\n\t\trenderer = AltairRenderer(outputType=\"VegaLite\")\n\t\tself.vis = renderer.createVis(self)\n\t\treturn self.vis",
"def version():\n\n pass",
"def version():\n\tclick.clear()\n\trich.print(\"[bold magenta]Image Dataset Tool (IDT)[/bold magenta] version 0.0.6 beta\")",
"def __virtual__():\n if HAS_SHADE:\n return __virtualname__\n return (\n False,\n \"The neutronng execution module failed to load: shade python module is not available\",\n )",
"def do_pack():\n try:\n if not os.path.exists(\"versions\"):\n local(\"mkdir versions\")\n date = datetime.now()\n date = date.strftime(\"%Y%m%d%H%M%S\")\n new_versions = \"versions/web_static_{}.tgz\".format(date)\n local(\"tar -cvzf {} web_static\".format(new_versions))\n return new_versions\n except:\n return None",
"def get_lvfs_detached_signature():\n url = \"https://cdn.fwupd.org/downloads/firmware.xml.gz.asc\"\n ua_string = \"fwupd/1.4.1\"\n r = requests.get(url, headers={\"User-Agent\": ua_string})\n return r.text",
"def __virtual__():\n if get_configured_provider() is False:\n return False\n if _get_dependencies() is False:\n return False\n\n global cache # pylint: disable=global-statement,invalid-name\n cache = salt.cache.Cache(__opts__)\n\n return __virtualname__",
"def load():\n return VirtualDatacenterPlugin()",
"def metadata(self):\n return UnpackedSDist(self.find_egg_info_file())",
"def show_version():\n print(\"===============================================================\")\n print(f\"Productivity App v{__VERSION__}\")\n print(f\"Made by {__AUTHOR__} (with the advices of {__ADVISOR__})\")\n print(\"Source : https://github.com/Ilade-s/productivite-app-TkVer\")\n print(\"Server (optionnal) : https://github.com/Tifiloow/productivite-app\")\n print(\"Assets : https://feathericons.com/\")\n print(\"===============================================================\")",
"def raw_version_data(self, unstable=False, **kwargs):\n if unstable:\n kwargs.setdefault('allow_experimental', True)\n kwargs.setdefault('allow_unknown', True)\n\n return super(Discover, self).raw_version_data(**kwargs)",
"def get_version():\n return about.get_version()",
"def get_version(self):\n pass",
"def show_version():\n terminal.echo(f\"{package_metadata['name']} {package_metadata['version']}\")",
"def do_pack():\n local(\"sudo mkdir -p versions\")\n date_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n name_file = \"versions/web_static{}.tgz\".format(date_time)\n local(\"sudo tar -cvzf {} web_static\".format(name_file))\n return name_file",
"def get_vs(self,name):\n return self.mfp.get_vs(name)",
"def VisumInit(path=None):\r\n import win32com.client \r\n Visum = win32com.client.Dispatch('Visum.Visum.125')\r\n if path != None: Visum.LoadVersion(path)\r\n return Visum",
"def __virtual__():\n if \"glusterfs.list_volumes\" in __salt__:\n return \"glusterfs\"\n return (False, \"glusterfs module could not be loaded\")"
] | [
"0.6036985",
"0.53940934",
"0.5330004",
"0.530821",
"0.52660775",
"0.5136677",
"0.5044925",
"0.5041591",
"0.5003167",
"0.49641988",
"0.49498764",
"0.49451274",
"0.48880824",
"0.48761797",
"0.48731172",
"0.4868222",
"0.48601785",
"0.48379087",
"0.48331505",
"0.48277253",
"0.4823758",
"0.48142573",
"0.48091537",
"0.48066482",
"0.48034483",
"0.47802415",
"0.47754067",
"0.47736445",
"0.4773003",
"0.47659224"
] | 0.7263297 | 0 |
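The catalog query in the record above depends on Plone's `portal_catalog`; as a rough stand-in, the sketch below mimics the same lookup (filter by `portal_type` and path prefix, return the first hit) over a plain list of dictionaries. The fake catalog contents are assumptions for illustration only.

```python
# Simplified stand-in for portal_catalog.searchResults(...)
FAKE_CATALOG = [
    {"portal_type": "Document", "path": "/site/packages/foo/readme"},
    {"portal_type": "Vignette", "path": "/site/packages/foo/vignette"},
]


def get_vignette(package_url):
    # Return the first Vignette found under the package path, if any.
    results = [
        brain for brain in FAKE_CATALOG
        if brain["portal_type"] == "Vignette"
        and brain["path"].startswith(package_url)
    ]
    if results:
        return results[0]


print(get_vignette("/site/packages/foo"))  # -> the Vignette entry
```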
This function creates a new hdf5 file in the current working directory, taking as its sole argument a string name for the file. | def new_hdf5(new_filename):
# handling input errors
if not isinstance(new_filename, str):
    raise TypeError('Passed value of `new_filename` is not a string! Instead, it is: '
+ str(type(new_filename)))
# w- mode will create a file and fail if the file already exists
hdf5 = h5py.File('{}.hdf5'.format(new_filename), 'w-')
hdf5.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_file(self, filepath):\n folder, _filename = os.path.split(filepath)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n file = h5py.File(filepath, 'a')\n return file",
"def save_as_hdf5(self, filename):",
"def hdf5_file(self):\n if self._hdf5_file is None:\n self._hdf5_file = h5py.File(self.hdf5_path, 'r', swmr=self.hdf5_use_swmr, libver='latest')\n return self._hdf5_file",
"def generate_file(self, hdf5_file_name) -> None:\n self.f = h5py.File(hdf5_file_name, \"w\")\n\n print(Fore.GREEN + \"Generating simulation condition list...\")\n self.generate_simulation_condition_map()\n\n print(Fore.GREEN + \"Generating parameter list...\")\n self.generate_parameter_list()\n\n print(Fore.GREEN + \"Generating fixed parameters matrix...\")\n self.generate_fixed_parameter_matrix()\n\n print(Fore.GREEN + \"Generating measurement matrix...\")\n self.generate_measurement_matrices()\n\n print(Fore.GREEN + \"Handling scaling parameters...\")\n self.generate_hierarchical_optimization_data()\n\n print(Fore.GREEN + \"Copying default AMICI options...\")\n self.copy_amici_options()\n\n print(Fore.GREEN + \"Writing default optimization options...\")\n self.write_optimization_options()",
"def open_h5(filename: str, mode: str, **kwargs):\n import h5py\n return h5py.File(filename, mode, **kwargs)",
"def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return",
"def create_output_file(self):\n if self.options['storage_method'] == 'hdf5':\n try:\n fp = h5py.File(self.file_name, \"w\")\n except IOError:\n print \"Unable to open output file '%s'\" % self.file_name\n sys.exit(1)\n # remember file pointer\n self.file_pointer = fp\n print \"Creating file '%s'\" % self.file_name\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"create_file\", self.file_name))",
"def _get_h5_path(self, name):\n return posixpath.join(self.h5_path, name)",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = self.xyz\n f.close()\n\n return",
"def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):\n n_total = n_train + n_valid + n_test\n splits = create_splits(n_train, n_valid, n_test)\n hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)\n vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))\n hdf5_file.create_dataset('encoded_images', shape=(n_total,),\n dtype=vlen_dtype)\n hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)\n hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')",
"def make_hdf5file(self, dataset_paths: list = None, dataset_values: list = None) -> None:\n\n assert dataset_paths is not None and len(dataset_paths) != 0, (\"`dataset_path` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_paths)}.\")\n\n assert dataset_values is not None and len(dataset_values) != 0, (\"`dataset_values` not valid. Expected at least \" \\\n f\"one list element, got {len(dataset_values)}.\")\n\n\n # Remove file if already exists and create a new one\n if os.path.isfile(os.path.join(self.FOFDirectory, self.filename)):\n os.remove(os.path.join(self.FOFDirectory, self.filename))\n print(f'[ FOFOutput ]\\t==> Removed old {self.filename} file.')\n\n # Create file and optional groups within it\n FOFfile = h5py.File(os.path.join(self.FOFDirectory, self.filename), 'w')\n print(f'[ FOFOutput ]\\t==> Created new {self.filename} file.')\n\n # Push the attributes to file, if any\n if self.attrs is not None and len(self.attrs.keys()) > 0:\n for key, text in zip(self.attrs.keys(), self.attrs.values()):\n FOFfile.attrs[key] = text\n\n for internal_path, dataset_content in zip(dataset_paths, dataset_values):\n\n assert not internal_path.endswith('/'), \"Invalid hdf5 internal path\"\n assert type(dataset_content) is np.ndarray, \"Can only push numpy.ndarrays into hdf5 files.\"\n\n nested_groups = self.groups_from_path(internal_path)\n if len(nested_groups) == 1:\n FOFfile.create_dataset(nested_groups[0], data=dataset_content)\n else:\n for nested_group in nested_groups[:-1]:\n g = FOFfile.create_group(nested_group)\n g.create_dataset(nested_groups[-1], data = dataset_content)\n\n print(f'[ FOFOutput ]\\t==> Created {internal_path} dataset in {self.filename} file.')\n\n FOFfile.close()",
"def save_hdf5(self, filename):\n filename += '.h5'\n try:\n hf = h5py.File(filename, 'w')\n hf.create_dataset('Array', data=self.flat_array)\n hf.close()\n except TypeError as err:\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('TypeError [{}] when attempting to save HDF5'.format(err))\n else:\n print('TypeError [{}] when attempting to save HDF5'.format(err))",
"def load_h5(filename: str, **kwargs):\n return open_h5(filename, 'r', **kwargs)",
"def open_hdf_file(self, mode = \"a\"):\n\t\tself.h5f = open_file(self.input_object, mode = mode )",
"def hdf5(dirs: T.Dict[str, Path], env: T.Mapping[str, str]):\n\n if os.name == \"nt\":\n if \"ifort\" in env[\"FC\"]:\n msg = \"\"\"\nFor Windows with Intel compiler, use HDF5 binaries from HDF Group.\nhttps://www.hdfgroup.org/downloads/hdf5/\nlook for filename like hdf5-1.12.0-Std-win10_64-vs14-Intel.zip\n \"\"\"\n elif \"gfortran\" in env[\"FC\"]:\n msg = \"\"\"\nFor MSYS2 on Windows, just use MSYS2 HDF5.\nInstall from the MSYS2 terminal like:\npacman -S mingw-w64-x86_64-hdf5\nreference: https://packages.msys2.org/package/mingw-w64-x86_64-hdf5\n \"\"\"\n else:\n msg = \"\"\"\nFor Windows, use HDF5 binaries from HDF Group.\nhttps://www.hdfgroup.org/downloads/hdf5/\nInstead of this, it is generally best to use MSYS2 or Windows Subsystem for Linux\n \"\"\"\n raise SystemExit(msg)\n\n hdf5_name = \"hdf5\"\n install_dir = dirs[\"prefix\"] / hdf5_name\n source_dir = dirs[\"workdir\"] / hdf5_name\n\n git_update(source_dir, HDF5_GIT, tag=HDF5_TAG)\n\n cmd = [\n \"./configure\",\n f\"--prefix={install_dir}\",\n \"--enable-fortran\",\n \"--enable-build-mode=production\",\n ]\n\n subprocess.check_call(nice + cmd, cwd=source_dir, env=env)\n\n cmd = [\"make\", \"-C\", str(source_dir), \"-j\", \"install\"]\n subprocess.check_call(nice + cmd)",
"def SaveResultsToH5(self):\n\n try: \n wildcard = \"HDF5 files (*.hdf5)|*.hdf5\"\n dialog = wx.FileDialog(None, \"Save as .hdf5\", wildcard=wildcard,\n style=wx.SAVE|wx.OVERWRITE_PROMPT)\n\n if dialog.ShowModal() == wx.ID_OK:\n filepath = dialog.GetPath()\n self.page1.filename = dialog.GetFilename()\n dir = dialog.GetDirectory()\n \n self.common.path = dir\n self.common.filename = self.page1.filename\n\n wx.BeginBusyCursor() \n self.stk.write_results_h5(filepath, self.data_struct, self.anlz) \n wx.EndBusyCursor() \n\n except:\n\n wx.EndBusyCursor()\n wx.MessageBox(\"Could not save HDF5 file.\")\n \n dialog.Destroy()\n self.refresh_widgets()\n \n return",
"def h5root():\n with h5py.File('dummy.nxs', mode='w', driver=\"core\", backing_store=False) as f:\n yield f",
"def _generate_testdata_h5(cls, test_filepath):\n # Generate some test data\n data = numpy.indices( (10, 100, 200, 3) )\n assert data.shape == (4, 10, 100, 200, 3)\n data = data.astype( numpy.uint32 )\n cls.original_data = data\n\n # Choose names\n cls.dvid_dataset = \"datasetA\"\n cls.data_uuid = \"abcde\"\n cls.data_name = \"indices_data\"\n cls.volume_location = \"/datasets/{dvid_dataset}/volumes/{data_name}\".format( **cls.__dict__ )\n cls.node_location = \"/datasets/{dvid_dataset}/nodes/{data_uuid}\".format( **cls.__dict__ )\n cls.voxels_metadata = voxels.VoxelsMetadata.create_default_metadata(data.shape, data.dtype, \"cxyzt\", 1.0, \"\")\n\n # Write to h5 file\n with H5MockServerDataFile( test_filepath ) as test_h5file:\n test_h5file.add_node( cls.dvid_dataset, cls.data_uuid )\n test_h5file.add_volume( cls.dvid_dataset, cls.data_name, data, cls.voxels_metadata )\n\n test_h5file.add_node( \"datasetB\", \"12345\" )\n test_h5file.add_volume( \"datasetB\", cls.data_name, data, cls.voxels_metadata )",
"def SaveStackH5(self):\n\n try: \n wildcard = \"HDF5 files (*.hdf5)|*.hdf5\"\n dialog = wx.FileDialog(None, \"Save as .hdf5\", wildcard=wildcard,\n style=wx.SAVE|wx.OVERWRITE_PROMPT)\n\n if dialog.ShowModal() == wx.ID_OK:\n filepath = dialog.GetPath()\n self.page1.filename = dialog.GetFilename()\n dir = dialog.GetDirectory()\n \n self.common.path = dir\n self.common.filename = self.page1.filename\n\n wx.BeginBusyCursor() \n self.stk.write_h5(filepath, self.data_struct) \n wx.EndBusyCursor() \n\n except:\n\n wx.EndBusyCursor()\n wx.MessageBox(\"Could not save HDF5 file.\")\n \n dialog.Destroy()\n self.refresh_widgets()\n \n return",
"def create_hdf(self, path, job_name):\n return self._project.create_hdf(path=path, job_name=job_name)",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = np.vstack(np.squeeze(self.xyz))\n f.close()\n\n return",
"def ToH5(self,h5File=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if h5File == None:\r\n h5File=self.h5File\r\n\r\n #Delete .h5 File if exists\r\n if os.path.exists(h5File): \r\n logger.debug(\"{0:s}{1:s}: Delete ...\".format(logStr,h5File)) \r\n os.remove(h5File)\r\n\r\n #Determine .h5 BaseKey\r\n\r\n relPath2XmlromCurDir=os.path.normpath(os.path.relpath(os.path.normpath(self.xmlFile),start=os.path.normpath(os.path.curdir))) # ..\\..\\..\\..\\..\\3S\\Modelle\\....XML\r\n #print(repr(relPath2XmlromCurDir)) # '..\\\\..\\\\..\\\\..\\\\..\\\\3S\\\\Modelle\\\\....XML'\r\n h5KeySep='/'\r\n h5KeyCharForDot='_'\r\n h5KeyCharForMinus='_'\r\n relPath2XmlromCurDirH5BaseKey=re.sub('\\.',h5KeyCharForDot,re.sub(r'\\\\',h5KeySep,re.sub('-',h5KeyCharForMinus,re.sub('.xml','',relPath2XmlromCurDir,flags=re.IGNORECASE))))\r\n #__/__/__/__/__/3S/Modelle/...\r\n\r\n warnings.filterwarnings('ignore',category=pd.io.pytables.PerformanceWarning) #your performance may suffer as PyTables will pickle object types that it cannot map directly to c-types \r\n warnings.filterwarnings('ignore',category=tables.exceptions.NaturalNameWarning) #\\lib\\site-packages\\tables\\path.py:100: NaturalNameWarning: object name is not a valid Python identifier: '3S'; it does not match the pattern ``^[a-zA-Z_][a-zA-Z0-9_]*$``; you will not be able to use natural naming to access this object; using ``getattr()`` will still work, though)\r\n \r\n #Write .h5 File\r\n logger.debug(\"{0:s}pd.HDFStore({1:s}) ...\".format(logStr,h5File)) \r\n with pd.HDFStore(h5File) as h5Store: \r\n #for tableName,table in self.dataFrames.items():\r\n for tableName in sorted(self.dataFrames.keys()):\r\n table=self.dataFrames[tableName]\r\n h5Key=relPath2XmlromCurDirH5BaseKey+h5KeySep+tableName \r\n logger.debug(\"{0:s}{1:s}: Writing DataFrame {2:s} with h5Key={3:s}\".format(logStr,h5File,tableName,h5Key)) \r\n try:\r\n h5Store.put(h5Key,table)#,format='table') \r\n except Exception as e:\r\n logger.error(\"{0:s}{1:s}: Writing DataFrame {2:s} with h5Key={3:s} FAILED!\".format(logStr,h5File,tableName,h5Key)) \r\n raise e\r\n \r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n finally:\r\n h5Store.close()\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def HDF5_to_HDF5(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output HDF5 file\n hdf5_file = os.path.expanduser(f'{fileBasename}.h5')\n # copy everything from the HDF5 file\n with h5py.File(self.filename,mode='r') as source:\n dest = h5py.File(hdf5_file,mode='w')\n # value checks on output HDF5\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)",
"def hdf5_container(tmpdir):\n filename = tmpdir.join(\"test.h5\").strpath\n hdcon = SensitivityCubeHDF5Container(filename, mode=\"w\")\n\n # Clever trick to close the file when we're done with it \n yield hdcon\n hdcon.close()",
"def path_in_hdf5(self):\n raise NotImplementedError",
"def save_to_hdf(df, fname, output_subdir=None):\n path = Path(fname)\n newfname = path.with_suffix('.h5').name\n folderpath = HOME / 'output'\n if output_subdir:\n folderpath = folderpath / output_subdir\n path = folderpath / newfname\n df.to_hdf(str(path), 'df', format='t')\n return str(path)",
"def to_hdf5(self, filepath, **kwargs):\n hdf = pd.HDFStore(filepath, **kwargs)\n hdf.put(self.INDEXDATAFRAME, self.df, format='fixed', data_columns=True)\n hdf.close()",
"def FromH5(self,h5File=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n\r\n if h5File == None:\r\n h5File=self.h5File\r\n\r\n #Check if h5File exists\r\n if not os.path.exists(h5File): \r\n logStrFinal=\"{0:s}{1:s}: Not Existing!\".format(logStr,h5File) \r\n raise XmError(logStrFinal) \r\n \r\n try:\r\n self.dataFrames={} \r\n with pd.HDFStore(h5File) as h5Store:\r\n h5Keys=sorted(h5Store.keys())\r\n for h5Key in h5Keys:\r\n match=re.search('(/)(\\w+$)',h5Key)\r\n key=match.group(2)\r\n logger.debug(\"{0:s}{1:s}: Reading h5Key {2:s} to tableName {3:s}.\".format(logStr,h5File,h5Key,key)) \r\n self.dataFrames[key]=h5Store[h5Key]\r\n \r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n finally:\r\n h5Store.close()\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()",
"def test_create():\n\n with tempfile.TemporaryDirectory() as td:\n fp = os.path.join(td, 'outputs.h5')\n\n with Outputs(fp, 'w') as f:\n f.meta = meta\n f.time_index = time_index\n\n with h5py.File(fp, 'r') as f:\n test_meta = pd.DataFrame(f['meta'][...])\n test_ti = f['time_index'][...]\n assert test_meta.shape == (100, 2)\n assert len(test_ti) == 8760\n\n assert f.attrs['package'] == 'reV'\n assert f.attrs['version'] == __version__"
] | [
"0.7140792",
"0.6980622",
"0.68066597",
"0.6773389",
"0.6753648",
"0.6693808",
"0.65225184",
"0.6473293",
"0.6460949",
"0.63484126",
"0.6270696",
"0.6265804",
"0.62491304",
"0.62026066",
"0.61233056",
"0.6107673",
"0.6106745",
"0.6065954",
"0.60572034",
"0.60490173",
"0.60163903",
"0.6012979",
"0.60012543",
"0.598221",
"0.5978892",
"0.5955647",
"0.5946696",
"0.59112036",
"0.58953357",
"0.58685344"
] | 0.740254 | 0 |
This function adds Raman calibration data to an existing hdf5 file. It uses the spectrafit.fit_data function to fit the data before saving the fit result and the raw data to the hdf5 file. | def add_calibration(hdf5_filename, data_filename, label=None):
# handling input errors
if not isinstance(hdf5_filename, str):
        raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
        raise TypeError('`hdf5_filename` is not type = .hdf5! Instead, it is: '
+ hdf5_filename.split('/')[-1].split('.')[-1])
if not isinstance(data_filename, str):
raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '
+ str(type(data_filename)))
# r+ is read/write mode and will fail if the file does not exist
cal_file = h5py.File(hdf5_filename, 'r+')
if data_filename.split('.')[-1] == 'xlsx':
data = pd.read_excel(data_filename, header=None, names=('wavenumber', 'counts'))
elif data_filename.split('.')[-1] == 'csv':
data = pd.read_csv(data_filename, header=None, names=('wavenumber', 'counts'))
else:
print('data file type not recognized')
# ensure that the data is listed from smallest wavenumber first
if data['wavenumber'][:1].values > data['wavenumber'][-1:].values:
data = data.iloc[::-1]
data.reset_index(inplace=True, drop=True)
else:
pass
# peak detection and data fitting
fit_result, residuals = spectrafit.fit_data(data['wavenumber'].values, data['counts'].values)
# write data to .hdf5 using custom label if provided
if label is not None:
cal_file['{}/wavenumber'.format(label)] = data['wavenumber']
cal_file['{}/counts'.format(label)] = data['counts']
cal_file['{}/residuals'.format(label)] = residuals
for i, result in enumerate(fit_result):
# create custom datatype
my_datatype = np.dtype([('fraction', np.float),
('center', np.float),
('sigma', np.float),
('amplitude', np.float),
('fwhm', np.float),
('height', np.float),
('area under the curve', np.float)])
if i < 9:
dataset = cal_file.create_dataset('{}/Peak_0{}'.format(label, i+1),
(1,), dtype=my_datatype)
else:
                dataset = cal_file.create_dataset('{}/Peak_{}'.format(label, i+1),
(1,), dtype=my_datatype)
# apply data to tuple
data = tuple(result[:7])
data_array = np.array(data, dtype=my_datatype)
# write new values to the blank dataset
dataset[...] = data_array
else:
label = (data_filename.split('/')[-1]).split('.')[0]
cal_file['{}/wavenumber'.format(label)] = data['wavenumber']
cal_file['{}/counts'.format(label)] = data['counts']
cal_file['{}/residuals'.format(label)] = residuals
for i, result in enumerate(fit_result):
# create custom datatype
my_datatype = np.dtype([('fraction', np.float),
('center', np.float),
('sigma', np.float),
('amplitude', np.float),
('fwhm', np.float),
('height', np.float),
('area under the curve', np.float)])
if i < 9:
dataset = cal_file.create_dataset('{}/Peak_0{}'.format(label, i+1),
(1,), dtype=my_datatype)
else:
dataset = cal_file.create_dataset('{}/Peak_{}'.format(label, i+1),
(1,), dtype=my_datatype)
# apply data to tuple
data = tuple(result[:7])
data_array = np.array(data, dtype=my_datatype)
# write new values to the blank dataset
dataset[...] = data_array
print("""Data from {} fit with compound pseudo-Voigt model.
Results saved to {}.""".format(data_filename, hdf5_filename))
cal_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self,data): \n \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n units = {'A':'K','x0':'degrees','y0':'degrees','sigx':'degrees','sigy':'degrees','sigy_scale':'none','B':'K','phi':'radians'}\n\n outfile = '{}/{}_{}'.format(self.output_dir,self.prefix,fname)\n\n print ('WRITING: ',outfile)\n output = h5py.File(outfile,'a')\n\n # Set permissions and group\n os.chmod(outfile,0o664)\n shutil.chown(outfile, group='comap')\n\n ##\n ## Narrow channel fits\n ##\n\n for valerr in ['Values','Errors','Chi2']:\n if f'Gauss_Narrow_{valerr}' in output:\n del output[f'Gauss_Narrow_{valerr}']\n gauss_fits = output.create_group(f'Gauss_Narrow_{valerr}')\n gauss_fits.attrs['FitFunc'] = self.model.__name__\n gauss_fits.attrs['source_el'] = self.source_positions['mean_el']\n gauss_fits.attrs['source_az'] = self.source_positions['mean_az']\n\n dnames = self.map_parameters\n dsets = [self.map_fits[valerr][...,iparam] for iparam in range(self.map_fits[valerr].shape[-1])]\n\n for (dname, dset) in zip(dnames, dsets):\n if dname in output:\n del output[dname]\n print(dname,dset.shape,units[dname])\n gauss_dset = gauss_fits.create_dataset(dname, data=dset)\n gauss_dset.attrs['Unit'] = units[dname]\n \n\n output.attrs['SourceFittingVersion'] = __version__\n output.attrs['source'] = self.getSource(data)\n output.close()\n self.linkfile(data)",
"def fit_HIDRA(runNumber, rootDir, dataDir, phases, mode='texture', sequential=False, liveplot=True, exportPFs=False, pfType='jul', smplRot=None, pbaridx=None, pbarcolor='WHITE', ranges=None, rot_phase=None):\r\n\r\n # define instrument\r\n inst = 'HIDRA'\r\n\r\n # load in .h5 file\r\n if mode == 'auto': \r\n fname = 'HB2B_{}.h5'.format(runNumber)\r\n desc = '{}_aut_uq'.format(runNumber)\r\n elif mode == 'texture': \r\n fname = 'HB2B_{}_texture.h5'.format(runNumber)\r\n desc = '{}_tex_uq'.format(runNumber)\r\n else: raise ValueError('mode not recognized..')\r\n \r\n exp_h5 = h5py.File(os.path.join(dataDir,fname), 'r')\r\n\r\n # read wavelength\r\n lmbda = exp_h5['instrument/monochromator setting/wave length'][()][0]\r\n\r\n # read angular data\r\n chi = exp_h5['raw data/logs/chi'][()]\r\n phi = exp_h5['raw data/logs/phi'][()]\r\n omega = exp_h5['raw data/logs/omega'][()]\r\n two_theta = exp_h5['reduced diffraction data/2theta'][()]\r\n\r\n # number of measured patterns (for loop)\r\n meas_num = len(phi)\r\n\r\n # read intensity data\r\n if mode == 'auto': #no eta slice\r\n \r\n # get from raw data/logs/2thetaSetpoint\r\n num_det_pos = len(np.unique(exp_h5['raw data/logs/2thetaSetpoint'][()]))\r\n num_eta_slice = 1\r\n\r\n eta_zero = np.nan_to_num(exp_h5['reduced diffraction data/main'][()])\r\n max_int = np.max(eta_zero)\r\n\r\n elif mode == 'texture': #should have eta slices\r\n\r\n num_det_pos = len(np.unique(exp_h5['raw data/logs/2thetaSetpoint'][()]))\r\n num_eta_slice = 3\r\n\r\n eta_neg5 = np.nan_to_num(exp_h5['reduced diffraction data/eta_-5.0'][()])\r\n eta_zero = np.nan_to_num(exp_h5['reduced diffraction data/eta_0.0'][()])\r\n eta_pos5 = np.nan_to_num(exp_h5['reduced diffraction data/eta_5.0'][()])\r\n\r\n max_int = np.max([np.max(eta) for eta in [eta_neg5, eta_zero, eta_pos5]])\r\n\r\n # close the h5 file\r\n exp_h5.close()\r\n\r\n # number of measured q\r\n rot_num = int((meas_num/num_det_pos)*num_eta_slice)\r\n\r\n ## fitting setup ##\r\n d_all = []\r\n ref_all = []\r\n cnt_all = []\r\n name_all = []\r\n\r\n ## get phase data ##\r\n for pi, (pn, ph) in enumerate(phases.items()):\r\n\r\n for k,v in ph.d_spacing(dmin=lmbda/2).items():\r\n\r\n d_all.append(v[0])\r\n ref_all.append(v[-1])\r\n cnt_all.append(pi)\r\n\r\n name_all.append(pn)\r\n\r\n sort_idx = np.argsort(d_all)\r\n d_all = [d_all[i] for i in sort_idx]\r\n ref_all = [ref_all[i] for i in sort_idx]\r\n cnt_all = [cnt_all[i] for i in sort_idx]\r\n tt_all = [2*np.rad2deg(np.arcsin(lmbda/(2*d))) for d in d_all]\r\n \r\n ## setup pole fig dictionary ##\r\n pfd = {}\r\n for i,(d,ref,pi,tt) in enumerate(zip(d_all,ref_all,cnt_all,tt_all)):\r\n \r\n pfd[i+1] = {}\r\n pfd[i+1]['phase'] = name_all[pi]\r\n pfd[i+1]['ref'] = ''.join(map(str,ref))\r\n pfd[i+1]['data'] = np.zeros(( rot_num, 5 ))\r\n pfd[i+1]['tt'] = tt\r\n pfd[i+1]['lattice'] = phases[name_all[pi]].lattice\r\n pfd[i+1]['lattice_type'] = phases[name_all[pi]].get_type()\r\n\r\n # for PF Δk index\r\n # will iterate +1 on each insertion\r\n # to account for variable # of points for each PF \r\n # (shouldn't be the case in CW?)\r\n pfd[i+1]['pole_cnt'] = 0\r\n \r\n # setup flag if it was fit or not\r\n pfd[i+1]['fit'] = False\r\n\r\n # where to store\r\n poleFig_path = os.path.join(rootDir,'pole_figs',desc)\r\n fitResult_path = os.path.join(rootDir,'fit_results',desc,'params')\r\n fitImage_path = os.path.join(rootDir,'fit_results',desc,'figures')\r\n\r\n if not os.path.exists(fitResult_path): os.makedirs(fitResult_path)\r\n if not os.path.exists(fitImage_path): 
os.makedirs(fitImage_path)\r\n if not os.path.exists(poleFig_path): os.makedirs(poleFig_path)\r\n\r\n # progress bar setup\r\n if pbaridx is None:\r\n refine_pbar = tqdm(range(meas_num),desc=desc)\r\n else:\r\n refine_pbar = tqdm(range(meas_num),desc=desc, position=pbaridx)\r\n \r\n border = \"=\"*80\r\n clear_border = _term_move_up() + \"\\r\" + \" \"*len(border) + \"\\r\"\r\n\r\n liveplot = False\r\n\r\n # ## figure setup\r\n # if liveplot is True:\r\n \r\n # fig = plt.figure(figsize=(12.8,4.8),constrained_layout=True)\r\n # gs = fig.add_gridspec(5,4)\r\n # ax1 = fig.add_subplot(gs[:4,:2])\r\n # ax2 = fig.add_subplot(gs[:4,2:])\r\n # ax3 = fig.add_subplot(gs[4,:2])\r\n # plt.pause(0.05)\r\n\r\n k = 0\r\n\r\n ## loop over rotations\r\n for ri in refine_pbar:\r\n\r\n t0 = time.time()\r\n\r\n # easy to reference these later \r\n o = omega[ri]\r\n c = 90 - chi[ri]\r\n p = 360 - phi[ri]\r\n \r\n if mode == 'auto': inner_iter = zip([eta_zero],[0])\r\n elif mode == 'texture': inner_iter = zip([eta_neg5, eta_zero, eta_pos5],[-5, 0, 5])\r\n # inner_iter = zip([eta_neg5, eta_zero, eta_pos5],[-5, 0, 5])\r\n\r\n # loop over data\r\n for meas_int,eta in inner_iter:\r\n\r\n # refine_pbar.write('\\n')\r\n\r\n t2 = time.time()\r\n\r\n # if o*2 < 90: continue\r\n\r\n counter = 0\r\n\r\n label = 'tt{}_o{}_c{}_p{}_e{}'.format(round(o*2),round(o),round(c),round(p),270 - eta)\r\n \r\n # get mask on invalid data on edges\r\n valid_data = ma.masked_where(meas_int[ri,:]==0,meas_int[ri,:])\r\n valid = ~valid_data.mask\r\n\r\n # get 2theta range of measurement\r\n tt_ran = two_theta[ri,valid]\r\n\r\n # get weights\r\n weights = 1 / meas_int[ri,valid]**2\r\n # get intensity\r\n inten = meas_int[ri,valid]\r\n \r\n # find what peaks are present\r\n tt_mask = (tt_all >= min(tt_ran)) * (tt_all <= max(tt_ran))\r\n tt_pres = list(itertools.compress(tt_all,tt_mask))\r\n # only these are present\r\n tt_pres_num = list(itertools.compress(range(len(tt_all)),tt_mask))\r\n # adjust index\r\n tt_pres_num = [v+1 for v in tt_pres_num]\r\n \r\n # num of peaks\r\n num_peaks = len(tt_pres_num)\r\n\r\n # setup lmfit model\r\n model = ConstantModel()\r\n for i in tt_pres_num:\r\n # add individual peaks\r\n model = model + PseudoVoigtModel(prefix='p{}_'.format(i))\r\n \r\n ## initialize params\r\n params = model.make_params()\r\n \r\n # guess the background\r\n I_bkgd = np.median(inten)\r\n params['c'].set(value = I_bkgd)\r\n\r\n # set peak initial parameters\r\n for i in tt_pres_num:\r\n \r\n\r\n \r\n pk_loc = pfd[i]['tt']\r\n pk_loc_lo = pfd[i]['tt'] - 0.5\r\n pk_loc_hi = pfd[i]['tt'] + 0.5\r\n\r\n loi = np.argmin( np.abs( tt_ran - pk_loc_lo ) )\r\n hii = np.argmin( np.abs( tt_ran - pk_loc_hi ) )\r\n\r\n I_guess = (np.max(inten[loi:hii]) - I_bkgd)/2\r\n if I_guess < 4:\r\n I_guess = 1E-2\r\n TT_guess = tt_ran[np.argmax(inten[loi:hii])+loi]\r\n\r\n # set center\r\n params['p{}_center'.format(i)].set(value = TT_guess,\r\n min = TT_guess - 0.5,\r\n max = TT_guess + 0.5)\r\n # set amplitude\r\n # print(f'{i} - {pk_loc}:{TT_guess} - {I_guess}')\r\n # print(f'{pk_loc_lo} - {pk_loc_hi}')\r\n # print(f'{i} - {inten[loi:hii]}')\r\n params['p{}_amplitude'.format(i)].set(I_guess, min=0)\r\n \r\n # set lims on FWHM\r\n params['p{}_sigma'.format(i)].set(value=0.2,min=0,max=0.35)\r\n\r\n # setup file to save parameters (.json)\r\n fitResult = os.path.join(fitResult_path,'fitParams_{}.json'.format(label))\r\n\r\n if sequential:\r\n # skip on first run\r\n if counter == 0: pass\r\n else: \r\n priorFitResult = os.path.join(fitResult_path,\r\n 
'fitParams_{}.json'.format(prev_label))\r\n with open(priorFitResult,'r') as f_in:\r\n params = params.load(f_in)\r\n \r\n # fit model\r\n\r\n t3 = time.time()\r\n\r\n # refine_pbar.write('model setup time:{}'.format(t3-t2)) \r\n\r\n init = model.eval(params, x=tt_ran)\r\n out = model.fit(meas_int[ri, valid],\r\n params,\r\n x=tt_ran, \r\n fit_kws={'gtol':1E-3,\r\n 'xtol':1E-3,\r\n 'ftol':1E-3},\r\n method='least_squares') \r\n\r\n comps = out.eval_components(x=tt_ran)\r\n\r\n t4 = time.time()\r\n \r\n # refine_pbar.write('model fit time:{}'.format(t4-t3))\r\n\r\n out_pars = out.params.copy()\r\n n_boot = 100\r\n II = {}\r\n II_esd = {}\r\n\r\n # # Get uncertainty estimate for integrated intensity (?)\r\n # for comp in out.model.components:\r\n # if 'linear' in comp.name: continue\r\n # elif 'constant' in comp.name: continue\r\n # # Get the names and params\r\n # comp_par_names = comp._param_names\r\n # comp_pars = []\r\n # for par_name in comp_par_names:\r\n # par = out_pars[par_name]\r\n # if par.stderr is None:\r\n # comp_pars.append(np.ones(n_boot)*par.value)\r\n # # tqdm.write(str(par))\r\n # else:\r\n # try:\r\n # comp_pars.append(norm.rvs(loc=par.value,scale=par.stderr,size=n_boot))\r\n # except ValueError:\r\n # comp_pars.append(np.ones(n_boot)*par.value)\r\n\r\n # comp_pars = np.asarray(comp_pars).T\r\n # tt_ran2 = np.tile(tt_ran, [n_boot,1])\r\n # calc = comp.func(tt_ran2, comp_pars[:,0][:,None],comp_pars[:,1][:,None],comp_pars[:,2][:,None],comp_pars[:,3][:,None])\r\n # comp_II = np.trapz(calc, x=tt_ran2, dx=tt_ran[1]-tt_ran[0])\r\n\r\n\r\n # # comp_pars = np.asarray(comp_pars).T\r\n\r\n # # comp_II = []\r\n\r\n # # for n in range(n_boot):\r\n # # # Evaluate the new set\r\n # # calc = comp.func(tt_ran,amplitude=comp_pars[n,0],center=comp_pars[n,1],sigma=comp_pars[n,2],fraction=comp_pars[n,3])\r\n # # comp_II.append(np.trapz(y = calc,\r\n # # x = tt_ran,\r\n # # dx = tt_ran[1]-tt_ran[0]))\r\n \r\n # comp_II = removeOutliers(comp_II, 1.5)\r\n # II[comp.prefix] = np.mean(comp_II)\r\n # II_esd[comp.prefix] = np.std(comp_II)\r\n\r\n # # esd = out.params[comp_par_names[0]].stderr\r\n # # print(f'{II[comp.prefix]} - {II_esd[comp.prefix]} - {esd} | {comp.prefix}')\r\n\r\n # Get uncertainty estimate for integrated intensity - fast way, just use cov\r\n for comp in out.model.components:\r\n if 'linear' in comp.name: continue\r\n elif 'constant' in comp.name: continue\r\n comp_par_names = comp._param_names\r\n # II[comp.prefix] = np.mean(out.params[comp_par_names[0]].stderr)\r\n esd = out.params[comp_par_names[0]].stderr\r\n if esd is None:\r\n II_esd[comp.prefix] = 0.0\r\n elif np.isnan(esd) is False:\r\n II_esd[comp.prefix] = esd\r\n else:\r\n II_esd[comp.prefix] = 0.0\r\n\r\n prev_label = label\r\n\r\n # calculate weighted R (fit quality)\r\n rwp = np.sum( weights * out.residual**2 ) / np.sum( weights * inten**2 )\r\n\r\n # write to console\r\n # this goes fast.. 
only print if there's a problem\r\n if not out.success: \r\n refine_pbar.write(clear_border + '--- ω:{} | χ:{} | φ:{} | η:{} ---'.format(int(o),int(c),int(p),int(eta)))\r\n refine_pbar.update()\r\n refine_pbar.write(clear_border + 'Fit was not successful!')\r\n refine_pbar.update()\r\n refine_pbar.write(clear_border + 'Rwp : {:3.2f}%'.format(rwp*100))\r\n refine_pbar.update()\r\n refine_pbar.write(border)\r\n refine_pbar.update()\r\n\r\n # save fit params for posterity\r\n with open(fitResult,'w') as f_out:\r\n out.params.dump(f_out) \r\n\r\n t5 = time.time()\r\n\r\n # refine_pbar.write('model output time:{}'.format(t5-t4))\r\n\r\n # store peak intensity\r\n for i in tt_pres_num:\r\n\r\n # get q counter\r\n pole_cnt = pfd[i]['pole_cnt']\r\n\r\n # get 2theta\r\n tt = out.params['p{}_center'.format(i)].value\r\n\r\n # get projection (q)\r\n q = rotate_project_q(tt/2, o, c, p, 270 - eta) #was 360 - p\r\n\r\n # store it\r\n pfd[i]['data'][pole_cnt,0] = q[0]\r\n pfd[i]['data'][pole_cnt,1] = q[1]\r\n pfd[i]['data'][pole_cnt,2] = q[2] \r\n\r\n # tell me it's fit\r\n pfd[i]['fit'] = True\r\n\r\n # tell me what type to output\r\n pfd[i]['type'] = pfType\r\n \r\n # integrate\r\n II = np.trapz(y = comps['p{}_'.format(i)],\r\n x = tt_ran,\r\n dx = tt_ran[1]-tt_ran[0])\r\n\r\n # # store integ. int\r\n # pfd[i]['data'][pole_cnt,3] = II\r\n \r\n # store integ. int\r\n pfd[i]['data'][pole_cnt,3] = II\r\n pfd[i]['data'][pole_cnt,4] = II_esd['p{}_'.format(i)]\r\n\r\n ## counter for Δk\r\n pfd[i]['pole_cnt'] += 1\r\n \r\n # too fast to plot live\r\n if liveplot is True:\r\n\r\n # ## figure setup\r\n fig = plt.figure(figsize=(12.8,4.8),constrained_layout=True)\r\n gs = fig.add_gridspec(5,4)\r\n ax1 = fig.add_subplot(gs[:4,:2])\r\n ax2 = fig.add_subplot(gs[:4,2:])\r\n ax3 = fig.add_subplot(gs[4,:2])\r\n\r\n # if k > 0:\r\n # ax1.clear()\r\n # ax2.clear()\r\n # ax3.clear()\r\n \r\n ## print result plot \r\n ax1.plot(tt_ran, inten, 'b')\r\n ax1.plot(tt_ran, init, 'k--', label='initial fit')\r\n ax1.plot(tt_ran, out.best_fit, 'r-', label='best fit')\r\n ax3.plot(tt_ran, out.best_fit - inten, 'g-')\r\n ax2.plot(tt_ran, inten, 'b')\r\n \r\n for i in tt_pres_num:\r\n \r\n ax2.plot(tt_ran, comps['p{}_'.format(i)], '--', label='Peak {}_{}'.format(pfd[i]['phase'],pfd[i]['ref']))\r\n \r\n # housekeeping\r\n ax1.legend(loc='best')\r\n if num_peaks < 7: ax2.legend(loc='best')\r\n ax1.set_ylim(0,max_int+50)\r\n ax2.set_ylim(0,max_int+50)\r\n ax1.set_ylabel('Intensity')\r\n ax1.set_xlabel('2θ (degrees)')\r\n ax2.set_ylabel('Intensity')\r\n ax2.set_xlabel('2θ (degrees)')\r\n ax3.set_ylabel('Difference')\r\n ax3.set_xlabel('2θ (degrees)')\r\n\r\n ax2.set_ylim(top=0.20*np.max(meas_int))\r\n \r\n # plt.pause(0.05) \r\n # plt.show() \r\n\r\n ## save fit image for posterity\r\n # plt.savefig(os.path.join(fitImage_path,'fit_{}'.format(label)),dpi=300)\r\n plt.close()\r\n\r\n k += 1\r\n\r\n t6 = time.time()\r\n\r\n # refine_pbar.write('plot save time:{}'.format(t6-t5))\r\n\r\n ## close out\r\n if liveplot: plt.close()\r\n \r\n # export the pole figures\r\n export_pfs(inst, desc, pfd, poleFig_path)\r\n\r\n # # write the MTEX file\r\n write_MTEX(desc, pfd, poleFig_path, smplSym='1', smplRot=smplRot, ranges=ranges, rot_phase=rot_phase)",
"def append_rates(path, detection_rate, formation_rate, merger_rate, redshifts, COMPAS, n_redshifts_detection,\n maxz=1., sensitivity=\"O1\", dco_type=\"BHBH\", mu0=0.035, muz=-0.23, sigma0=0.39, sigmaz=0., alpha=0.,\n append_binned_by_z = False, redshift_binsize=0.1):\n print('shape redshifts', np.shape(redshifts))\n print('shape COMPAS.sw_weights', np.shape(COMPAS.sw_weights) )\n print('COMPAS.DCOmask', COMPAS.DCOmask, ' was set for dco_type', dco_type)\n print('shape COMPAS COMPAS.DCOmask', np.shape(COMPAS.DCOmask) )\n\n #################################################\n #Open hdf5 file that we will write on\n print('pathToData', path)\n with h5.File(path, 'r+') as h_new:\n # The rate info is shaped as BSE_Double_Compact_Objects[COMPAS.DCOmask] , len(redshifts)\n DCO = h_new['BSE_Double_Compact_Objects']#\n print('shape DCO[SEED]', np.shape(DCO['SEED'][()]) )\n\n #################################################\n # Create a new group where we will store data\n new_rate_group = 'Rates_mu0{}_muz{}_alpha{}_sigma0{}_sigmaz{}'.format(mu0, muz, alpha, sigma0, sigmaz)\n if append_binned_by_z:\n new_rate_group = new_rate_group + '_zBinned'\n\n if new_rate_group not in h_new:\n h_new.create_group(new_rate_group)\n else:\n print(new_rate_group, 'exists, we will overrwrite the data')\n\n\n #################################################\n # Bin rates by redshifts\n #################################################\n if append_binned_by_z:\n # Choose how you want to bin the redshift, these represent the left and right boundaries\n redshift_bins = np.arange(0, redshifts[-1]+redshift_binsize, redshift_binsize)\n fine_binsize = np.diff(redshifts)[0] #Assunming your redshift bins are equally spaced!!\n print('fine_binsize', fine_binsize)\n #Assuming your crude redshift bin is made up of an integer number of fine z-bins!!!\n i_per_crude_bin = redshift_binsize/fine_binsize \n i_per_crude_bin = int(i_per_crude_bin)\n\n ###################\n # convert crude redshift bins to volumnes and ensure all volumes are in Gpc^3\n crude_volumes = cosmology.comoving_volume(redshift_bins).to(u.Gpc**3).value\n # split volumes into shells \n crude_shell_volumes = np.diff(crude_volumes)\n\n ###################\n # convert redshifts to volumnes and ensure all volumes are in Gpc^3\n fine_volumes = cosmology.comoving_volume(redshifts).to(u.Gpc**3).value\n fine_shell_volumes = np.diff(fine_volumes)\n fine_shell_volumes = np.append(fine_shell_volumes, fine_shell_volumes[-1])\n\n # Convert your merger_rate back to 1/yr by multiplying by the fine_shell_volumes\n N_dco_in_z_bin = (merger_rate[:,:] * fine_shell_volumes[:])\n print('fine_shell_volumes', fine_shell_volumes)\n\n # The number of merging BBHs that need a weight\n N_dco = len(merger_rate[:,0])\n \n ####################\n # binned_merger_rate will be the (observed) weights, binned by redshhift\n binned_merger_rate = np.zeros( (N_dco, len(redshift_bins)-1) )# create an empty list to fill\n binned_detection_rate = np.zeros( (N_dco, len(redshift_bins)-1) )# create an empty list to fill\n\n # loop over all redshift redshift_bins\n for i in range(len(redshift_bins)-1):\n # Sum the number of mergers per year, and divide by the new dz volume to get a density\n # binned_merger_rate[:,i] = np.sum(N_dco_in_z_bin[:,digitized == i+1], axis = 1)/crude_shell_volumes[i]\n binned_merger_rate[:,i] = np.sum(N_dco_in_z_bin[:,i*i_per_crude_bin:(i+1)*i_per_crude_bin], axis = 1)/crude_shell_volumes[i]\n\n # only add detected rates for the 'detectable' redshifts\n if redshift_bins[i] < 
redshifts[n_redshifts_detection]:\n # The detection rate was already multiplied by the shell volumes, so we can sum it directly\n binned_detection_rate[:,i] = np.sum(detection_rate[:,i*i_per_crude_bin:(i+1)*i_per_crude_bin], axis = 1)\n save_redshifts = redshift_bins\n save_merger_rate = binned_merger_rate\n save_detection_rate = binned_detection_rate\n else: \n # To avoid huge filesizes, we don't really want All the data, \n # so we're going to save up to some redshift\n z_index = np.digitize(maxz, redshifts) -1\n\n # The detection_rate is a smaller array, make sure you don't go beyond the end\n detection_index = z_index if z_index < n_redshifts_detection else n_redshifts_detection\n\n print('You will only save data up to redshift ', maxz, ', i.e. index', z_index)\n save_redshifts = redshifts\n save_merger_rate = merger_rate[:,:z_index]\n save_detection_rate = detection_rate[:,:detection_index]\n\n print('save_redshifts', save_redshifts)\n\n #################################################\n # Write the rates as a separate dataset\n # re-arrange your list of rate parameters\n DCO_to_rate_mask = COMPAS.DCOmask #save this bool for easy conversion between BSE_Double_Compact_Objects, and CI weights\n rate_data_list = [DCO['SEED'][DCO_to_rate_mask], DCO_to_rate_mask , save_redshifts, save_merger_rate, merger_rate[:,0], save_detection_rate]\n rate_list_names = ['SEED', 'DCOmask', 'redshifts', 'merger_rate','merger_rate_z0', 'detection_rate'+sensitivity]\n for i, data in enumerate(rate_data_list):\n print('Adding rate info of shape', np.shape(data))\n # Check if dataset exists, if so, just delete it\n if rate_list_names[i] in h_new[new_rate_group].keys():\n del h_new[new_rate_group][rate_list_names[i]]\n # write rates as a new data set\n dataNew = h_new[new_rate_group].create_dataset(rate_list_names[i], data=data)\n\n #Always close your files again ;)\n h_new.close()\n print(('Done with append_rates :) your new files are here: {}'.format(path)))",
"def generateSDFitsFromHipsr(filename_in, path_in, filename_out, path_out, write_stokes=0, cal=None):\n \n # Open h5 file\n print \"\\nOpening files\"\n print \"-------------\"\n h5file = os.path.join(path_in, filename_in)\n out_file = os.path.join(path_out, filename_out)\n h6 = Hipsr6(h5file)\n pointing = h6.tb_pointing.cols\n obs = h6.tb_observation.cols\n obs_mode = obs.obs_mode[0].strip()\n ref_beams= obs.ref_beam[:]\n\n freqs = h6.freqs\n freqs_cal = h6.freqs_cal\n \n firmware = h6.tb_firmware_config.cols.firmware[0]\n \n print \"Input file: %s\"%h6.h5.filename\n print h6\n\n if cal == None:\n abspath = os.path.abspath( __file__ ).replace('sdfits.pyc', '').replace('sdfits.py', '')\n #diode_cal_file_x = \"%s/diode_jy_x.cal\"%abspath\n #diode_cal_file_y = \"%s/diode_jy_y.cal\"%abspath\n diode_cal_file = \"%s/diode_jy.cal\"%abspath\n else:\n diode_cal_file = cal\n\n print \"Using calibration %s\"%cal\n diode_temps_x, diode_temps_y, rx_temps_x, rx_temps_y = loadDiodeTemp(h6, diode_cal_file)\n\n scan_pointing_len = h6.tb_scan_pointing.shape[0]\n \n tb_lengths = []\n for beam in h6.h5.root.raw_data:\n if beam.shape[0] != scan_pointing_len:\n beam_id = int(beam.name.lstrip('beam_'))\n print \"WARNING: beam %i len: %i, scan_pointing len: %i\"%(beam_id, beam.shape[0], scan_pointing_len)\n tb_lengths.append(np.min([beam.shape[0], scan_pointing_len]))\n \n \n num_acc = np.max(tb_lengths) \n num_rows = num_acc * 13\n\n if num_acc == 0:\n print \"No data in %s. Skipping.\"%h5file\n return -1\n \n print \"No accumulations: %s, no rows: %s\"%(num_acc, num_rows)\n\n # We now need to generate a blank SD-FITS file, with the same number of rows\n print \"\\nGenerating blank SD-FITS file with %i rows...\"%num_rows\n\n path = findLibraryPath()\n if obs_mode == 'MXCAL':\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_mxcal.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_mxcal.txt')\n elif write_stokes == 2:\n print \"Stokes flag found - writing I,Q,U,V\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_stokes.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_stokes.txt')\n elif write_stokes == 0:\n print \"Writing XX, YY\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU.txt')\n else:\n print \"Writing XX, YY, XY, YX\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_xpol.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_xpol.txt')\n \n if '200_16384' in firmware:\n coldef_file = os.path.join(path, 'coldefs_dataHDU_200_16384.txt')\n \n hdulist = generateBlankSDFits(num_rows, header_primary, header_tbl, coldef_file)\n print hdulist.info()\n \n # Next, we copy over observation data \n print \"Filling new SD-FITS with HIPSR data...\"\n sdtab = hdulist[1].data\n sdhead = hdulist[1].header\n\n # Fill in header values\n sdhead[\"OBSERVER\"] = obs.observer[0]\n sdhead[\"PROJID\"] = obs.project_id[0]\n \n # Fill in common values\n # NEW METHOD OF TIMESTAMPING - AUG 27 2013\n ref_time = int(h6.h5.root.raw_data.beam_01.cols.timestamp[0])\n ref_id = int(h6.h5.root.raw_data.beam_01.cols.id[0])\n ref_clk = np.abs(h6.h5.root.observation.cols.bandwidth[0]) * 1e6\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n acc_len = h6.h5.root.firmware_config.cols.acc_len[0]\n # OLD - 
BEFORE MAR 2018 ref_delta = num_chans * acc_len * 2 / ref_clk\n # NEW - post MAR 2018\n fs = 800e6\n ref_delta = 4 * num_chans * acc_len / fs\n \n f = h6.freqs\n\n print \"Filling in common values... \",\n sdtab[\"SCAN\"][:] = 1\n sdtab[\"EXPOSURE\"][:] = ref_delta\n sdtab[\"OBJECT\"][:] = pointing.source[0]\n sdtab[\"OBJ-RA\"][:] = pointing.ra[0]\n sdtab[\"OBJ-DEC\"][:] = pointing.dec[0]\n sdtab[\"RESTFRQ\"][:] = obs.frequency[0] * 1e6\n sdtab[\"FREQRES\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"BANDWID\"][:] = np.abs(obs.bandwidth[0]) * 1e6\n sdtab[\"CRPIX1\"][:] = num_chans/2 + 1\n sdtab[\"CRVAL1\"][:] = obs.frequency[0] * 1e6\n sdtab[\"CDELT1\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"FLAGGED\"][:] = 0\n sdtab[\"SCANRATE\"][:] = obs.scan_rate[0] / 60 # Deg/min to deg/s\n\n\n # TCS INFO\n sdtab[\"OBSMODE\"][:] = obs.obs_mode[0] \n sdtab[\"IF\"][:] = 1\n print \"OK.\"\n \n row_sd = 0\n cycle_id = 0\n \n flipped = False\n if obs.bandwidth[0] < 0:\n flipped = True\n \n print \"Filling in unique values... \"\n num_cycles = np.min([scan_pointing_len, num_acc])\n for row_h5 in range(num_acc):\n cycle_id += 1 # Starts at 1 in SD-FITS file\n\n for beam in h6.h5.root.raw_data:\n beam_id = int(beam.name.lstrip('beam_'))\n LinePrint(\"%i of %i\"%(row_sd, num_rows))\n \n if cycle_id <= num_cycles:\n raj_id = \"mb%s_raj\"%beam.name.lstrip('beam_')\n dcj_id = \"mb%s_dcj\"%beam.name.lstrip('beam_')\n \n sdtab[\"CYCLE\"][row_sd] = cycle_id\n\n # Fix beam mapping (remove after fixing mapping)\n sdtab[\"BEAM\"][row_sd] = beam_id\n \n sdtab[\"CRVAL3\"][row_sd] = h6.tb_scan_pointing.col(raj_id)[cycle_id-1]\n sdtab[\"CRVAL4\"][row_sd] = h6.tb_scan_pointing.col(dcj_id)[cycle_id-1]\n\n # AZ, EL and PARANGLE should be stored for beam 1 only\n if beam_id == 1:\n sdtab[\"AZIMUTH\"][row_sd] = h6.tb_scan_pointing.col(\"azimuth\")[cycle_id-1]\n sdtab[\"ELEVATIO\"][row_sd] = h6.tb_scan_pointing.col(\"elevation\")[cycle_id-1]\n sdtab[\"PARANGLE\"][row_sd] = h6.tb_scan_pointing.col(\"par_angle\")[cycle_id-1]\n\n #sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_scan_pointing.col(\"focus_axi\")[cycle_id-1]\n sdtab[\"FOCUSTAN\"][row_sd] = h6.tb_scan_pointing.col(\"focus_tan\")[cycle_id-1]\n\n # This is confusing - but it looks like FOCUSROT should be 15.0, which is sent as feed_angle\n # Likewise, focusaxi is probably supposed to be what we receive as focus_rot\n focus_rot = h6.tb_scan_pointing.col(\"focus_rot\")[cycle_id-1]\n sdtab[\"FOCUSROT\"][row_sd] = focus_rot\n sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_observation.col(\"feed_angle\")[0]\n\n try:\n\n # OLD - 27 Aug 2013\n #timestamp = beam.cols.timestamp[row_h5]\n # New - based off integration length\n if beam_id == 1:\n new_id = beam.cols.id[row_h5]\n timestamp = (new_id - ref_id) * ref_delta + ref_time\n date_obs, time = timestamp2dt(timestamp)\n\n sdtab[\"DATE-OBS\"][row_sd] = date_obs\n sdtab[\"TIME\"][row_sd] = time\n\n ref_beam = ref_beams[np.argmin(np.abs(timestamp - obs.date[:]))]\n \n # Compute T_sys for each beam\n T_d_x = diode_temps_x[beam_id-1]\n T_d_y = diode_temps_y[beam_id-1]\n\n T_sys_x, T_sys_y = computeTsys(beam, row_h5, T_d_x, T_d_y)\n S_sys_x, S_sys_y = computeTsysSpec(h6, beam, row_h5, T_d_x, T_d_y)\n\n\n #print T_sys_x, T_sys_y\n sdtab[\"TSYS\"][row_sd] = (T_sys_x, T_sys_y)\n sdtab[\"TCAL\"][row_sd] = (np.average(extractMid(T_d_x)), np.average(extractMid(T_d_y)))\n #sdtab[\"CALFCTR\"][row_sd] = (1, 1)\n\n xx = beam.cols.xx[row_h5].astype('float32')\n yy = beam.cols.yy[row_h5].astype('float32')\n xx[0], yy[0] = 0, 0\n \n # See if 
there is cross corr \n if write_stokes in (1, 2):\n re_xy = beam.cols.re_xy[row_h5].astype('float32')\n im_xy = beam.cols.im_xy[row_h5].astype('float32')\n re_xy[0], im_xy[0] = 0, 0\n \n if flipped:\n xx, yy = xx[::-1], yy[::-1]\n if write_stokes in (1, 2):\n re_xy, im_xy = re_xy[::-1], im_xy[::-1]\n\n # DCP 2019.01 - Adding refbeam to all file types\n sdtab[\"REFBEAM\"][row_sd] = ref_beam\n #if obs_mode == 'MXCAL':\n # sdtab[\"REFBEAM\"][row_sd] = ref_beam\n\n if write_stokes == 2:\n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n re_xy = re_xy / fitLine(f, re_xy, num_chans)* np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, im_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n \n # Ettore tells me Parkes uses this definition\n # i.e. that I is the average of xx + yy\n ii = (xx + yy) / 2\n qq = (xx - yy) / 2\n uu = re_xy\n vv = im_xy\n \n # Form one data vector\n data1 = np.append(ii, qq)\n data2 = np.append(uu, vv)\n data = np.append(data1, data2)\n data = data.reshape([1,1,4,num_chans])\n else:\n\n if write_stokes == 1:\n re_xy = re_xy / fitLine(f, re_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, re_im, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n re_xy[0], im_xy[0] = 0, 0\n\n #print \"cal factor: %2.3f\"%cf\n #print \"Diode temp: %s\"%T_d\n #xx, yy = applyCal(beam, row_h5, freqs, freqs_cal, cf, T_d_x, T_d_y)\n \n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n # Multibeam stats screws up if it encounters division by 1\n xx[xx <= 1 ] = 1\n yy[yy <= 1 ] = 1\n \n do_flagger = True\n if do_flagger:\n flags = np.zeros(len(xx))\n flags[xx > 1000] = 1\n flags[yy > 1000] = 1\n flags[xx==1] = 1\n flags[yy==1] = 1\n flags = np.append(flags, flags)\n flags = flags.reshape([1,1,2,num_chans])\n \n sdtab[\"FLAGGED\"][row_sd] = flags\n \n data = np.append(xx, yy)\n data = data.reshape([1,1,2,num_chans])\n \n sdtab[\"DATA\"][row_sd] = data\n\n if write_stokes == 1:\n sdtab[\"XPOLDATA\"][row_sd] = np.row_stack((re_xy, im_xy)).flatten()\n \n except:\n if beam.name != 'beam_02':\n print \"\\nWARNING: missing row in %s\"%beam.name\n print \"Current index: %i\"%row_h5\n print \"Row length: %i\"%beam.shape[0]\n raise\n try:\n sdtab[\"FLAGGED\"][row_sd] = np.ones_like([1,1,2,num_chans])\n except ValueError:\n pass\n row_sd += 1\n else:\n print \"WARNING: scan_pointing table is not complete.\"\n print \"%s table length: %i\"%(beam.name, beam.shape[0])\n print \"scan_pointing table length: %i\"%scan_pointing_len\n\n \n h6.h5.close()\n \n if os.path.exists(out_file):\n print \"\\nInfo: File exists, deleting...\"\n os.remove(out_file)\n\n print \"\\nInfo: Saving to file\"\n hdulist.writeto(out_file)\n hdulist.close()",
"def run(self):\n\n # Setup hdf5 file and datasets\n self.vw_f = h5py.File(self.name,'w')\n self.vw,self.vwts = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n vw = self.vw_f.create_dataset('mov{}'.format(i), (self.hdf_resize, y, x), maxshape=(None, y, x), dtype='uint8', compression='lzf') \n vwts = self.vw_f.create_dataset('ts{}'.format(i), (self.hdf_resize,2), maxshape=(None,2), dtype=np.float64, compression='lzf')\n self.vw.append(vw)\n self.vwts.append(vwts)\n \n # Counters and buffers\n _sav_idx = [0]*self.n_cams # index within hdf5 dataset\n _buf_idx = [0]*self.n_cams # index of in-memory buffer that is periodicially dumped to hdf5 dataset\n _saving_buf,_saving_ts_buf = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n sb = np.empty((self.buffer_size,y,x), dtype=np.uint8)\n stb = np.empty((self.buffer_size,2), dtype=np.float64)\n _saving_buf.append(sb)\n _saving_ts_buf.append(stb)\n\n cams_running = [True for i in range(self.n_cams)]\n # Main loop\n while any(cams_running):\n # For all datasets: if there's not enough room to dump another buffer's worth into dataset, extend it\n # Then read new frames, and save/query as desired\n for di in range(self.n_cams):\n if not cams_running[di]:\n continue\n \n if self.vw[di].shape[0]-_sav_idx[di] <= self.buffer_size:\n assert self.vw[di].shape[0] == self.vwts[di].shape[0], 'Frame and timestamp dataset lengths are mismatched.'\n self.vw[di].resize((self.vw[di].shape[0]+self.hdf_resize, self.vw[di].shape[1], self.vw[di].shape[2]))\n self.vwts[di].resize((self.vwts[di].shape[0]+self.hdf_resize,self.vwts[di].shape[1]))\n \n # Get new frames from buffer, breaking out if empty and kill flag has been raised\n ts=temp=bsave=None\n try:\n ts,temp,bsave = self.frame_buffer[di].get(block=False)\n except Queue.Empty:\n if self.kill_flag.value:\n cams_running[di] = False\n continue\n\n if self.kill_flag.value==True:\n logging.info('Final flush for camera {}: {} frames remain.'.format(di, self.frame_buffer[di].qsize()))\n \n if di==self.query_idx and self.query_flag.value:\n self.query_queue[:] = temp.copy()\n self.query_queue_ts.value = ts[1]\n self.query_flag.value = False\n \n if bsave: # flag that this frame was added to queue during a saving period\n\n # add new data to in-memory buffer\n x,y = self.resolution[di]\n _saving_buf[di][_buf_idx[di]] = temp.reshape([y,x])\n _saving_ts_buf[di][_buf_idx[di]] = ts\n _buf_idx[di] += 1\n # if necessary, flush out buffer to hdf dataset\n if (self.flushing.value and _buf_idx[di]>=self.min_flush) or _buf_idx[di] >= self.buffer_size:\n if _buf_idx[di] >= self.buffer_size:\n logging.warning('Dumping camera b/c reached max buffer (buffer={}, current idx={})'.format(self.buffer_size, _buf_idx[di]))\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n _buf_idx[di] = 0\n\n # final flush:\n for di in range(self.n_cams):\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di]] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n # cut off all unused allocated space \n self.vw[di].resize([_sav_idx[di],self.vw[di].shape[1],self.vw[di].shape[2]])\n self.vwts[di].resize([_sav_idx[di],2])\n\n self.vw_f.close()\n self.saving_complete.value = True",
"async def on_setfitcalibrations(sid, data):\n try:\n calibrations = []\n with open(os.path.join(LOCATION, CALIBRATIONS_FILENAME)) as f:\n calibrations = json.load(f)\n for calibration in calibrations:\n if calibration[\"name\"] == data[\"name\"]:\n if calibration.get(\"fits\", None) is not None:\n index_to_delete = -1\n for i, fit in enumerate(calibration['fits']):\n if fit[\"name\"] == data[\"fit\"][\"name\"]:\n index_to_delete = i\n if index_to_delete >= 0:\n del calibrations[\"fits\"][index_to_delete]\n calibration[\"fits\"].append(data[\"fit\"])\n else:\n calibration[\"fits\"] = [].append(data[\"fit\"])\n with open(os.path.join(LOCATION, CALIBRATIONS_FILENAME), 'w') as f:\n json.dump(calibrations, f)\n except FileNotFoundError:\n print_calibration_file_error()",
"def hrc_gain_fit_gaus(c_input):\n#\n#--- if an obsid is provided, analyize that, else get new obsids from databases\n#\n\n if mcf.chkNumeric(c_input):\n candidate_list = [c_input]\n else:\n candidate_list = arlist.hrc_gain_find_ar_lac()\n\n if len(candidate_list) > 0:\n for obsid in candidate_list:\n file = extract_hrc_evt2(obsid)\n if file == 'na':\n continue\n#\n#--- get a file name header for the later use\n#\n temp = re.split('N', file)\n hname = temp[0]\n#\n#--- extract information from the fits file header\n#\n [obsid, detnam, date_obs, date_end, tstart, tstop, ra_pnt, dec_pnt, ra_nom, dec_nom, roll_pnt, foc_len, defocus, sim_x, sim_y, sim_z] = find_header_info(file)\n#\n#--- find the diffrence between real AR Lac position and nominal postion so that we can determin how much area we should include \n#\n ra_diff = abs(ra - ra_nom) * 60.0\n dec_diff = abs(dec - dec_nom) * 60.0\n rad_diff = math.sqrt(ra_diff * ra_diff + dec_diff * dec_diff)\n\n if rad_diff < 10.0:\n fit_rad = 60.0\n else:\n fit_rad = 200.0\n#\n#--- find a location of the brightest object (assume it is AR Lac) in sky coordinates\n#\n [x, y] = find_center(file)\n#\n#--- extract pha values in the given area\n#\n pha = extract_pha(file, x, y, fit_rad)\n#\n#--- create pha count distribution\n#\n pmax = max(pha) + 1\n pha_bin = [x for x in range(0, pmax)]\n pha_hist = [0 for x in range(0, pmax)]\n\n for ent in pha:\n pha_hist[ent] += 1\n#\n#--- print out the distirbution results\n#\n outfile = data_dir + hname + '_pha.dat'\n fo = open(outfile, 'w')\n for i in range(0, pmax):\n line = str(pha_bin[i]) + '\\t' + str(pha_hist[i]) + '\\n'\n fo.write(line)\n fo.close()\n#\n#--- find median point\n#\n med = find_med(pha_hist)\n#\n#--- fit a normal distribution on the data\n#\n [amp, center, width] = fit_gauss(pha_bin, pha_hist)\n#\n#--- print out the fitting result\n#\n outfile = house_keeping + 'fitting_results'\n\n copied_file = outfile + '~'\n cmd = 'cp ' + outfile + ' ' + copied_file\n os.system(cmd)\n\n fo = open(outfile, 'a')\n line = str(obsid) + '\\t' + date_obs + '\\t' + str(tstart) + '\\t' + detnam + '\\t' + str(ra_pnt) + '\\t' + str(dec_pnt) + '\\t\\t'\n line = line + str(round(ra_diff,3)) + '\\t' + str(round(dec_diff, 3)) + '\\t' + str(round(rad_diff,3)) + '\\t' + str(med) + '\\t\\t'\n line = line + str(round(center, 3)) + '\\t' + str(round(amp, 3)) + '\\t' + str(round(width, 3)) + '\\t'\n line = line + str(roll_pnt) + '\\t' + str(foc_len) + '\\t' + str(defocus) + '\\t'\n line = line + str(sim_x) + '\\t' + str(sim_y) + '\\t' + str(sim_z) + '\\n'\n fo.write(line)\n fo.close()\n#\n#--- plot the data\n#\n outfile = plot_dir + hname + '_gfit.png'\n plot_gauss(pha_bin, pha_hist, amp, center, width, file, outfile)\n#\n#--- remove the evt2 file\n#\n mcf.rm_file(file)",
"def addFit(self, fitdata, name='default'):\n if not hasattr(self, 'fits'):\n self.fits={}\n self.fits[name] = fitdata\n return",
"def add_experiment(hdf5_filename, exp_filename):\n # handling input errors\n if not isinstance(hdf5_filename, str):\n raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '\n + str(type(hdf5_filename)))\n if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':\n raise TypeError('`hdf5_filename` is not type = .hdf5! Instead, it is: '\n + hdf5_filename.split('/')[-1].split('.')[-1])\n if not isinstance(exp_filename, str):\n raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '\n + str(type(exp_filename)))\n # confirm exp_filename is correct format (can handle additional decimals in exp_filename\n label = '.'.join(exp_filename.split('/')[-1].split('.')[:-1])\n if len(label.split('_')) < 2:\n raise ValueError(\"\"\"Passed value of `exp_filename` inapproprate. exp_filename must contain\n at least one '_', preferably of the format somename_temp_time.xlsx (or .csv)\"\"\")\n # r+ is read/write mode and will fail if the file does not exist\n exp_file = h5py.File(hdf5_filename, 'r+')\n if exp_filename.split('.')[-1] == 'xlsx':\n data = pd.read_excel(exp_filename, header=None, names=('wavenumber', 'counts'))\n elif exp_filename.split('.')[-1] == 'csv':\n data = pd.read_csv(exp_filename, header=None, names=('wavenumber', 'counts'))\n else:\n print('data file type not recognized')\n # ensure that the data is listed from smallest wavenumber first\n if data['wavenumber'][:1].values > data['wavenumber'][-1:].values:\n data = data.iloc[::-1]\n data.reset_index(inplace=True, drop=True)\n else:\n pass\n # peak detection and data fitting\n fit_result, residuals = spectrafit.fit_data(data['wavenumber'].values, data['counts'].values)\n # extract experimental parameters from filename\n specs = exp_filename.split('/')[-1].split('.')[-2]\n if len(specs) > 1:\n spec = ''\n for _, element in enumerate(specs):\n spec = str(spec+element)\n specs = spec\n specs = specs.split('_')\n time = specs[-1]\n temp = specs[-2]\n # write data to .hdf5\n exp_file['{}/{}/wavenumber'.format(temp, time)] = data['wavenumber']\n exp_file['{}/{}/counts'.format(temp, time)] = data['counts']\n exp_file['{}/{}/residuals'.format(temp, time)] = residuals\n for i, result in enumerate(fit_result):\n # create custom datatype\n my_datatype = np.dtype([('fraction', np.float),\n ('center', np.float),\n ('sigma', np.float),\n ('amplitude', np.float),\n ('fwhm', np.float),\n ('height', np.float),\n ('area under the curve', np.float)])\n if i < 9:\n dataset = exp_file.create_dataset('{}/{}/Peak_0{}'.format(temp, time, i+1),\n (1,), dtype=my_datatype)\n else:\n dataset = exp_file.create_dataset('{}/{}/Peak_{}'.format(temp, time, i+1),\n (1,), dtype=my_datatype)\n # apply data to tuple\n data = tuple(result[:7])\n data_array = np.array(data, dtype=my_datatype)\n # write new values to the blank dataset\n dataset[...] = data_array\n print(\"\"\"Data from {} fit with compound pseudo-Voigt model.\n Results saved to {}.\"\"\".format(exp_filename, hdf5_filename))\n exp_file.close()",
"def add_random(fr_data_path, random_path, output_path):\n with h5py.File(random_path, 'r') as data:\n random = np.asarray(data['images'].value)\n \n means = np.mean(np.mean(random, axis=-1), axis=-1)\n empty = means == 0.0\n error = np.isnan(means)\n discard = empty | error\n\n random_i = np.where(~discard)\n random = random[random_i]\n\n random = center_on_brightest(random)\n \n with h5py.File(fr_data_path, 'r') as data:\n images = np.asarray(data[\"images\"].value) \n images = center_on_brightest(images)\n \n labels = np.where(np.asarray(data['labels']), 2, 1)\n\n images = np.concatenate((images, random), axis=0)\n labels = np.concatenate((labels, np.full((random.shape[0],), \n fill_value=0)), axis=0)\n\n with h5py.File(output_path, 'w') as f:\n f.create_dataset('images', data=images)\n f.create_dataset('labels', data=labels)\n\n with h5py.File(fr_data_path, 'r') as data: \n f.copy(data, 'fri_data')\n f.copy(data, 'frii_data')",
"def calibrate_file(self, calibration_slope, calibration_offset):\n\n self.data.average_spectrum = (calibration_slope * self.data.average_spectrum \n + calibration_offset)\n\n individual_wavelength = np.zeros(2048)\n individual_slope = np.zeros(2048)\n individual_offset = np.zeros(2048)\n\n for i_wavelength in range(2048):\n individual_wavelength[i_wavelength] = self.data.wavelength[\n i_wavelength * self.header.zero_fill]\n individual_slope[i_wavelength] = calibration_slope[\n i_wavelength * self.header.zero_fill]\n individual_offset[i_wavelength] = calibration_offset[\n i_wavelength * self.header.zero_fill]\n\n index = np.argsort(individual_wavelength)\n individual_wavelength = individual_wavelength[index]\n self.data.individual_wavelength = individual_wavelength\n average_spectrum = self.data.average_spectrum[index]\n\n i_min = np.argmin(abs(individual_wavelength - 8.0))\n i_max = np.argmin(abs(individual_wavelength - 14.0))\n\n for i in range(self.header.number_of_coadds):\n i_center_burst = np.argmax(np.absolute(self.data.interferogram[i]))\n\n size = self.header.interferogram_size\n interferogram_shift = size/2 - i_center_burst\n\n self.data.interferogram[i] = np.roll(self.data.interferogram[i], \n interferogram_shift)\n self.data.interferogram[i] = self.data.interferogram[i][\n size/2-2048:size/2+2048]\n\n window_fn = np.hanning(4096)\n \n spectrum = np.fft.fft(self.data.interferogram[i] * window_fn)\n spectrum = spectrum/3300\n spectrum = individual_slope * np.absolute(spectrum[0:2048]\n ) + individual_offset\n spectrum = spectrum[index]\n\n self.data.spectrum.append(spectrum)",
"def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n \n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')",
"def get_Sparrow_vols(vol_points_list,calibration_list,output_file,demagnified_pitch_size=6.25):\n # main HDF5 file\n try:\n print \"Creating new HDF5 file:\", output_file\n volumes = h5py.File(output_file,'w-')\n except:\n print \"Opening existing HDF5 file:\", output_file\n volumes = h5py.File(output_file,'r+')\n\n # main loop over points in volume and supersampling factors\n for i in xrange(len(vol_points_list)):\n \n # get volume points and create HDF5 group to store results for this loop\n vol_points = vol_points_list[i]\n vols_by_points = volumes.create_group('vol_points_'+str(i))\n\n # loop over calibration files for different supersampling factors\n for calibration_file in calibration_list:\n print \"Analyzing:\", calibration_file\n \n # create subgroup to write data to for this calibration file\n vols_by_sampling = vols_by_points.create_group('supersampling_factor_'+calibration_file.split('/')[-1].split('.')[0].split('_')[1])\n\n # get covariance operator\n Cov, raydb = get_Cov_from_calibration( calibration_file )\n\n # get point coordinates in discretized volume\n vol_coords = []\n vol_coords.append( get_voxel_coords(vol_points[0], raydb, pitch=demagnified_pitch_size))\n vol_coords.append( get_voxel_coords(vol_points[1], raydb, pitch=demagnified_pitch_size))\n print \"Volume points:\", vol_points\n print \"Volume coordinates:\",vol_coords\n\n # generate two psfs for vol_points and add them to get a volume containing both\n psf0 = get_psf_vol(vol_coords[0],Cov,raydb=raydb)\n psf1 = get_psf_vol(vol_coords[1],Cov,raydb=raydb)\n vol_vec = psf0 + psf1\n vol = np.reshape(vol_vec, Cov.vol_shape)\n dset = vols_by_sampling.create_dataset('Sparrow_volume', data=vol)\n volumes.close()\n return True",
"def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return",
"def saveFits(self, filename):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n header = fits.Header()\n header['NAXIS1'] = self.naxis\n header['NAXIS2'] = self.naxis\n header['CTYPE1'] = 'RA---SIN'\n header['CTYPE2'] = 'DEC--SIN'\n header['CDELT1'] = - self.fov/(np.pi/180 * self.naxis)\n header['CDELT2'] = self.fov/(np.pi/180 * self.naxis)\n header['BUNIT'] = 'JY/PIXEL'\n \n hdu = fits.PrimaryHDU(self.res, header=header)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(filename, overwrite=True)\n \n print(\"Saved as '%s'.\" %(filename))",
"def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))",
"def load_HIDRA(runNumber, rootDir, dataDir, phases, mode, exportPFs=True, pfType='jul', smplRot=None, pbaridx=None, pbarcolor='WHITE', ranges=None, rot_phase=None):\r\n\r\n # define instrument\r\n inst = 'HIDRA'\r\n\r\n # load in .h5 file\r\n if mode == 'auto': \r\n fname = 'HB2B_{}.h5'.format(runNumber)\r\n desc = '{}_aut_uq'.format(runNumber)\r\n elif mode == 'texture': \r\n fname = 'HB2B_{}_texture.h5'.format(runNumber)\r\n desc = '{}_tex_uq'.format(runNumber)\r\n else: raise ValueError('reduction not recognized..')\r\n \r\n # will need to open this later\r\n exp_h5 = h5py.File(os.path.join(dataDir,fname), 'r')\r\n\r\n # read wavelength\r\n lmbda = exp_h5['instrument/monochromator setting/wave length'][()][0]\r\n\r\n # read angular data\r\n chi = exp_h5['raw data/logs/chi'][()]\r\n phi = exp_h5['raw data/logs/phi'][()]\r\n omega = exp_h5['raw data/logs/omega'][()]\r\n two_theta = exp_h5['reduced diffraction data/2theta'][()]\r\n\r\n # number of measured patterns (for loop)\r\n meas_num = len(phi)\r\n\r\n # read intensity data\r\n if mode == 'auto': #no eta slice\r\n \r\n # get from raw data/logs/2thetaSetpoint\r\n num_det_pos = len(np.unique(exp_h5['raw data/logs/2thetaSetpoint'][()]))\r\n num_eta_slice = 1\r\n\r\n eta_zero = np.nan_to_num(exp_h5['reduced diffraction data/main'][()])\r\n max_int = np.max(eta_zero)\r\n\r\n elif mode == 'texture': #should have eta slices\r\n\r\n num_det_pos = len(np.unique(exp_h5['raw data/logs/2thetaSetpoint'][()]))\r\n num_eta_slice = 3\r\n\r\n eta_neg5 = np.nan_to_num(exp_h5['reduced diffraction data/eta_-5.0'][()])\r\n eta_zero = np.nan_to_num(exp_h5['reduced diffraction data/eta_0.0'][()])\r\n eta_pos5 = np.nan_to_num(exp_h5['reduced diffraction data/eta_5.0'][()])\r\n\r\n max_int = np.max([np.max(eta) for eta in [eta_neg5, eta_zero, eta_pos5]])\r\n\r\n # close the h5 file\r\n exp_h5.close()\r\n\r\n # number of measured q\r\n rot_num = int((meas_num/num_det_pos)*num_eta_slice)\r\n\r\n ## fitting setup ##\r\n d_all = []\r\n ref_all = []\r\n cnt_all = []\r\n name_all = []\r\n\r\n ## get phase data ##\r\n for pi, (pn, ph) in enumerate(phases.items()):\r\n\r\n for k,v in ph.d_spacing(dmin=lmbda/2).items():\r\n\r\n d_all.append(v[0])\r\n ref_all.append(v[-1])\r\n cnt_all.append(pi)\r\n\r\n name_all.append(pn)\r\n\r\n sort_idx = np.argsort(d_all)\r\n d_all = [d_all[i] for i in sort_idx]\r\n ref_all = [ref_all[i] for i in sort_idx]\r\n cnt_all = [cnt_all[i] for i in sort_idx]\r\n tt_all = [2*np.rad2deg(np.arcsin(lmbda/(2*d))) for d in d_all]\r\n \r\n ## setup pole fig dictionary ##\r\n pfd = {}\r\n for i,(d,ref,pi,tt) in enumerate(zip(d_all,ref_all,cnt_all,tt_all)):\r\n \r\n pfd[i+1] = {}\r\n pfd[i+1]['phase'] = name_all[pi]\r\n pfd[i+1]['ref'] = ''.join(map(str,ref))\r\n pfd[i+1]['data'] = np.zeros(( rot_num, 5 ))\r\n pfd[i+1]['tt'] = tt\r\n pfd[i+1]['lattice'] = phases[name_all[pi]].lattice\r\n pfd[i+1]['lattice_type'] = phases[name_all[pi]].get_type()\r\n\r\n # for PF Δk index\r\n # will iterate +1 on each insertion\r\n # to account for variable # of points for each PF \r\n # (shouldn't be the case in CW?)\r\n pfd[i+1]['pole_cnt'] = 0\r\n \r\n # setup flag if it was fit or not\r\n pfd[i+1]['fit'] = False\r\n\r\n # where to store\r\n poleFig_path = os.path.join(rootDir,'pole_figs',desc)\r\n fitResult_path = os.path.join(rootDir,'fit_results',desc,'params')\r\n fitImage_path = os.path.join(rootDir,'fit_results',desc,'figures')\r\n\r\n if not os.path.exists(fitResult_path): os.makedirs(fitResult_path)\r\n if not os.path.exists(fitImage_path): 
os.makedirs(fitImage_path)\r\n if not os.path.exists(poleFig_path): os.makedirs(poleFig_path)\r\n\r\n # progress bar setup\r\n if pbaridx is None:\r\n refine_pbar = tqdm(range(meas_num),desc=desc)\r\n else:\r\n refine_pbar = tqdm(range(meas_num),desc=desc, position=pbaridx)\r\n \r\n border = \"=\"*80\r\n clear_border = _term_move_up() + \"\\r\" + \" \"*len(border) + \"\\r\"\r\n\r\n # ## figure setup\r\n # if liveplot is True:\r\n \r\n # fig = plt.figure(figsize=(12.8,4.8),constrained_layout=True)\r\n # gs = fig.add_gridspec(5,4)\r\n # ax1 = fig.add_subplot(gs[:4,:2])\r\n # ax2 = fig.add_subplot(gs[:4,2:])\r\n # ax3 = fig.add_subplot(gs[4,:2])\r\n # plt.pause(0.05)\r\n\r\n # k = 0\r\n\r\n ## loop over rotations\r\n for ri in refine_pbar:\r\n\r\n # easy to reference these later \r\n o = omega[ri]\r\n c = 90 - chi[ri]\r\n p = 360 - phi[ri]\r\n \r\n if mode == 'auto': inner_iter = zip([eta_zero],[0])\r\n elif mode == 'texture': inner_iter = zip([eta_neg5, eta_zero, eta_pos5],[-5, 0, 5])\r\n # inner_iter = zip([eta_neg5, eta_zero, eta_pos5],[-5, 0, 5])\r\n\r\n # loop over data\r\n for meas_int,eta in inner_iter:\r\n\r\n counter = 0\r\n\r\n label = 'tt{}_o{}_c{}_p{}_e{}'.format(round(o*2),round(o),round(c),round(p),270 - eta)\r\n \r\n # get mask on invalid data on edges\r\n valid_data = ma.masked_where(meas_int[ri,:]==0,meas_int[ri,:])\r\n valid = ~valid_data.mask\r\n\r\n # get 2theta range of measurement\r\n tt_ran = two_theta[ri,valid]\r\n\r\n # get weights\r\n weights = 1 / np.sqrt(meas_int[ri,valid])\r\n \r\n # find what peaks are present\r\n tt_mask = (tt_all >= min(tt_ran)) * (tt_all <= max(tt_ran))\r\n tt_pres = list(itertools.compress(tt_all,tt_mask))\r\n # only these are present\r\n tt_pres_num = list(itertools.compress(range(len(tt_all)),tt_mask))\r\n # adjust index\r\n tt_pres_num = [v+1 for v in tt_pres_num]\r\n \r\n # num of peaks\r\n num_peaks = len(tt_pres_num)\r\n\r\n # setup lmfit model\r\n model = ConstantModel()\r\n for i in tt_pres_num:\r\n # add individual peaks\r\n model = model + PseudoVoigtModel(prefix='p{}_'.format(i))\r\n \r\n ## initialize params\r\n params = model.make_params()\r\n\r\n # setup file to save parameters (.json)\r\n fitResult = os.path.join(fitResult_path,'fitParams_{}.json'.format(label))\r\n\r\n with open(fitResult,'r') as f_in:\r\n try:\r\n params = params.load(f_in)\r\n out = ModelResult(model,params)\r\n comps = out.eval_components(x=tt_ran)\r\n except:\r\n pass \r\n\r\n # # calculate weighted R (fit quality)\r\n # rwp = np.sum( weights * out.residual**2 ) / np.sum( weights * meas_int[ri,valid]**2 )\r\n\r\n # # write to console\r\n # # this goes fast.. 
only print if there's a problem\r\n # if not out.success: \r\n # refine_pbar.write(clear_border + '--- ω:{} | χ:{} | φ:{} | η:{} ---'.format(int(o),int(c),int(p),int(eta)))\r\n # refine_pbar.update()\r\n # refine_pbar.write(clear_border + 'Fit was not successful!')\r\n # refine_pbar.update()\r\n # refine_pbar.write(clear_border + 'Rwp : {:3.2f}%'.format(rwp*100))\r\n # refine_pbar.update()\r\n # refine_pbar.write(border)\r\n # refine_pbar.update()\r\n\r\n # store peak intensity\r\n for i in tt_pres_num:\r\n\r\n # get q counter\r\n pole_cnt = pfd[i]['pole_cnt']\r\n\r\n # get 2theta\r\n # tt = out.params['p{}_center'.format(i)].value\r\n tt = pfd[i]['tt']\r\n ref = pfd[i]['ref']\r\n \r\n # print(out.params['p{}_center'.format(i)])\r\n # print(f'{tt} - {ref}')\r\n # print(f'{tt_ran[:10]}')\r\n # print('--- ω:{} | χ:{} | φ:{} | η:{} ---'.format(int(o),int(c),int(p),int(eta)))\r\n # raise Exception\r\n\r\n # get projection (q)\r\n q = rotate_project_q(tt/2, o, c, 360 + p, 270 - eta) #was 360 - p\r\n\r\n # pfd[i]['offset'][pole_cnt,0] = (tt/2) - o\r\n\r\n # store it\r\n pfd[i]['data'][pole_cnt,0] = q[0]\r\n pfd[i]['data'][pole_cnt,1] = q[1]\r\n pfd[i]['data'][pole_cnt,2] = q[2] \r\n\r\n # tell me it's fit\r\n pfd[i]['fit'] = True\r\n\r\n # tell me what type to output\r\n pfd[i]['type'] = pfType\r\n \r\n # integrate\r\n II = np.trapz(y = comps['p{}_'.format(i)],\r\n x = tt_ran,\r\n dx = tt_ran[1]-tt_ran[0])\r\n\r\n # store integ. int\r\n pfd[i]['data'][pole_cnt,3] = II\r\n pfd[i]['data'][pole_cnt,4] = 0.0\r\n \r\n ## counter for Δk\r\n pfd[i]['pole_cnt'] += 1\r\n \r\n # export the pole figures\r\n if exportPFs: export_pfs(inst, desc, pfd, poleFig_path)\r\n\r\n # # # write the MTEX file\r\n # write_MTEX(desc, pfd, poleFig_path, smplSym='1', smplRot=smplRot, ranges=ranges, rot_phase=rot_phase) \r\n \r\n return pfd",
"def masterbias(input_file):\n #Set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all bias images\n bias = glob.glob('bias*.fits')\n print 'Loading bias images \\nTotal of bias files = ',len(bias),'\\nFiles = \\n'\n print bias\n print '\\nCreating superbias \\n'\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #copy bias images to save_path\n os.system('cp bias*.fits '+save_path)\n #change to sabe_path\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superbias.fits') == True:\n os.system('rm superbias.fits')\n # --------------------------------------------------------------------------\n # --- Using only with a few bias images\n #create the list of bias images\n #bias_list = string.join(bias,',')\n #combine the bias image and create the superbias\n #iraf.imcombine(bias_list,'superbias.fits')\n #iraf.imstat('superbias.fits')\n # --------------------------------------------------------------------------\n\n #Using numpy package to take the mean value of bias images\n #Problem: does not include the superbias header in this version\n bias_array = []\n for i in range(len(bias)):\n image = fits.getdata(bias[i])\n bias_array.append(np.array(image,dtype='Float64'))\n superbias_array = np.median(bias_array,axis=0)\n hdu_superbias = fits.PrimaryHDU(superbias_array)\n hdulist_superbias = fits.HDUList([hdu_superbias])\n hdulist_superbias.writeto('superbias.fits')\n\n #clean previos bias files\n print '\\n Cleaning bias*.fits images ....\\n'\n os.system('rm bias*.fits')\n print '\\n.... done.'\n #print output\n #test of outpu value\n #os.remove('superbias.fits')\n #Verify if the image was created:\n output = glob.glob('superbias*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #END of the masterbias reduction messsage\n print '\\nsuperbias.fits created!\\n'\n print '\\nEND of superbias reduction!\\n'\n #obtain the value of return\n if output == 1:\n print '!!! ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output",
"def save(self):\n if os.path.isfile(self.filename): os.remove(self.filename)\n fits.HDUList([self.primary_hdu, self.energs_hdu, self.params_hdu, self.spectra_hdu]).writeto(self.filename)",
"def reassemble(file, save=False):\n # Open the FITS file\n hdulist = fits.open(file, mode='update')\n filename = os.path.basename(file).replace('.fits', '')\n directory = os.path.join(os.path.dirname(file), filename + '_data')\n\n # Large file\n if os.path.isdir(directory):\n\n # Populate file with data\n for hdu in hdulist:\n\n # Get the real data files\n filestr = filename + '.{}.*'.format(hdu.name)\n files = glob(os.path.join(directory, filestr))\n\n # Load and recombine the data\n if len(files) > 0:\n data = np.concatenate([np.load(f) for f in files])\n else:\n data = None\n\n # Replace with real data\n hdulist[hdu.name].data = data\n\n # Write the file changes\n if save:\n hdulist.writeto(file, overwrite=True)\n shutil.rmtree(directory)\n\n return hdulist",
"def save_calibrated_thar(head, spec, calib, channel):\n k = calib['k']\n offset = calib['offset']\n xorder = calib['xorder']\n yorder = calib['yorder']\n coeff = calib['coeff']\n\n if channel is None:\n leading_str = 'HIERARCH GAMSE WLCALIB'\n else:\n leading_str = 'HIERARCH GAMSE WLCALIB CHANNEL %s'%channel\n head[leading_str+' K'] = k\n head[leading_str+' OFFSET'] = offset\n head[leading_str+' XORDER'] = xorder\n head[leading_str+' YORDER'] = yorder\n\n # write the coefficients\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n head[leading_str+' COEFF %d %d'%(j, i)] = coeff[j,i]\n\n head[leading_str+' MAXITER'] = calib['maxiter']\n head[leading_str+' STDDEV'] = calib['std']\n head[leading_str+' WINDOW_SIZE'] = calib['window_size']\n head[leading_str+' SNR_THRESHOLD'] = calib['snr_threshold']\n head[leading_str+' CLIPPING'] = calib['clipping']\n head[leading_str+' NTOT'] = calib['ntot']\n head[leading_str+' NUSE'] = calib['nuse']\n head[leading_str+' NPIXEL'] = calib['npixel']\n\n file_identlist = []\n\n # pack the identfied line list\n for aperture, list1 in calib['identlist'].items():\n for row in list1:\n file_identlist.append(row)\n\n pri_hdu = fits.PrimaryHDU(header=head)\n tbl_hdu1 = fits.BinTableHDU(spec)\n lst = [pri_hdu, tbl_hdu1]\n file_identlist = np.array(file_identlist, dtype=list1.dtype)\n tbl_hdu2 = fits.BinTableHDU(file_identlist)\n lst.append(tbl_hdu2)\n hdu_lst = fits.HDUList(lst)\n\n return hdu_lst",
"def convert_calculations(filename, hdf5_data):\n x1 = []\n\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n idx += 1\n\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n x2 = x1[idx].split()\n dset[0] = float(x2[0])\n dset[1] = float(x2[1])\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n idx = 6\n\n num_bodies = int(x1[idx].split()[0])\n\n for i in range(num_bodies):\n\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i+1) + '/'\n idx += 2\n\n mesh_x = []\n\n mesh_path = os.path.join(os.path.abspath(os.path.dirname(filename)), str(x1[idx].split()[0]).strip(' \\t\\n\\r'))\n\n with open(mesh_path, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n idx += 1\n x2 = x1[idx].split()\n\n num_points = int(x2[0])\n num_panels = int(x2[1])\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4), dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(mesh_x2[0])\n dset[0, 1] = int(mesh_x2[1])\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n idx += 1\n num = int(x1[idx].split()[0])\n for j in range(num):\n idx += 1\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, 
structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[2])\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[1])\n\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[2])\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])",
"def fit(self, resonance_file, experiment, out_paths):\n # Set up temporary file names #\n inp = temp_file_gen('Sammy_fit','inp')\n par = temp_file_gen('Sammy_fit','par')\n cov = temp_file_gen('Sammy_fit','cov')\n ndf = temp_file_gen('Sammy_fit','ndf')\n parout = temp_file_gen('Sammy_fit','out.par')\n covout = temp_file_gen('Sammy_fit','out.cov')\n #\n # Construct SAMMY input using resonance_file and information about the #\n # 'experiment' #\n self.endf2inp_par_ndf(resonance_file, [inp, par, ndf], \n experiment[1], flag_all = True)\n #\n # Change from MLBW formalism if this was in original file. #\n # Reich-Moore will be used instead, which is recommended. #\n self.modify_inp(inp, keyremove = ['mlbw formalism is wanted'])\n #\n # Fit to total cross section data without prior #\n message = self.g_least_squares(inp, par, experiment['total'],\n parout, covout)\n shutil.move(parout, par)\n shutil.move(covout, cov)\n #\n # Check if convergence was reached. Otherwise, something is bad. #\n if message[:len('Did not converge')] == 'Did not converge':\n raise RuntimeError(message)\n #\n # Perform a Beyesian update using capture data\n self.bayesian([inp, par, cov], experiment['capture'], [parout, covout])\n #\n # Construct ENDF formatted files from output #\n self.inp_par_ndf_cov2endfs([inp, parout, ndf, covout], out_paths)\n #\n # Include ENDF file paths in ResonanceFile instance to return\n resonance_file_out = ResonanceFile(out_paths[0], resonance_file.nuclide)\n resonance_file_out.cov = ResonanceCovFile(out_paths[1])\n #\n # Clean up\n if self.cleanup:\n for p in [inp, par, cov, ndf, parout, covout]: os.remove(p)\n #\n return resonance_file_out",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def collect_data(ra,dec,unix,Nspectra,dt,fileName,fitName,noise=False):\n with open('{}'.format(fileName), 'w') as pointFile:\n pointFile.write('{}'.format('agilent'))\n \n alt, az = get_altaz(ra[0],dec[0],jd =uni_to_jul(unix), lat=37.9183, lon=-122.1067, alt =304)\n LeuschTelescope.point(alt,az)\n print(LeuschTelescope.get_pointing())\n\n if noise:\n ugradio.leusch.LeuschNoise()\n LeuschNoise.on()\n \n ugradio.agilent.SynthClient(host='127.0.0.1')\n pointFile.write('{}'.format(SynthClient.get_frequency()))\n \n #initialize spectrometer thing\n leuschner.Spectrometer('10.0.1.2')\n \n for r,d in zip(ra,dec):\n obsv_time = uni_to_jul(time.time())\n alt,az = get_altaz(ra[0],dec[0], jd=obsv_time, lat=37.9183, lon=-122.1067, alt = 304)\n LeuschTelescope.point(alt,az)\n currentAlt, currentAz = leusch.get_pointing()\n print('alt: {} , az: {}'.format(currentAlt, currentAz))\n Spectrometer.read_spec('{}_{}_r_d.fits'.format(unix,fitName), Nspec, (r,d), 'eq')",
"def apply_radcal(self, input_radcal=None):\n if input_radcal is None:\n # Preflight radcal from HDF5 header file\n new_radcal = self.meta['radcal']\n else:\n # User-inputted radcal curve\n new_radcal = np.array(input_radcal)\n if len(new_radcal) != self.data.shape[-1]:\n print('Error: input_radcal must have the same number of elements'\n +' as the last dimension in the data array.')\n return self\n\n output_radcal = new_radcal\n if self.unit != u.photon:\n if str(self.radcal) == 'unknown':\n print('Error: Data currently has an unknown radcal applied.'\n +' Unable to apply new calibration.')\n return self\n elif np.all(self.radcal == new_radcal):\n print('Error: input_radcal is identical to current radcal.'\n +' No calculation is required.')\n return self\n else:\n print('Warning: Data currently has a different radcal applied.'\n +' Old calibration curve will be removed.')\n new_radcal = new_radcal/self.radcal\n\n new_data = self.data.copy()*new_radcal\n new_errs = self.uncertainty.array.copy()*new_radcal\n new_meta = copy.deepcopy(self.meta)\n new_meta['notes'].append('Applied radcal to convert photon counts to intensity')\n wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()\n\n output_cube = EISCube(new_data, wcs=self.wcs, uncertainty=new_errs,\n wavelength=self.wavelength, radcal=output_radcal,\n meta=new_meta, unit='erg / (cm2 s sr)',\n mask=self.mask, missing_axes=wcs_mask)\n return output_cube",
"def test_append_with_header(self):\n testfile = self.temp(\"test_append_1.fits\")\n with fits.open(self.data(\"test0.fits\")) as hdus:\n for hdu in hdus:\n fits.append(testfile, hdu.data, hdu.header, checksum=True)\n\n with fits.open(testfile, checksum=True) as hdus:\n assert len(hdus) == 5",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def generate_data(path=resource_filename('locals', 'data/fake/'), mag_range=(11.13,18)):\n # Get some random spectra\n try:\n files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]\n except:\n files = glob.glob('/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits')[::20]\n \n # Make a fake source catalog (with only essential columns for now)\n catpath = os.path.join(path,'fake_source_catalog.ecsv')\n ids = list(range(len(files)))\n coords = SkyCoord([89.7455]*len(ids), [-29.05744]*len(ids), unit='deg', frame='icrs')\n cat = at.QTable([ids,coords], names=('id','icrs_centroid'))\n cat.write(catpath)\n \n # Open the x1d file\n header = fits.getheader(resource_filename('locals', 'data/template_x1d.fits'))\n \n # Make Spectrum objects from models at R=150\n wavelength = np.arange(0.05,2.6,0.0001)[::66]*q.um\n \n # Normalize the spectra to a random F200W magnitude\n spectra = []\n f200w = Bandpass('NIRISS.F200W')\n f200w.wave_units = q.um\n for file in files:\n \n # Create Spectrum\n flux = fits.getdata(file)[-1][::66]*q.erg/q.s/q.cm**2/q.AA\n unc = flux/50.\n spec = Spectrum(wavelength, flux, unc)\n \n # Normalize to F200W\n mag = np.random.uniform(*mag_range)\n norm_spec = spec.renormalize(mag, f200w)\n spectra.append(norm_spec)\n \n # Make a separate x1d file and photometry file for each bandpass\n # containing data for each source\n for band in NIRISS_bands:\n \n try:\n \n # Get the Bandpass object\n bp = Bandpass(band)\n bp.wave_units = q.um\n \n # Make x1d file for spectra\n x1d_file = os.path.join(path,'{}_x1d.fits'.format(band))\n x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))\n \n # Make csv file for photometry\n phot_file = os.path.join(path,'{}_phot.csv'.format(band))\n phot_data = at.Table(names=('id','band','magnitude','magnitude_unc'), dtype=(int,'S20',float,float))\n \n # Iterate over spectra\n for id,(f,spec) in enumerate(zip(files,spectra)):\n \n # Trim spectrum to bandpass for x1d file\n spec = Spectrum(*spec.spectrum, trim=[(0*q.um,bp.WavelengthMin*1E-4*q.um),(bp.WavelengthMax*1E-4*q.um,10*q.um)])\n \n # Calculate magnitude and add to photometry table\n mag, mag_unc = spec.synthetic_magnitude(bp, force=True)\n phot_data.add_row([id, band, mag, mag_unc])\n \n # Add source spectrum params for verification\n params = f.split('/')[-1].split('-')\n header['TEFF'] = int(params[0].replace('lte',''))\n header['LOGG'] = float(params[1][:4])\n header['FEH'] = float(params[-6][:-8].split('+')[-1])\n header['FILEPATH'] = f\n header['PUPIL'] = band\n\n # Put spectrum in x1d fits file\n data = fits.BinTableHDU(data=np.rec.array(list(zip(*spec.data)),\n formats='float32,float32,float32',\n names='WAVELENGTH,FLUX,ERROR'),\n header=header)\n data.name = 'EXTRACT1D'\n \n x1d_hdu.append(data)\n \n # Write the photometry file\n phot_data.write(phot_file, format='ascii.csv')\n del phot_data\n \n # Write the x1d file\n x1d_hdu.writeto(x1d_file, overwrite=True)\n del x1d_hdu\n \n except IOError:\n pass",
"def add_files(self, file_dict):\n from xeye_calib import resize_rgb_b64\n if self.src_keys is None:\n self.src_keys, self.rgb_cam_list, self.rgb_of_depth_cam_list = init_cam_set(file_dict)\n self.src_keys_dict = {v: i for i, v in enumerate(self.src_keys)}\n logger.info('Init Calibrator done.')\n logger.info('src_keys_dict, {}'.format(self.src_keys_dict))\n logger.info('file_dict.keys, {}'.format(file_dict.keys()))\n for k, v in file_dict.items():\n filename = str(10000000 + self.counter)[1:]\n if k.startswith('cam'):\n if 'dept' in k:\n continue\n print(self.src_keys_dict.keys())\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.png')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n # print('calib data copy', v, dst_path)\n # print('calib data copy', v, dst_path, file=sys.stderr)\n # with open(self.record_path, 'a') as fout:\n # fout.write('cp ' + v + ' ' + dst_path + '\\n')\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n elif k.startswith('rgb'):\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.jpg')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n if self.resize_xeye:\n resize_rgb_b64(v, dst_path)\n else:\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n\n else:\n logger.warn('Unrocognize key: {}'.format(k))\n return\n self.counter += 1"
] | [
"0.62519884",
"0.6020593",
"0.57739514",
"0.5635872",
"0.5571112",
"0.543021",
"0.5419918",
"0.54129684",
"0.5387982",
"0.53669906",
"0.53284085",
"0.53150004",
"0.5314605",
"0.53093696",
"0.530017",
"0.5263094",
"0.52598375",
"0.5245885",
"0.5220459",
"0.5217278",
"0.52140796",
"0.5212574",
"0.5204419",
"0.51837265",
"0.517574",
"0.51731235",
"0.5166076",
"0.51635236",
"0.5118651",
"0.51092553"
] | 0.6727075 | 0 |
This function adds Raman experimental data to an existing hdf5 file. It uses the spectrafit.fit_data function to fit the data before saving the fit result and the raw data to the hdf5 file. The data_filename must be in a standardized format to interact properly with this function. It must take the form anyname_temp_time.xlsx (or .csv), since this function parses the temp and time from the filename to label the data and fit result in the hdf5 file. | def add_experiment(hdf5_filename, exp_filename):
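    """Add a Raman spectrum and its fit results to an existing hdf5 file.

    The spectrum in `exp_filename` (.xlsx or .csv, named like anyname_temp_time.xlsx)
    is fit with spectrafit.fit_data; the raw data, residuals, and per-peak fit
    parameters are then written to `hdf5_filename` under a '{temp}/{time}' group.
    """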
# handling input errors
if not isinstance(hdf5_filename, str):
raise TypeError('Passed value of `hdf5_filename` is not a string! Instead, it is: '
+ str(type(hdf5_filename)))
if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`hdf5_filename` is not type = .hdf5! Instead, it is: '
+ hdf5_filename.split('/')[-1].split('.')[-1])
if not isinstance(exp_filename, str):
raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '
+ str(type(exp_filename)))
    # confirm exp_filename is correct format (can handle additional decimals in exp_filename)
label = '.'.join(exp_filename.split('/')[-1].split('.')[:-1])
if len(label.split('_')) < 2:
        raise ValueError("""Passed value of `exp_filename` is inappropriate. exp_filename must contain
at least one '_', preferably of the format somename_temp_time.xlsx (or .csv)""")
# r+ is read/write mode and will fail if the file does not exist
exp_file = h5py.File(hdf5_filename, 'r+')
if exp_filename.split('.')[-1] == 'xlsx':
data = pd.read_excel(exp_filename, header=None, names=('wavenumber', 'counts'))
elif exp_filename.split('.')[-1] == 'csv':
data = pd.read_csv(exp_filename, header=None, names=('wavenumber', 'counts'))
    else:
        # fail early so `data` is never referenced while undefined below
        raise ValueError('data file type not recognized; expected .xlsx or .csv')
# ensure that the data is listed from smallest wavenumber first
if data['wavenumber'][:1].values > data['wavenumber'][-1:].values:
data = data.iloc[::-1]
data.reset_index(inplace=True, drop=True)
else:
pass
# peak detection and data fitting
fit_result, residuals = spectrafit.fit_data(data['wavenumber'].values, data['counts'].values)
# extract experimental parameters from filename
specs = exp_filename.split('/')[-1].split('.')[-2]
if len(specs) > 1:
spec = ''
for _, element in enumerate(specs):
spec = str(spec+element)
specs = spec
specs = specs.split('_')
time = specs[-1]
temp = specs[-2]
# write data to .hdf5
exp_file['{}/{}/wavenumber'.format(temp, time)] = data['wavenumber']
exp_file['{}/{}/counts'.format(temp, time)] = data['counts']
exp_file['{}/{}/residuals'.format(temp, time)] = residuals
for i, result in enumerate(fit_result):
# create custom datatype
        my_datatype = np.dtype([('fraction', float),
                                ('center', float),
                                ('sigma', float),
                                ('amplitude', float),
                                ('fwhm', float),
                                ('height', float),
                                ('area under the curve', float)])
if i < 9:
dataset = exp_file.create_dataset('{}/{}/Peak_0{}'.format(temp, time, i+1),
(1,), dtype=my_datatype)
else:
dataset = exp_file.create_dataset('{}/{}/Peak_{}'.format(temp, time, i+1),
(1,), dtype=my_datatype)
# apply data to tuple
data = tuple(result[:7])
data_array = np.array(data, dtype=my_datatype)
# write new values to the blank dataset
dataset[...] = data_array
print("""Data from {} fit with compound pseudo-Voigt model.
Results saved to {}.""".format(exp_filename, hdf5_filename))
exp_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_calibration(hdf5_filename, data_filename, label=None):\n # handling input errors\n if not isinstance(hdf5_filename, str):\n raise TypeError('Passed value of `cal_filename` is not a string! Instead, it is: '\n + str(type(hdf5_filename)))\n if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':\n raise TypeError('`cal_filename` is not type = .hdf5! Instead, it is: '\n + hdf5_filename.split('/')[-1].split('.')[-1])\n if not isinstance(data_filename, str):\n raise TypeError('Passed value of `data_filename` is not a string! Instead, it is: '\n + str(type(data_filename)))\n # r+ is read/write mode and will fail if the file does not exist\n cal_file = h5py.File(hdf5_filename, 'r+')\n if data_filename.split('.')[-1] == 'xlsx':\n data = pd.read_excel(data_filename, header=None, names=('wavenumber', 'counts'))\n elif data_filename.split('.')[-1] == 'csv':\n data = pd.read_csv(data_filename, header=None, names=('wavenumber', 'counts'))\n else:\n print('data file type not recognized')\n # ensure that the data is listed from smallest wavenumber first\n if data['wavenumber'][:1].values > data['wavenumber'][-1:].values:\n data = data.iloc[::-1]\n data.reset_index(inplace=True, drop=True)\n else:\n pass\n # peak detection and data fitting\n fit_result, residuals = spectrafit.fit_data(data['wavenumber'].values, data['counts'].values)\n # write data to .hdf5 using custom label if provided\n if label is not None:\n cal_file['{}/wavenumber'.format(label)] = data['wavenumber']\n cal_file['{}/counts'.format(label)] = data['counts']\n cal_file['{}/residuals'.format(label)] = residuals\n for i, result in enumerate(fit_result):\n # create custom datatype\n my_datatype = np.dtype([('fraction', np.float),\n ('center', np.float),\n ('sigma', np.float),\n ('amplitude', np.float),\n ('fwhm', np.float),\n ('height', np.float),\n ('area under the curve', np.float)])\n if i < 9:\n dataset = cal_file.create_dataset('{}/Peak_0{}'.format(label, i+1),\n (1,), dtype=my_datatype)\n else:\n dataset = cal_file.create_dataset('{}/Peak_0{}'.format(label, i+1),\n (1,), dtype=my_datatype)\n # apply data to tuple\n data = tuple(result[:7])\n data_array = np.array(data, dtype=my_datatype)\n # write new values to the blank dataset\n dataset[...] = data_array\n else:\n label = (data_filename.split('/')[-1]).split('.')[0]\n cal_file['{}/wavenumber'.format(label)] = data['wavenumber']\n cal_file['{}/counts'.format(label)] = data['counts']\n cal_file['{}/residuals'.format(label)] = residuals\n for i, result in enumerate(fit_result):\n # create custom datatype\n my_datatype = np.dtype([('fraction', np.float),\n ('center', np.float),\n ('sigma', np.float),\n ('amplitude', np.float),\n ('fwhm', np.float),\n ('height', np.float),\n ('area under the curve', np.float)])\n if i < 9:\n dataset = cal_file.create_dataset('{}/Peak_0{}'.format(label, i+1),\n (1,), dtype=my_datatype)\n else:\n dataset = cal_file.create_dataset('{}/Peak_{}'.format(label, i+1),\n (1,), dtype=my_datatype)\n # apply data to tuple\n data = tuple(result[:7])\n data_array = np.array(data, dtype=my_datatype)\n # write new values to the blank dataset\n dataset[...] = data_array\n print(\"\"\"Data from {} fit with compound pseudo-Voigt model.\n Results saved to {}.\"\"\".format(data_filename, hdf5_filename))\n cal_file.close()",
"def test_append_filename(self, home_is_temp):\n data = np.arange(6)\n testfile = self.temp(\"test_append_1.fits\")\n\n # Test case 1: creation of file\n fits.append(testfile, data=data, checksum=True)\n\n # Test case 2: append to existing file, with verify=True\n # Also test that additional keyword can be passed to fitsopen\n fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)\n\n # Test case 3: append to existing file, with verify=False\n fits.append(testfile, data=data * 3, checksum=True, verify=False)\n\n with fits.open(testfile, checksum=True) as hdu1:\n np.testing.assert_array_equal(hdu1[0].data, data)\n np.testing.assert_array_equal(hdu1[1].data, data * 2)\n np.testing.assert_array_equal(hdu1[2].data, data * 3)",
"def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)",
"def read_fit_results_rikhav(path,\n feature_names=('pulse integral fit',\n 'amplitude', 'rise time', 'decay time', 'chi2 reduced')\n ):\n\n data_files = sorted(glob.glob(os.path.join(path, '*.npy')))\n if not len(data_files):\n raise ValueError(\"No data files found!\")\n\n result = dict()\n result['type'] = []\n for k in feature_names:\n result[k.replace(' ', '_')] = []\n result['chi2'] = []\n result['t0'] = []\n result['tmax'] = []\n result['integral'] = []\n tl = TimeLine(numcomp=1, function='expflare')\n\n for i, df in enumerate(data_files):\n logging.info(f\"Reading file {df:s}, assigned type: {i}\")\n x = np.load(df, allow_pickle=True).tolist()\n for xi in x.values():\n for k in feature_names:\n result[k.replace(' ', '_')].append(xi[k])\n result['type'].append(i)\n result['chi2'].append(xi['chi2 reduced'] * (xi['data'].size - 4))\n\n result['t0'].append(xi['time'][0])\n result['tmax'].append(xi['time'][-1])\n\n result['integral'].append(tl.integral(0., 100.,\n tstep=1000,\n t0_000=10.,\n tr_000=result['rise_time'][-1] * 1e6, # s to micro s\n td_000=result['decay_time'][-1] * 1e6, # s to micro s\n A_000=-result['amplitude'][-1],\n c=0.)[0]) # integral in (micro s) * V\n if not np.isfinite(result['integral'][-1]):\n result['integral'][-1] = 1e20\n\n del x\n\n for k, v in result.items():\n result[k] = np.array(result[k])\n return result",
"def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))",
"def openMCSH5File(filename, verbose=False):\n rf = h5py.File(filename, 'r')\n \n stream = rf.require_group('/Data/Recording_0/AnalogStream/Stream_0')\n data = np.array(stream.get('ChannelData'),dtype=np.int)\n timestamps = np.array(stream.get('ChannelDataTimeStamps'))\n info = np.array(stream.get('InfoChannel'))\n \n Unit = info['Unit'][0]\n Tick = info['Tick'][0]/1e6\n exponent = info['Exponent'][0]\n convFact = info['ConversionFactor'][0]\n \n nRecCh, nFrames = data.shape\n channel_ids = info['ChannelID']\n assert len(np.unique(channel_ids)) == len(channel_ids), 'Duplicate MCS channel IDs found'\n electrodeLabels = info['Label']\n \n TimeVals = np.arange(timestamps[0][0],timestamps[0][2]+1,1)*Tick\n \n assert Unit==b'V', 'Unexpected units found, expected volts, found {}'.format(Unit.decode('UTF-8'))\n data_V = data*convFact.astype(float)*(10.0**(exponent))\n \n timestep_avg = np.mean(TimeVals[1:]-TimeVals[0:-1])\n timestep_std = np.std(TimeVals[1:]-TimeVals[0:-1])\n timestep_min = np.min(TimeVals[1:]-TimeVals[0:-1])\n timestep_max = np.min(TimeVals[1:]-TimeVals[0:-1])\n assert all(np.abs(np.array((timestep_min, timestep_max))-timestep_avg)/timestep_avg < 1e-6), 'Time steps vary by more than 1 ppm'\n samplingRate = 1./timestep_avg\n\n if verbose:\n print('# MCS H5 data format')\n print('#')\n print('# File: {}'.format(rf.filename))\n print('# File size: {:.2f} MB'.format(rf.id.get_filesize()/1024**2))\n print('#')\n for key in rf.attrs.keys():\n print('# {}: {}'.format(key,rf.attrs[key]))\n print('#')\n print('# Signal range: {:.2f} to {:.2f} µV'.format(np.amin(data_V)*1e6,np.amax(data_V)*1e6))\n print('# Number of channels: {}'.format(nRecCh))\n print('# Number of frames: {}'.format(nFrames))\n print('# Time step: {:.2f} µs ± {:.5f} % (range {} to {})'.format(timestep_avg*1e6, timestep_std/timestep_avg*100, timestep_min*1e6, timestep_max*1e6))\n print('# Sampling rate: {:.2f} Hz'.format(samplingRate))\n print('#')\n print('# MCSH5RecordingExtractor currently only reads /Data/Recording_0/AnalogStream/Stream_0')\n\n return (rf, nFrames, samplingRate, nRecCh, channel_ids, electrodeLabels, exponent, convFact)",
"def write_uvh5_part(\n self,\n filename,\n data_array,\n flag_array,\n nsample_array,\n check_header=True,\n antenna_nums=None,\n antenna_names=None,\n ant_str=None,\n bls=None,\n frequencies=None,\n freq_chans=None,\n times=None,\n time_range=None,\n polarizations=None,\n blt_inds=None,\n run_check_acceptability=True,\n add_to_history=None,\n ):\n # check that the file already exists\n if not os.path.exists(filename):\n raise AssertionError(\n \"{0} does not exists; please first initialize it with \"\n \"initialize_uvh5_file\".format(filename)\n )\n\n if check_header:\n self._check_header(\n filename, run_check_acceptability=run_check_acceptability\n )\n\n # figure out which \"full file\" indices to write data to\n blt_inds, freq_inds, pol_inds, _ = self._select_preprocess(\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n )\n\n # make sure that the dimensions of the data to write are correct\n if data_array.shape != flag_array.shape:\n raise AssertionError(\"data_array and flag_array must have the same shape\")\n if data_array.shape != nsample_array.shape:\n raise AssertionError(\n \"data_array and nsample_array must have the same shape\"\n )\n\n # check what part of each dimension to grab\n # we can use numpy slice objects to index the h5py indices\n if blt_inds is not None:\n Nblts = len(blt_inds)\n\n # test if blts are regularly spaced\n if len(set(np.ediff1d(blt_inds))) <= 1:\n blt_reg_spaced = True\n blt_start = blt_inds[0]\n blt_end = blt_inds[-1] + 1\n if len(blt_inds) == 1:\n d_blt = 1\n else:\n d_blt = blt_inds[1] - blt_inds[0]\n blt_inds = np.s_[blt_start:blt_end:d_blt]\n else:\n blt_reg_spaced = False\n else:\n Nblts = self.Nblts\n blt_reg_spaced = True\n blt_inds = np.s_[:]\n if freq_inds is not None:\n Nfreqs = len(freq_inds)\n\n # test if frequencies are regularly spaced\n if len(set(np.ediff1d(freq_inds))) <= 1:\n freq_reg_spaced = True\n freq_start = freq_inds[0]\n freq_end = freq_inds[-1] + 1\n if len(freq_inds) == 1:\n d_freq = 1\n else:\n d_freq = freq_inds[1] - freq_inds[0]\n freq_inds = np.s_[freq_start:freq_end:d_freq]\n else:\n freq_reg_spaced = False\n else:\n Nfreqs = self.Nfreqs\n freq_reg_spaced = True\n freq_inds = np.s_[:]\n if pol_inds is not None:\n Npols = len(pol_inds)\n\n # test if pols are regularly spaced\n if len(set(np.ediff1d(pol_inds))) <= 1:\n pol_reg_spaced = True\n pol_start = pol_inds[0]\n pol_end = pol_inds[-1] + 1\n if len(pol_inds) == 1:\n d_pol = 1\n else:\n d_pol = pol_inds[1] - pol_inds[0]\n pol_inds = np.s_[pol_start:pol_end:d_pol]\n else:\n pol_reg_spaced = False\n else:\n Npols = self.Npols\n pol_reg_spaced = True\n pol_inds = np.s_[:]\n\n # check for proper size of input arrays\n proper_shape = (Nblts, 1, Nfreqs, Npols)\n if data_array.shape != proper_shape:\n raise AssertionError(\n \"data_array has shape {0}; was expecting {1}\".format(\n data_array.shape, proper_shape\n )\n )\n\n # actually write the data\n with h5py.File(filename, \"r+\") as f:\n dgrp = f[\"/Data\"]\n visdata_dset = dgrp[\"visdata\"]\n flags_dset = dgrp[\"flags\"]\n nsamples_dset = dgrp[\"nsamples\"]\n visdata_dtype = visdata_dset.dtype\n if visdata_dtype not in (\"complex64\", \"complex128\"):\n custom_dtype = True\n else:\n custom_dtype = False\n\n # check if we can do fancy indexing\n # as long as at least 2 out of 3 axes can be written as slices,\n # we can be fancy\n n_reg_spaced = np.count_nonzero(\n [blt_reg_spaced, freq_reg_spaced, pol_reg_spaced]\n )\n if n_reg_spaced >= 
2:\n if custom_dtype:\n indices = (blt_inds, np.s_[:], freq_inds, pol_inds)\n _write_complex_astype(data_array, visdata_dset, indices)\n else:\n visdata_dset[blt_inds, :, freq_inds, pol_inds] = data_array\n flags_dset[blt_inds, :, freq_inds, pol_inds] = flag_array\n nsamples_dset[blt_inds, :, freq_inds, pol_inds] = nsample_array\n elif n_reg_spaced == 1:\n # figure out which axis is regularly spaced\n if blt_reg_spaced:\n for ifreq, freq_idx in enumerate(freq_inds):\n for ipol, pol_idx in enumerate(pol_inds):\n if custom_dtype:\n indices = (blt_inds, np.s_[:], freq_idx, pol_idx)\n _write_complex_astype(\n data_array[:, :, ifreq, ipol], visdata_dset, indices\n )\n else:\n visdata_dset[\n blt_inds, :, freq_idx, pol_idx\n ] = data_array[:, :, ifreq, ipol]\n flags_dset[blt_inds, :, freq_idx, pol_idx] = flag_array[\n :, :, ifreq, ipol\n ]\n nsamples_dset[\n blt_inds, :, freq_idx, pol_idx\n ] = nsample_array[:, :, ifreq, ipol]\n elif freq_reg_spaced:\n for iblt, blt_idx in enumerate(blt_inds):\n for ipol, pol_idx in enumerate(pol_inds):\n if custom_dtype:\n indices = (blt_idx, np.s_[:], freq_inds, pol_idx)\n _write_complex_astype(\n data_array[iblt, :, :, ipol], visdata_dset, indices\n )\n else:\n visdata_dset[\n blt_idx, :, freq_inds, pol_idx\n ] = data_array[iblt, :, :, ipol]\n flags_dset[blt_idx, :, freq_inds, pol_idx] = flag_array[\n iblt, :, :, ipol\n ]\n nsamples_dset[\n blt_idx, :, freq_inds, pol_idx\n ] = nsample_array[iblt, :, :, ipol]\n else: # pol_reg_spaced\n for iblt, blt_idx in enumerate(blt_inds):\n for ifreq, freq_idx in enumerate(freq_inds):\n if custom_dtype:\n indices = (blt_idx, np.s_[:], freq_idx, pol_inds)\n _write_complex_astype(\n data_array[iblt, :, ifreq, :], visdata_dset, indices\n )\n else:\n visdata_dset[\n blt_idx, :, freq_idx, pol_inds\n ] = data_array[iblt, :, ifreq, :]\n flags_dset[blt_idx, :, freq_idx, pol_inds] = flag_array[\n iblt, :, ifreq, :\n ]\n nsamples_dset[\n blt_idx, :, freq_idx, pol_inds\n ] = nsample_array[iblt, :, ifreq, :]\n else:\n # all axes irregularly spaced\n # perform a triple loop -- probably very slow!\n for iblt, blt_idx in enumerate(blt_inds):\n for ifreq, freq_idx in enumerate(freq_inds):\n for ipol, pol_idx in enumerate(pol_inds):\n if custom_dtype:\n indices = (blt_idx, np.s_[:], freq_idx, pol_idx)\n _write_complex_astype(\n data_array[iblt, :, ifreq, ipol],\n visdata_dset,\n indices,\n )\n else:\n visdata_dset[\n blt_idx, :, freq_idx, pol_idx\n ] = data_array[iblt, :, ifreq, ipol]\n flags_dset[blt_idx, :, freq_idx, pol_idx] = flag_array[\n iblt, :, ifreq, ipol\n ]\n nsamples_dset[\n blt_idx, :, freq_idx, pol_idx\n ] = nsample_array[iblt, :, ifreq, ipol]\n\n # append to history if desired\n if add_to_history is not None:\n history = np.string_(self.history) + np.string_(add_to_history)\n if \"history\" in f[\"Header\"]:\n # erase dataset first b/c it has fixed-length string datatype\n del f[\"Header\"][\"history\"]\n f[\"Header\"][\"history\"] = np.string_(history)\n\n return",
"def example_data_file():\n\n header1 = \"#Sample Interval: 0.100000 (seconds)\"\n header2 = \"Timestamp,AccelX,AccelY,RateX,RateY\"\n header3 = \"dd-mmm-yyyy HH:MM:SS.FFF,mm/s2,mm/s2,rad/s,rad/s\"\n\n start_date = dt.datetime(2016, 3, 17, 1, 0, 0)\n\n # Add one tenth of a second\n time_delta = dt.timedelta(0, 0, 0, 100)\n\n # Sample frequency in Hz\n sample_freq = 10\n\n # 20 in event duration in seconds\n Ts = 60 * 20\n\n # Number of points\n N = Ts * sample_freq\n\n # Array of times\n time = [start_date + i * time_delta for i in range(N)]\n time_str = [t.strftime(\"%Y-%m-%d %H:%M:%S.%f\") for t in time]\n\n ax, ay, Rx, Ry = example_data(sample_freq, Ts)\n\n data = [\n \",\".join([time_str[i], str(ax[i]), str(ay[i]), str(Rx[i]), str(Ry[i])]) for i in range(N)\n ]\n\n data.insert(0, header3)\n data.insert(0, header2)\n data.insert(0, header1)\n\n return \"\\n\".join(data)",
"def fit_HIDRA(runNumber, rootDir, dataDir, phases, mode='texture', sequential=False, liveplot=True, exportPFs=False, pfType='jul', smplRot=None, pbaridx=None, pbarcolor='WHITE', ranges=None, rot_phase=None):\r\n\r\n # define instrument\r\n inst = 'HIDRA'\r\n\r\n # load in .h5 file\r\n if mode == 'auto': \r\n fname = 'HB2B_{}.h5'.format(runNumber)\r\n desc = '{}_aut_uq'.format(runNumber)\r\n elif mode == 'texture': \r\n fname = 'HB2B_{}_texture.h5'.format(runNumber)\r\n desc = '{}_tex_uq'.format(runNumber)\r\n else: raise ValueError('mode not recognized..')\r\n \r\n exp_h5 = h5py.File(os.path.join(dataDir,fname), 'r')\r\n\r\n # read wavelength\r\n lmbda = exp_h5['instrument/monochromator setting/wave length'][()][0]\r\n\r\n # read angular data\r\n chi = exp_h5['raw data/logs/chi'][()]\r\n phi = exp_h5['raw data/logs/phi'][()]\r\n omega = exp_h5['raw data/logs/omega'][()]\r\n two_theta = exp_h5['reduced diffraction data/2theta'][()]\r\n\r\n # number of measured patterns (for loop)\r\n meas_num = len(phi)\r\n\r\n # read intensity data\r\n if mode == 'auto': #no eta slice\r\n \r\n # get from raw data/logs/2thetaSetpoint\r\n num_det_pos = len(np.unique(exp_h5['raw data/logs/2thetaSetpoint'][()]))\r\n num_eta_slice = 1\r\n\r\n eta_zero = np.nan_to_num(exp_h5['reduced diffraction data/main'][()])\r\n max_int = np.max(eta_zero)\r\n\r\n elif mode == 'texture': #should have eta slices\r\n\r\n num_det_pos = len(np.unique(exp_h5['raw data/logs/2thetaSetpoint'][()]))\r\n num_eta_slice = 3\r\n\r\n eta_neg5 = np.nan_to_num(exp_h5['reduced diffraction data/eta_-5.0'][()])\r\n eta_zero = np.nan_to_num(exp_h5['reduced diffraction data/eta_0.0'][()])\r\n eta_pos5 = np.nan_to_num(exp_h5['reduced diffraction data/eta_5.0'][()])\r\n\r\n max_int = np.max([np.max(eta) for eta in [eta_neg5, eta_zero, eta_pos5]])\r\n\r\n # close the h5 file\r\n exp_h5.close()\r\n\r\n # number of measured q\r\n rot_num = int((meas_num/num_det_pos)*num_eta_slice)\r\n\r\n ## fitting setup ##\r\n d_all = []\r\n ref_all = []\r\n cnt_all = []\r\n name_all = []\r\n\r\n ## get phase data ##\r\n for pi, (pn, ph) in enumerate(phases.items()):\r\n\r\n for k,v in ph.d_spacing(dmin=lmbda/2).items():\r\n\r\n d_all.append(v[0])\r\n ref_all.append(v[-1])\r\n cnt_all.append(pi)\r\n\r\n name_all.append(pn)\r\n\r\n sort_idx = np.argsort(d_all)\r\n d_all = [d_all[i] for i in sort_idx]\r\n ref_all = [ref_all[i] for i in sort_idx]\r\n cnt_all = [cnt_all[i] for i in sort_idx]\r\n tt_all = [2*np.rad2deg(np.arcsin(lmbda/(2*d))) for d in d_all]\r\n \r\n ## setup pole fig dictionary ##\r\n pfd = {}\r\n for i,(d,ref,pi,tt) in enumerate(zip(d_all,ref_all,cnt_all,tt_all)):\r\n \r\n pfd[i+1] = {}\r\n pfd[i+1]['phase'] = name_all[pi]\r\n pfd[i+1]['ref'] = ''.join(map(str,ref))\r\n pfd[i+1]['data'] = np.zeros(( rot_num, 5 ))\r\n pfd[i+1]['tt'] = tt\r\n pfd[i+1]['lattice'] = phases[name_all[pi]].lattice\r\n pfd[i+1]['lattice_type'] = phases[name_all[pi]].get_type()\r\n\r\n # for PF Δk index\r\n # will iterate +1 on each insertion\r\n # to account for variable # of points for each PF \r\n # (shouldn't be the case in CW?)\r\n pfd[i+1]['pole_cnt'] = 0\r\n \r\n # setup flag if it was fit or not\r\n pfd[i+1]['fit'] = False\r\n\r\n # where to store\r\n poleFig_path = os.path.join(rootDir,'pole_figs',desc)\r\n fitResult_path = os.path.join(rootDir,'fit_results',desc,'params')\r\n fitImage_path = os.path.join(rootDir,'fit_results',desc,'figures')\r\n\r\n if not os.path.exists(fitResult_path): os.makedirs(fitResult_path)\r\n if not os.path.exists(fitImage_path): 
os.makedirs(fitImage_path)\r\n if not os.path.exists(poleFig_path): os.makedirs(poleFig_path)\r\n\r\n # progress bar setup\r\n if pbaridx is None:\r\n refine_pbar = tqdm(range(meas_num),desc=desc)\r\n else:\r\n refine_pbar = tqdm(range(meas_num),desc=desc, position=pbaridx)\r\n \r\n border = \"=\"*80\r\n clear_border = _term_move_up() + \"\\r\" + \" \"*len(border) + \"\\r\"\r\n\r\n liveplot = False\r\n\r\n # ## figure setup\r\n # if liveplot is True:\r\n \r\n # fig = plt.figure(figsize=(12.8,4.8),constrained_layout=True)\r\n # gs = fig.add_gridspec(5,4)\r\n # ax1 = fig.add_subplot(gs[:4,:2])\r\n # ax2 = fig.add_subplot(gs[:4,2:])\r\n # ax3 = fig.add_subplot(gs[4,:2])\r\n # plt.pause(0.05)\r\n\r\n k = 0\r\n\r\n ## loop over rotations\r\n for ri in refine_pbar:\r\n\r\n t0 = time.time()\r\n\r\n # easy to reference these later \r\n o = omega[ri]\r\n c = 90 - chi[ri]\r\n p = 360 - phi[ri]\r\n \r\n if mode == 'auto': inner_iter = zip([eta_zero],[0])\r\n elif mode == 'texture': inner_iter = zip([eta_neg5, eta_zero, eta_pos5],[-5, 0, 5])\r\n # inner_iter = zip([eta_neg5, eta_zero, eta_pos5],[-5, 0, 5])\r\n\r\n # loop over data\r\n for meas_int,eta in inner_iter:\r\n\r\n # refine_pbar.write('\\n')\r\n\r\n t2 = time.time()\r\n\r\n # if o*2 < 90: continue\r\n\r\n counter = 0\r\n\r\n label = 'tt{}_o{}_c{}_p{}_e{}'.format(round(o*2),round(o),round(c),round(p),270 - eta)\r\n \r\n # get mask on invalid data on edges\r\n valid_data = ma.masked_where(meas_int[ri,:]==0,meas_int[ri,:])\r\n valid = ~valid_data.mask\r\n\r\n # get 2theta range of measurement\r\n tt_ran = two_theta[ri,valid]\r\n\r\n # get weights\r\n weights = 1 / meas_int[ri,valid]**2\r\n # get intensity\r\n inten = meas_int[ri,valid]\r\n \r\n # find what peaks are present\r\n tt_mask = (tt_all >= min(tt_ran)) * (tt_all <= max(tt_ran))\r\n tt_pres = list(itertools.compress(tt_all,tt_mask))\r\n # only these are present\r\n tt_pres_num = list(itertools.compress(range(len(tt_all)),tt_mask))\r\n # adjust index\r\n tt_pres_num = [v+1 for v in tt_pres_num]\r\n \r\n # num of peaks\r\n num_peaks = len(tt_pres_num)\r\n\r\n # setup lmfit model\r\n model = ConstantModel()\r\n for i in tt_pres_num:\r\n # add individual peaks\r\n model = model + PseudoVoigtModel(prefix='p{}_'.format(i))\r\n \r\n ## initialize params\r\n params = model.make_params()\r\n \r\n # guess the background\r\n I_bkgd = np.median(inten)\r\n params['c'].set(value = I_bkgd)\r\n\r\n # set peak initial parameters\r\n for i in tt_pres_num:\r\n \r\n\r\n \r\n pk_loc = pfd[i]['tt']\r\n pk_loc_lo = pfd[i]['tt'] - 0.5\r\n pk_loc_hi = pfd[i]['tt'] + 0.5\r\n\r\n loi = np.argmin( np.abs( tt_ran - pk_loc_lo ) )\r\n hii = np.argmin( np.abs( tt_ran - pk_loc_hi ) )\r\n\r\n I_guess = (np.max(inten[loi:hii]) - I_bkgd)/2\r\n if I_guess < 4:\r\n I_guess = 1E-2\r\n TT_guess = tt_ran[np.argmax(inten[loi:hii])+loi]\r\n\r\n # set center\r\n params['p{}_center'.format(i)].set(value = TT_guess,\r\n min = TT_guess - 0.5,\r\n max = TT_guess + 0.5)\r\n # set amplitude\r\n # print(f'{i} - {pk_loc}:{TT_guess} - {I_guess}')\r\n # print(f'{pk_loc_lo} - {pk_loc_hi}')\r\n # print(f'{i} - {inten[loi:hii]}')\r\n params['p{}_amplitude'.format(i)].set(I_guess, min=0)\r\n \r\n # set lims on FWHM\r\n params['p{}_sigma'.format(i)].set(value=0.2,min=0,max=0.35)\r\n\r\n # setup file to save parameters (.json)\r\n fitResult = os.path.join(fitResult_path,'fitParams_{}.json'.format(label))\r\n\r\n if sequential:\r\n # skip on first run\r\n if counter == 0: pass\r\n else: \r\n priorFitResult = os.path.join(fitResult_path,\r\n 
'fitParams_{}.json'.format(prev_label))\r\n with open(priorFitResult,'r') as f_in:\r\n params = params.load(f_in)\r\n \r\n # fit model\r\n\r\n t3 = time.time()\r\n\r\n # refine_pbar.write('model setup time:{}'.format(t3-t2)) \r\n\r\n init = model.eval(params, x=tt_ran)\r\n out = model.fit(meas_int[ri, valid],\r\n params,\r\n x=tt_ran, \r\n fit_kws={'gtol':1E-3,\r\n 'xtol':1E-3,\r\n 'ftol':1E-3},\r\n method='least_squares') \r\n\r\n comps = out.eval_components(x=tt_ran)\r\n\r\n t4 = time.time()\r\n \r\n # refine_pbar.write('model fit time:{}'.format(t4-t3))\r\n\r\n out_pars = out.params.copy()\r\n n_boot = 100\r\n II = {}\r\n II_esd = {}\r\n\r\n # # Get uncertainty estimate for integrated intensity (?)\r\n # for comp in out.model.components:\r\n # if 'linear' in comp.name: continue\r\n # elif 'constant' in comp.name: continue\r\n # # Get the names and params\r\n # comp_par_names = comp._param_names\r\n # comp_pars = []\r\n # for par_name in comp_par_names:\r\n # par = out_pars[par_name]\r\n # if par.stderr is None:\r\n # comp_pars.append(np.ones(n_boot)*par.value)\r\n # # tqdm.write(str(par))\r\n # else:\r\n # try:\r\n # comp_pars.append(norm.rvs(loc=par.value,scale=par.stderr,size=n_boot))\r\n # except ValueError:\r\n # comp_pars.append(np.ones(n_boot)*par.value)\r\n\r\n # comp_pars = np.asarray(comp_pars).T\r\n # tt_ran2 = np.tile(tt_ran, [n_boot,1])\r\n # calc = comp.func(tt_ran2, comp_pars[:,0][:,None],comp_pars[:,1][:,None],comp_pars[:,2][:,None],comp_pars[:,3][:,None])\r\n # comp_II = np.trapz(calc, x=tt_ran2, dx=tt_ran[1]-tt_ran[0])\r\n\r\n\r\n # # comp_pars = np.asarray(comp_pars).T\r\n\r\n # # comp_II = []\r\n\r\n # # for n in range(n_boot):\r\n # # # Evaluate the new set\r\n # # calc = comp.func(tt_ran,amplitude=comp_pars[n,0],center=comp_pars[n,1],sigma=comp_pars[n,2],fraction=comp_pars[n,3])\r\n # # comp_II.append(np.trapz(y = calc,\r\n # # x = tt_ran,\r\n # # dx = tt_ran[1]-tt_ran[0]))\r\n \r\n # comp_II = removeOutliers(comp_II, 1.5)\r\n # II[comp.prefix] = np.mean(comp_II)\r\n # II_esd[comp.prefix] = np.std(comp_II)\r\n\r\n # # esd = out.params[comp_par_names[0]].stderr\r\n # # print(f'{II[comp.prefix]} - {II_esd[comp.prefix]} - {esd} | {comp.prefix}')\r\n\r\n # Get uncertainty estimate for integrated intensity - fast way, just use cov\r\n for comp in out.model.components:\r\n if 'linear' in comp.name: continue\r\n elif 'constant' in comp.name: continue\r\n comp_par_names = comp._param_names\r\n # II[comp.prefix] = np.mean(out.params[comp_par_names[0]].stderr)\r\n esd = out.params[comp_par_names[0]].stderr\r\n if esd is None:\r\n II_esd[comp.prefix] = 0.0\r\n elif np.isnan(esd) is False:\r\n II_esd[comp.prefix] = esd\r\n else:\r\n II_esd[comp.prefix] = 0.0\r\n\r\n prev_label = label\r\n\r\n # calculate weighted R (fit quality)\r\n rwp = np.sum( weights * out.residual**2 ) / np.sum( weights * inten**2 )\r\n\r\n # write to console\r\n # this goes fast.. 
only print if there's a problem\r\n if not out.success: \r\n refine_pbar.write(clear_border + '--- ω:{} | χ:{} | φ:{} | η:{} ---'.format(int(o),int(c),int(p),int(eta)))\r\n refine_pbar.update()\r\n refine_pbar.write(clear_border + 'Fit was not successful!')\r\n refine_pbar.update()\r\n refine_pbar.write(clear_border + 'Rwp : {:3.2f}%'.format(rwp*100))\r\n refine_pbar.update()\r\n refine_pbar.write(border)\r\n refine_pbar.update()\r\n\r\n # save fit params for posterity\r\n with open(fitResult,'w') as f_out:\r\n out.params.dump(f_out) \r\n\r\n t5 = time.time()\r\n\r\n # refine_pbar.write('model output time:{}'.format(t5-t4))\r\n\r\n # store peak intensity\r\n for i in tt_pres_num:\r\n\r\n # get q counter\r\n pole_cnt = pfd[i]['pole_cnt']\r\n\r\n # get 2theta\r\n tt = out.params['p{}_center'.format(i)].value\r\n\r\n # get projection (q)\r\n q = rotate_project_q(tt/2, o, c, p, 270 - eta) #was 360 - p\r\n\r\n # store it\r\n pfd[i]['data'][pole_cnt,0] = q[0]\r\n pfd[i]['data'][pole_cnt,1] = q[1]\r\n pfd[i]['data'][pole_cnt,2] = q[2] \r\n\r\n # tell me it's fit\r\n pfd[i]['fit'] = True\r\n\r\n # tell me what type to output\r\n pfd[i]['type'] = pfType\r\n \r\n # integrate\r\n II = np.trapz(y = comps['p{}_'.format(i)],\r\n x = tt_ran,\r\n dx = tt_ran[1]-tt_ran[0])\r\n\r\n # # store integ. int\r\n # pfd[i]['data'][pole_cnt,3] = II\r\n \r\n # store integ. int\r\n pfd[i]['data'][pole_cnt,3] = II\r\n pfd[i]['data'][pole_cnt,4] = II_esd['p{}_'.format(i)]\r\n\r\n ## counter for Δk\r\n pfd[i]['pole_cnt'] += 1\r\n \r\n # too fast to plot live\r\n if liveplot is True:\r\n\r\n # ## figure setup\r\n fig = plt.figure(figsize=(12.8,4.8),constrained_layout=True)\r\n gs = fig.add_gridspec(5,4)\r\n ax1 = fig.add_subplot(gs[:4,:2])\r\n ax2 = fig.add_subplot(gs[:4,2:])\r\n ax3 = fig.add_subplot(gs[4,:2])\r\n\r\n # if k > 0:\r\n # ax1.clear()\r\n # ax2.clear()\r\n # ax3.clear()\r\n \r\n ## print result plot \r\n ax1.plot(tt_ran, inten, 'b')\r\n ax1.plot(tt_ran, init, 'k--', label='initial fit')\r\n ax1.plot(tt_ran, out.best_fit, 'r-', label='best fit')\r\n ax3.plot(tt_ran, out.best_fit - inten, 'g-')\r\n ax2.plot(tt_ran, inten, 'b')\r\n \r\n for i in tt_pres_num:\r\n \r\n ax2.plot(tt_ran, comps['p{}_'.format(i)], '--', label='Peak {}_{}'.format(pfd[i]['phase'],pfd[i]['ref']))\r\n \r\n # housekeeping\r\n ax1.legend(loc='best')\r\n if num_peaks < 7: ax2.legend(loc='best')\r\n ax1.set_ylim(0,max_int+50)\r\n ax2.set_ylim(0,max_int+50)\r\n ax1.set_ylabel('Intensity')\r\n ax1.set_xlabel('2θ (degrees)')\r\n ax2.set_ylabel('Intensity')\r\n ax2.set_xlabel('2θ (degrees)')\r\n ax3.set_ylabel('Difference')\r\n ax3.set_xlabel('2θ (degrees)')\r\n\r\n ax2.set_ylim(top=0.20*np.max(meas_int))\r\n \r\n # plt.pause(0.05) \r\n # plt.show() \r\n\r\n ## save fit image for posterity\r\n # plt.savefig(os.path.join(fitImage_path,'fit_{}'.format(label)),dpi=300)\r\n plt.close()\r\n\r\n k += 1\r\n\r\n t6 = time.time()\r\n\r\n # refine_pbar.write('plot save time:{}'.format(t6-t5))\r\n\r\n ## close out\r\n if liveplot: plt.close()\r\n \r\n # export the pole figures\r\n export_pfs(inst, desc, pfd, poleFig_path)\r\n\r\n # # write the MTEX file\r\n write_MTEX(desc, pfd, poleFig_path, smplSym='1', smplRot=smplRot, ranges=ranges, rot_phase=rot_phase)",
"def write(self,data): \n \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n units = {'A':'K','x0':'degrees','y0':'degrees','sigx':'degrees','sigy':'degrees','sigy_scale':'none','B':'K','phi':'radians'}\n\n outfile = '{}/{}_{}'.format(self.output_dir,self.prefix,fname)\n\n print ('WRITING: ',outfile)\n output = h5py.File(outfile,'a')\n\n # Set permissions and group\n os.chmod(outfile,0o664)\n shutil.chown(outfile, group='comap')\n\n ##\n ## Narrow channel fits\n ##\n\n for valerr in ['Values','Errors','Chi2']:\n if f'Gauss_Narrow_{valerr}' in output:\n del output[f'Gauss_Narrow_{valerr}']\n gauss_fits = output.create_group(f'Gauss_Narrow_{valerr}')\n gauss_fits.attrs['FitFunc'] = self.model.__name__\n gauss_fits.attrs['source_el'] = self.source_positions['mean_el']\n gauss_fits.attrs['source_az'] = self.source_positions['mean_az']\n\n dnames = self.map_parameters\n dsets = [self.map_fits[valerr][...,iparam] for iparam in range(self.map_fits[valerr].shape[-1])]\n\n for (dname, dset) in zip(dnames, dsets):\n if dname in output:\n del output[dname]\n print(dname,dset.shape,units[dname])\n gauss_dset = gauss_fits.create_dataset(dname, data=dset)\n gauss_dset.attrs['Unit'] = units[dname]\n \n\n output.attrs['SourceFittingVersion'] = __version__\n output.attrs['source'] = self.getSource(data)\n output.close()\n self.linkfile(data)",
"def generate_data(path=resource_filename('locals', 'data/fake/'), mag_range=(11.13,18)):\n # Get some random spectra\n try:\n files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]\n except:\n files = glob.glob('/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits')[::20]\n \n # Make a fake source catalog (with only essential columns for now)\n catpath = os.path.join(path,'fake_source_catalog.ecsv')\n ids = list(range(len(files)))\n coords = SkyCoord([89.7455]*len(ids), [-29.05744]*len(ids), unit='deg', frame='icrs')\n cat = at.QTable([ids,coords], names=('id','icrs_centroid'))\n cat.write(catpath)\n \n # Open the x1d file\n header = fits.getheader(resource_filename('locals', 'data/template_x1d.fits'))\n \n # Make Spectrum objects from models at R=150\n wavelength = np.arange(0.05,2.6,0.0001)[::66]*q.um\n \n # Normalize the spectra to a random F200W magnitude\n spectra = []\n f200w = Bandpass('NIRISS.F200W')\n f200w.wave_units = q.um\n for file in files:\n \n # Create Spectrum\n flux = fits.getdata(file)[-1][::66]*q.erg/q.s/q.cm**2/q.AA\n unc = flux/50.\n spec = Spectrum(wavelength, flux, unc)\n \n # Normalize to F200W\n mag = np.random.uniform(*mag_range)\n norm_spec = spec.renormalize(mag, f200w)\n spectra.append(norm_spec)\n \n # Make a separate x1d file and photometry file for each bandpass\n # containing data for each source\n for band in NIRISS_bands:\n \n try:\n \n # Get the Bandpass object\n bp = Bandpass(band)\n bp.wave_units = q.um\n \n # Make x1d file for spectra\n x1d_file = os.path.join(path,'{}_x1d.fits'.format(band))\n x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))\n \n # Make csv file for photometry\n phot_file = os.path.join(path,'{}_phot.csv'.format(band))\n phot_data = at.Table(names=('id','band','magnitude','magnitude_unc'), dtype=(int,'S20',float,float))\n \n # Iterate over spectra\n for id,(f,spec) in enumerate(zip(files,spectra)):\n \n # Trim spectrum to bandpass for x1d file\n spec = Spectrum(*spec.spectrum, trim=[(0*q.um,bp.WavelengthMin*1E-4*q.um),(bp.WavelengthMax*1E-4*q.um,10*q.um)])\n \n # Calculate magnitude and add to photometry table\n mag, mag_unc = spec.synthetic_magnitude(bp, force=True)\n phot_data.add_row([id, band, mag, mag_unc])\n \n # Add source spectrum params for verification\n params = f.split('/')[-1].split('-')\n header['TEFF'] = int(params[0].replace('lte',''))\n header['LOGG'] = float(params[1][:4])\n header['FEH'] = float(params[-6][:-8].split('+')[-1])\n header['FILEPATH'] = f\n header['PUPIL'] = band\n\n # Put spectrum in x1d fits file\n data = fits.BinTableHDU(data=np.rec.array(list(zip(*spec.data)),\n formats='float32,float32,float32',\n names='WAVELENGTH,FLUX,ERROR'),\n header=header)\n data.name = 'EXTRACT1D'\n \n x1d_hdu.append(data)\n \n # Write the photometry file\n phot_data.write(phot_file, format='ascii.csv')\n del phot_data\n \n # Write the x1d file\n x1d_hdu.writeto(x1d_file, overwrite=True)\n del x1d_hdu\n \n except IOError:\n pass",
"def _update_hdf5_file(self, field_name, saveformat, data, timestep, t):\n assert saveformat == \"hdf5\"\n fullname, metadata = self._get_datafile_name(field_name, saveformat, timestep)\n\n # Create \"good enough\" hash. This is done to avoid data corruption when restarted from\n # different number of processes, different distribution or different function space\n local_hash = sha1()\n local_hash.update(str(data.function_space().mesh().num_cells()))\n local_hash.update(str(data.function_space().ufl_element()))\n local_hash.update(str(data.function_space().dim()))\n local_hash.update(str(MPI.size(mpi_comm_world())))\n\n # Global hash (same on all processes), 10 digits long\n global_hash = MPI.sum(mpi_comm_world(), int(local_hash.hexdigest(), 16))\n global_hash = str(int(global_hash%1e10)).zfill(10)\n\n #key = (field_name, saveformat)\n #datafile = self._datafile_cache.get(key)\n #if datafile is None:\n # datafile = HDF5File(mpi_comm_world(), fullname, 'w')\n # self._datafile_cache[key] = datafile\n\n # Open HDF5File\n if not os.path.isfile(fullname):\n datafile = HDF5File(mpi_comm_world(), fullname, 'w')\n else:\n datafile = HDF5File(mpi_comm_world(), fullname, 'a')\n\n # Write to hash-dataset if not yet done\n if not datafile.has_dataset(global_hash) or not datafile.has_dataset(global_hash+\"/\"+field_name):\n datafile.write(data, str(global_hash)+\"/\"+field_name)\n\n if not datafile.has_dataset(\"Mesh\"):\n datafile.write(data.function_space().mesh(), \"Mesh\")\n\n # Write vector to file\n # TODO: Link vector when function has been written to hash\n datafile.write(data.vector(), field_name+str(timestep)+\"/vector\")\n\n # HDF5File.close is broken in 1.4\n if dolfin_version() == \"1.4.0+\":\n datafile.close()\n del datafile\n # Link information about function space from hash-dataset\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/x_cell_dofs\", field_name+str(timestep)+\"/x_cell_dofs\")\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/cell_dofs\", field_name+str(timestep)+\"/cell_dofs\")\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/cells\", field_name+str(timestep)+\"/cells\")\n\n return metadata",
"def read_fit_results_axel(path):\n\n data_files = glob.glob(os.path.join(path, \"*.npy\"))\n\n tl = TimeLine(numcomp=1, function='expflare')\n if not len(data_files):\n raise ValueError(\"No data files found!\")\n\n for i, result_file in enumerate(data_files):\n r = np.load(result_file, allow_pickle=True).flat[0]\n logging.info(\"Reading file {0:s} of data type {1:n}\".format(result_file, r['type'][0]))\n\n r['integral'] = np.zeros_like(r['rise'])\n # add the integral\n for j in range(r['rise'].size):\n r['integral'][j] = tl.integral(0., 100.,\n tstep=1000,\n t0_000=r['peak'][j] * 1e6, # s to micro s\n tr_000=r['rise'][j] * 1e6, # s to micro s\n td_000=r['decay'][j] * 1e6, # s to micro s\n A_000=-r['ampli'][j],\n c=0.) # integral in (micro s) * V\n if not np.isfinite(r['integral'][j]):\n r['integral'][j] = 1e20\n if not i:\n result = r\n else:\n for k, v in result.items():\n result[k] = np.append(v, r[k])\n\n return result",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def insert_spec_data(spectre_points_filename, spec_data_filename):\n # Read the interpolated data into a numpy array\n data_to_insert = np.loadtxt(spec_data_filename)\n\n # Get the legend\n spec_file = open(spec_data_filename, 'r')\n lines = spec_file.readlines()\n\n # spec output lists the components as a comment on the second line\n # in the format '# psitt psitx ...'\n legend_line = lines[1][2:]\n legend_line = rename_variables(legend_line)\n legend = legend_line.split(\" \")\n\n legend_dict = {}\n for i, key in enumerate(legend):\n legend_dict[key] = i\n spec_file.close()\n\n # Open file read-only to determine observation_id\n spectre_file = h5py.File(spectre_points_filename, 'r')\n observation_id = list(spectre_file['element_data.vol'].keys())[0]\n spectre_file.close()\n\n # Open file ready to append data\n output_file = h5py.File(spectre_points_filename, 'a')\n\n # Loop over keys\n for key in legend_dict:\n print(\"Inserting \" + key)\n spec_data = data_to_insert[:, legend_dict[key]]\n output_file['element_data.vol'][observation_id][key] = spec_data\n\n output_file.close()\n return legend_dict, data_to_insert",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def test_add_to_hdf5_cmd(tmpdir, datadir):\n filename_original = datadir.join(\"test_sensitivity_cube.fits\").strpath\n \n # Make some files to input\n scube_fn1 = tmpdir.join(\"20181203v013_multi_324_062_055.fits\").strpath\n scube_fn2 = tmpdir.join(\"20181203v013_multi_013_103_019.fits\").strpath\n copy(filename_original, scube_fn1)\n copy(filename_original, scube_fn2)\n\n output = tmpdir.join(\"test_output.h5\").strpath\n\n # Run with command line arguments passed\n args = [\"--regex\", \".*(2[0-9]{7}v[0-9]{3})_multi_[0-9]{3}_([0-9]{3})\",\n scube_fn1, scube_fn2, output] \n add_sensitivity_cube_to_hdf5(args=args)\n \n assert isfile(output)",
"def add_table_to_hdf(self, run_group, type_dict, data, name = 'bla',filename = []):\n\t\tif filename == []:\n\t\t\tfilename = self.edf_operator.inputFileName\n\t\t\t\n\t\tthis_table = self.h5f.createTable(run_group, name, type_dict, '%s in file %s' % (name, self.edf_operator.inputFileName))\n\t\t\n\t\trow = this_table.row\n\t\tfor r in data:\n\t\t\tfor par in r.keys():\n\t\t\t\trow[par] = r[par]\n\t\t\trow.append()\n\t\tthis_table.flush()",
"def collect_data(ra,dec,unix,Nspectra,dt,fileName,fitName,noise=False):\n with open('{}'.format(fileName), 'w') as pointFile:\n pointFile.write('{}'.format('agilent'))\n \n alt, az = get_altaz(ra[0],dec[0],jd =uni_to_jul(unix), lat=37.9183, lon=-122.1067, alt =304)\n LeuschTelescope.point(alt,az)\n print(LeuschTelescope.get_pointing())\n\n if noise:\n ugradio.leusch.LeuschNoise()\n LeuschNoise.on()\n \n ugradio.agilent.SynthClient(host='127.0.0.1')\n pointFile.write('{}'.format(SynthClient.get_frequency()))\n \n #initialize spectrometer thing\n leuschner.Spectrometer('10.0.1.2')\n \n for r,d in zip(ra,dec):\n obsv_time = uni_to_jul(time.time())\n alt,az = get_altaz(ra[0],dec[0], jd=obsv_time, lat=37.9183, lon=-122.1067, alt = 304)\n LeuschTelescope.point(alt,az)\n currentAlt, currentAz = leusch.get_pointing()\n print('alt: {} , az: {}'.format(currentAlt, currentAz))\n Spectrometer.read_spec('{}_{}_r_d.fits'.format(unix,fitName), Nspec, (r,d), 'eq')",
"def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return",
"def convert_calculations(filename, hdf5_data):\n x1 = []\n\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n idx += 1\n\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n x2 = x1[idx].split()\n dset[0] = float(x2[0])\n dset[1] = float(x2[1])\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n idx = 6\n\n num_bodies = int(x1[idx].split()[0])\n\n for i in range(num_bodies):\n\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i+1) + '/'\n idx += 2\n\n mesh_x = []\n\n mesh_path = os.path.join(os.path.abspath(os.path.dirname(filename)), str(x1[idx].split()[0]).strip(' \\t\\n\\r'))\n\n with open(mesh_path, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n idx += 1\n x2 = x1[idx].split()\n\n num_points = int(x2[0])\n num_panels = int(x2[1])\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4), dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(mesh_x2[0])\n dset[0, 1] = int(mesh_x2[1])\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n idx += 1\n num = int(x1[idx].split()[0])\n for j in range(num):\n idx += 1\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, 
structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[2])\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[1])\n\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[2])\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])",
"def write_inputdata_file(filename = None, nerve_conditions = None, onsettime_samples = None):\n if not filename:\n print \"filename not valid\"\n return\n if not nerve_conditions:\n print \"no nerve_conditions given\"\n return\n if not onsettime_samples:\n print \"no onsettime_samples given\"\n return\n \n a_time_samples = np.array([onsettime_samples[nervcond] for nervcond in nerve_conditions]).T\n \n outfile = open(os.path.join('/extra/InVivoDog/Elazar/inputdata', filename),\n 'wt')\n \n csv_writer = csv.writer(outfile)\n \n csv_writer.writerow(['File #'] + nerve_conditions)\n \n for num, row in enumerate(a_time_samples):\n filenum = num + 1\n \n csv_writer.writerow([filenum] + list(row))\n \n outfile.close()",
"def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()",
"def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()",
"def save_as_hdf5(self, filename):",
"def experiment():\n error = 0\n st.write(\"# New experiment\")\n st.write(\"## Files\")\n\n recorder = {}\n recorder[\"experiment\"] = {}\n\n cwd = os.getcwd()\n file_folder = st.text_input(\n \"Enter path to folder that contains all experimental files. AlphaPept will parse for raw (.d / .raw), FASTA and AlphaPept database (.db_files.hdf) files and add them to the experiment.\",\n cwd,\n )\n\n if not os.path.isdir(file_folder):\n st.warning(\"Not a valid folder.\")\n else:\n with st.spinner(\"Parsing folder\"):\n\n raw_files, fasta_files, db_files = parse_folder(file_folder)\n\n if st.button(\"Reload folder\"):\n raw_files, fasta_files, db_files = parse_folder(file_folder)\n\n fasta_files = [os.path.join(file_folder, _) for _ in fasta_files]\n\n recorder[\"experiment\"][\"file_paths\"] = [\n os.path.join(file_folder, _) for _ in raw_files\n ]\n\n if len(raw_files) == 0:\n st.warning(\"No raw files in folder.\")\n\n else:\n exclude = st.multiselect(\"Exclude files\", raw_files)\n raw_files = [_ for _ in raw_files if _ not in exclude]\n\n file_df = file_df_from_files(raw_files, file_folder)\n #file_df[\"Fraction\"] = \"\"\n #file_df[\"Matching group\"] = \"\"\n\n gb = GridOptionsBuilder.from_dataframe(file_df)\n gb.configure_default_column(\n groupable=True,\n value=True,\n enableRowGroup=True,\n aggFunc=\"sum\",\n editable=True,\n )\n gb.configure_grid_options(domLayout=\"normal\")\n gridOptions = gb.build()\n\n grid_response = AgGrid(\n file_df,\n height=300,\n width=\"100%\",\n gridOptions=gridOptions,\n )\n\n file_df_selected = grid_response[\"data\"]\n\n with st.expander(\"Additional info\"):\n st.write(\n \"- Filename: Name of the file.\"\n \" \\n- Creation date of file.\"\n \" \\n- Size (GB): Size in GB of the file.\"\n \" \\n- Shortname: Unique shortname for each file.\"\n \" \\n- Fraction: Fraction of each file.\"\n \" \\n- Matching Group: Match-between-runs only among members of this group.\"\n )\n\n shortnames = file_df_selected[\"Shortname\"].values.tolist()\n if len(shortnames) != len(set(shortnames)):\n st.warning(\"Warning: Shortnames are not unique.\")\n error += 1\n\n fasta_files_home_dir = files_in_folder(FASTA_PATH, \".fasta\")\n fasta_files_home_dir = [\n os.path.join(FASTA_PATH, _) for _ in fasta_files_home_dir\n ]\n\n fasta_files_home_dir += fasta_files\n\n selection = st.multiselect(\n \"Select FASTA files\",\n options=fasta_files_home_dir,\n default=fasta_files,\n )\n recorder[\"experiment\"][\"fasta_paths\"] = selection\n\n if len(recorder[\"experiment\"][\"fasta_paths\"]) == 0:\n st.warning(\"Warning: No FASTA files selected.\")\n error += 1\n\n recorder[\"experiment\"][\"shortnames\"] = shortnames\n recorder[\"experiment\"][\"file_paths\"] = [\n os.path.join(file_folder, _)\n for _ in file_df_selected[\"Filename\"].values.tolist()\n ]\n\n #recorder[\"experiment\"][\"fractions\"] = file_df_selected[\n # \"Fraction\"\n #].values.tolist()\n #recorder[\"experiment\"][\"matching_groups\"] = file_df_selected[\n # \"Matching group\"\n #].values.tolist()\n\n st.write(f\"## Workflow\")\n\n with st.expander(\"Steps\"):\n group = SETTINGS_TEMPLATE[\"workflow\"]\n for element in group:\n recorder = widget_from_setting(\n recorder, \"workflow\", group, element\n )\n\n st.write(\"## Modify settings\")\n\n prev_settings = st.checkbox(\"Use previous settings as template\")\n\n loaded = False\n uploaded_settings = None\n if prev_settings:\n uploaded_file = st.file_uploader(\"Choose a file\")\n if uploaded_file is not None:\n uploaded_settings = yaml.load(\n uploaded_file, 
Loader=yaml.FullLoader\n )\n loaded = True\n\n recorder = customize_settings(recorder, uploaded_settings, loaded)\n\n st.write(\"## Submit experiment\")\n if error != 0:\n st.warning(\"Some warnings exist. Please check settings.\")\n else:\n submit_experiment(recorder)",
"def loadDiodeTemp(h6, filename):\n \n f_fine = h6.freqs\n f = h6.freqs_cal\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n \n #temps_x = np.fromfile(filename_x).reshape([13,16])\n #temps_y = np.fromfile(filename_y).reshape([13,16])\n\n if filename.endswith('.hdf') or filename.endswith('.h5') or filename.endswith('.hdf5'):\n temps, tsys = mbcal(filename)\n else:\n temps = np.fromfile(filename).reshape([26,16])\n tsys = np.zeros_like(temps)\n\n temps_x = temps[0:13]\n temps_y = temps[13:26]\n tsys_x = tsys[0:13]\n tsys_y = tsys[13:26]\n\n temps_fine_x = np.zeros([13, num_chans])\n temps_fine_y = np.zeros([13, num_chans])\n tsys_fine_x = np.zeros([13, num_chans])\n tsys_fine_y = np.zeros([13, num_chans])\n \n for i in range(0,13):\n temps_fine_x[i] = fitLine(f, temps_x[i], num_chans)\n temps_fine_y[i] = fitLine(f, temps_y[i], num_chans)\n tsys_fine_x[i] = fitLine(f, tsys_x[i], num_chans)\n tsys_fine_y[i] = fitLine(f, tsys_y[i], num_chans)\n \n return temps_x, temps_y, tsys_x, tsys_y",
"def add_fit_data(self, model: str, tab_line, plot_data: hdu.table.TableHDU):\n # This stores the exposure time that XSPEC uses for this specific spectrum.\n if self._exp is None:\n self._exp = float(tab_line[\"EXPOSURE\"])\n\n # This is the count rate and error for this spectrum.\n self._count_rate[model] = [float(tab_line[\"COUNT_RATE\"]), float(tab_line[\"COUNT_RATE_ERR\"])]\n\n # Searches for column headers with 'Lx' in them (this has to be dynamic as the user can calculate\n # luminosity in as many bands as they like)\n lx_inds = np.where(np.char.find(tab_line.dtype.names, \"Lx\") == 0)[0]\n lx_cols = np.array(tab_line.dtype.names)[lx_inds]\n\n # Constructs a dictionary of luminosities and their errors for the different energy bands\n # in this XSPEC fit.\n lx_dict = {}\n for col in lx_cols:\n lx_info = col.split(\"_\")\n if lx_info[2][-1] == \"-\" or lx_info[2][-1] == \"+\":\n en_band = \"bound_{l}-{u}\".format(l=lx_info[1], u=lx_info[2][:-1])\n err_type = lx_info[-1][-1]\n else:\n en_band = \"bound_{l}-{u}\".format(l=lx_info[1], u=lx_info[2])\n err_type = \"\"\n\n if en_band not in lx_dict:\n lx_dict[en_band] = [0, 0, 0]\n\n if err_type == \"\":\n lx_dict[en_band][0] = Quantity(float(tab_line[col])*(10**44), \"erg s^-1\")\n elif err_type == \"-\":\n lx_dict[en_band][1] = Quantity(float(tab_line[col])*(10**44), \"erg s^-1\")\n elif err_type == \"+\":\n lx_dict[en_band][2] = Quantity(float(tab_line[col])*(10**44), \"erg s^-1\")\n\n self._luminosities[model] = lx_dict\n\n self._plot_data[model] = {\"x\": plot_data[\"X\"][:], \"x_err\": plot_data[\"XERR\"][:],\n \"y\": plot_data[\"Y\"][:], \"y_err\": plot_data[\"YERR\"][:],\n \"model\": plot_data[\"YMODEL\"][:]}",
"def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data",
"def convert_input(filename, hdf5_data):\n x1 = []\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_TYPE, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(x1[idx].split()[0]))\n set_hdf5_attributes(dset, structure.H5_SOLVER_TYPE_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_RESTART, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(x1[idx].split()[0]))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_RESTART_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_STOPPING, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_STOPPING_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_MAX_ITERATIONS, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(x1[idx].split()[0]))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_MAX_ITERATIONS_ATTR)"
] | [
"0.62783426",
"0.54187465",
"0.52853376",
"0.52826935",
"0.5281087",
"0.5270762",
"0.52683157",
"0.5252653",
"0.52074903",
"0.51874214",
"0.5176318",
"0.5172828",
"0.51372814",
"0.5134237",
"0.51305115",
"0.5124579",
"0.511208",
"0.5110209",
"0.5110135",
"0.5080256",
"0.506206",
"0.50566447",
"0.50419325",
"0.50292367",
"0.50133604",
"0.5010321",
"0.49672616",
"0.4958337",
"0.4957369",
"0.49548924"
] | 0.7044707 | 0 |
Function that allows the user to manually add or remove peaks from the automatic spectra fitting by inputting an add_list and/or a drop_list. The function pulls some data from the existing fit and overwrites it with the new results. | def adjust_peaks(hdf5_file, key, add_list=None, drop_list=None, plot_fits=False):
# handling input errors
if not isinstance(hdf5_file, str):
raise TypeError('Passed value of `hdf5_file` is not a string! Instead, it is: '
+ str(type(hdf5_file)))
if not hdf5_file.split('/')[-1].split('.')[-1] == 'hdf5':
raise TypeError('`hdf5_file` is not type = .hdf5! Instead, it is: '
+ hdf5_file.split('/')[-1].split('.')[-1])
if not isinstance(key, str):
raise TypeError('Passed value of `key` is not a string! Instead, it is: '
+ str(type(key)))
if add_list is None:
pass
else:
if not isinstance(add_list, list):
raise TypeError('Passed value of `add_list` is not a list! Instead, it is: '
+ str(type(add_list)))
if drop_list is None:
pass
else:
if not isinstance(drop_list, list):
raise TypeError('Passed value of `drop_list` is not a list! Instead, it is: '
+ str(type(drop_list)))
if not isinstance(plot_fits, bool):
raise TypeError('Passed value of `plot_fits` is not a boolean! Instead, it is: '
+ str(type(plot_fits)))
hdf5 = h5py.File(hdf5_file, 'r+')
# extract raw x-y data
x_data = np.asarray(hdf5['{}/{}'.format(key, 'wavenumber')])
y_data = np.asarray(hdf5['{}/{}'.format(key, 'counts')])
# extract peak center and height locations from hdf5
peaks = []
for _, peak in enumerate(list(hdf5[key])[:-3]):
peaks.append(list(hdf5['{}/{}'.format(key, peak)][0]))
# drop desired tuples from peaks
if drop_list is not None:
drop_index = []
for _, name in enumerate(drop_list):
drop_index.append(int(name.split('_')[-1])-1)
for i, index in enumerate(drop_index):
peaks.pop(index-i)
else:
pass
if add_list is not None:
# interpolate data
comp_int = interpolate.interp1d(x_data, y_data, kind='cubic')
# iterate through add_list
peaks_add = []
for _, guess in enumerate(add_list):
height = comp_int(int(guess))
peaks_add.append((int(guess), int(height)))
else:
peaks_add = []
# build new model
fit_result, residuals = spectrafit.build_custom_model(x_data, y_data,
peaks, peaks_add, plot_fits)
# delete old fit data
del hdf5[key]
# write data to .hdf5
hdf5['{}/wavenumber'.format(key)] = x_data
hdf5['{}/counts'.format(key)] = y_data
hdf5['{}/residuals'.format(key)] = residuals
for i, result in enumerate(fit_result):
# create custom datatype
my_datatype = np.dtype([('fraction', float),
                        ('center', float),
                        ('sigma', float),
                        ('amplitude', float),
                        ('fwhm', float),
                        ('height', float),
                        ('area under the curve', float)])
if len(result) == 7:
if i < 9:
dataset = hdf5.create_dataset('{}/Peak_0{}'.format(key, i+1),
(1,), dtype=my_datatype)
else:
dataset = hdf5.create_dataset('{}/Peak_{}'.format(key, i+1),
(1,), dtype=my_datatype)
elif len(result) == 8:
if i < 9:
dataset = hdf5.create_dataset('{}/Peak_0{}*'.format(key, i+1),
(1,), dtype=my_datatype)
else:
dataset = hdf5.create_dataset('{}/Peak_{}*'.format(key, i+1),
(1,), dtype=my_datatype)
else:
print('fit_result for Peak_{} contains an inappropriate number of values'.format(i+1))
# apply data to tuple
data = tuple(result[:7])
data_array = np.array(data, dtype=my_datatype)
# write new values to the blank dataset
dataset[...] = data_array
hdf5.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fitPeaks(self, new_peaks, peaks_type):\n # Check if we need to do anything.\n if (new_peaks[\"x\"].size > 0):\n\n # Update status of current peaks (if any) that are near\n # to the new peaks that are being added.\n #\n if (self.mfitter.getNFit() > 0):\n c_x = self.mfitter.getPeakProperty(\"x\")\n c_y = self.mfitter.getPeakProperty(\"y\")\n status = self.mfitter.getPeakProperty(\"status\")\n new_status = iaUtilsC.runningIfHasNeighbors(status,\n c_x,\n c_y,\n new_peaks[\"x\"],\n new_peaks[\"y\"],\n self.neighborhood)\n self.mfitter.setPeakStatus(new_status)\n \n # Add new peaks.\n self.mfitter.newPeaks(new_peaks, peaks_type)\n\n # Iterate fitting and remove any error peaks.\n #\n # The assumption is that because error peaks are longer in the\n # fit image we don't have to do additional iterations on the\n # remaining peaks after the error peaks have been removed.\n #\n if not self.no_fitting:\n self.mfitter.doFit()\n self.mfitter.removeErrorPeaks()\n\n # Remove peaks that are too close to each other and/or that\n # have a low significance score.\n #\n status = self.mfitter.getPeakProperty(\"status\")\n\n # Identify peaks that are to close based on the somewhat\n # arbitrary criteria of being within 1 sigma.\n #\n # markDimmerPeaks() will update the status array, in particular\n # it will mark the dimmer of two peaks that are too close as ERROR.\n #\n px = self.mfitter.getPeakProperty(\"x\")\n py = self.mfitter.getPeakProperty(\"y\")\n n_proximity = iaUtilsC.markDimmerPeaks(px,\n py,\n self.mfitter.getPeakProperty(\"height\"),\n status,\n self.sigma,\n self.neighborhood)\n\n # Identify peaks that have a low significance score.\n #\n # markLowSignificancePeaks() will update the status array, in particular\n # it will mark low significance peaks as ERROR.\n #\n n_significance = iaUtilsC.markLowSignificancePeaks(px,\n py,\n self.mfitter.getPeakProperty(\"significance\"),\n status,\n self.minimum_significance,\n self.neighborhood)\n\n # This does the actual peak removal. We update the peak status in\n # mfitter, then tell mfitter to remove all the ERROR peaks.\n #\n if ((n_proximity + n_significance) > 0):\n self.mfitter.setPeakStatus(status)\n self.mfitter.removeErrorPeaks()\n self.mfitter.incProximityCounter(n_proximity)\n self.mfitter.incSignificanceCounter(n_significance)\n\n # If we have unconverged peaks, iterate some more.\n if (self.mfitter.getUnconverged() > 0) and (not self.no_fitting):\n self.mfitter.doFit()\n self.mfitter.removeErrorPeaks()\n\n # Return the current fit image.\n return self.mfitter.getFitImage()",
"def coadd_spectra(spec_list_fits, out_name, scale_spectra=True,\r\n use_ratios=False, ratio_range=[4200, 4300], \r\n one_side=True):\r\n\r\n spec_list_txt = [f.replace('fits', 'txt') for f in spec_list_fits]\r\n\r\n # first spectrum in the list is always the reference spectrum\r\n hdr = pyfits.getheader(spec_list_fits[0])\r\n #mjd = hdr['MJD']\r\n #date_obs = hdr['DATE-OBS']\r\n #epoch = hdr['EPOCH']\r\n #observat = hdr['OBSERVAT']\r\n exptime = hdr['EXPTIME']\r\n seeing = hdr['FWHM']\r\n # save some keywords\r\n keys = ['OBJECT', 'OBSERVER', 'DICHROIC', 'APERTURE', 'LAMPS', 'UTSHUT', 'OBSLST', 'RA', 'DEC', 'HOURANG', 'HA', 'TELFOCUS', 'CASSPA', 'PARALLAC', 'CCDTEMP', 'ANGLE', 'GRATING', 'AIRMASS']\r\n #mjd_blue = hdr['MJD']\r\n exptime_blue = hdr['EXPTIME']\r\n hdr_save = {}\r\n for key in keys:\r\n hdr_save[key] = hdr[key]\r\n verr = np.float(hdr['VERR'])**2\r\n spec_ref = np.genfromtxt(spec_list_txt[0], names='wave, flux', \r\n dtype='f4, f4')\r\n err_ref = np.genfromtxt(spec_list_txt[0].replace('spec', 'err'), \r\n names='wave, flux', dtype='f4, f4')\r\n wave = spec_ref['wave']\r\n spec_ref = spec_ref['flux'].view(np.ma.masked_array)\r\n err_ref = err_ref['flux'].view(np.ma.masked_array)\r\n\r\n\r\n # err_ref['flux'] = np.where(err_ref['flux'] <= 0, 1, err_ref['flux']) # reset bad error values to 1\r\n # boolean array: mask out invalid regions so average excludes zeros\r\n bad_err = err_ref <= 0\r\n spec_ref[bad_err] = np.ma.masked\r\n err_ref[bad_err] = np.ma.masked\r\n\r\n\r\n # spectra and their errors will be stored here\r\n spectra = np.ma.zeros((spec_ref.size, len(spec_list_fits)), dtype='f4')\r\n spectra_err = np.ma.zeros((spec_ref.size, len(spec_list_fits)), dtype='f4')\r\n\r\n spectra[:, 0] = spec_ref\r\n spectra_err[:, 0] = err_ref\r\n\r\n ratio = [1]\r\n\r\n for i, fname in enumerate(spec_list_fits[1:]):\r\n fname_txt = spec_list_txt[i+1]\r\n hdr = pyfits.getheader(fname)\r\n exptime += hdr['EXPTIME']\r\n seeing += hdr['FWHM']\r\n verr += np.float(hdr['VERR'])**2\r\n spec = np.genfromtxt(fname_txt, names='wave, flux', dtype='f4, f4')\r\n err = np.genfromtxt(fname_txt.replace('spec', 'err'), \r\n names='wave, flux', dtype='f4, f4')\r\n spec = spec['flux'].view(np.ma.masked_array)\r\n err = err['flux'].view(np.ma.masked_array)\r\n # reset bad error values to 1\r\n # err['flux'] = np.where(err['flux'] <= 0, 1, err['flux']) \r\n bad_err = err <= 0\r\n spec[bad_err] = np.ma.masked\r\n err[bad_err] = np.ma.masked\r\n\r\n spectra[:, i+1] = spec\r\n spectra_err[:, i+1] = err\r\n if scale_spectra:\r\n if use_ratios:\r\n # use the specified region to determine te ratio of spectra\r\n good = np.where((spec > ratio_range[0]) & \r\n (spec < ratio_range[1]))\r\n ratio.append(np.median(spec_ref[good]/spec[good]))\r\n else:\r\n spec_good_err = err > 0\r\n # identify overlap between sides\r\n wgd = (err_ref > 0) & (err > 0)\r\n\r\n ratio.append(match_spectra_leastsq(spec[wgd], \r\n spec_ref[wgd], err[wgd], \r\n err_ref[wgd]))\r\n\r\n \r\n\r\n spec_avg, sum_weights = np.average(spectra*ratio, weights=1./(spectra_err*ratio)**2, axis=1, returned=True)\r\n spec_err = 1./np.sqrt(sum_weights)\r\n # output coadded spectra and uncertainties\r\n f = open('%s.spec.txt' % out_name, 'w')\r\n g = open('%s.err.txt' % out_name, 'w')\r\n h = open('%s.snr.txt' % out_name, 'w')\r\n # add some header keywords\r\n for key in hdr_save.keys():\r\n f.write('# %s = %s\\n' % (key, hdr_save[key]))\r\n if one_side:\r\n # exposure time and velocity error are only well-defined for\r\n # data combined from a single 
side\r\n f.write('# FWHM = %.2f\\n' % float(seeing/len(spec_list_fits)))\r\n f.write('# VERR = %.2f\\n' % np.sqrt(verr))\r\n #f.write('# MJD = %.6f\\n' % (mjd + exptime/(2.*60.*60.*24.)))\r\n else:\r\n # when combining sides, use the MJD and EXPTIME from the combined blue side\r\n f.write('# EXPTIME = %.0f\\n' % exptime_blue)\r\n #f.write('# MJD = %.6f\\n' % mjd_blue)\r\n\r\n for x, y, z in zip(wave, spec_avg, spec_err):\r\n f.write('%.3f %.5g\\n' % (x, y))\r\n g.write('%.3f %.5g\\n' % (x, z))\r\n h.write('%.3f %.5g\\n' % (x, y/z))\r\n f.close()\r\n g.close()\r\n h.close()\r\n # save as 1D IRAF FITS files\r\n iraf.delete('%s.spec.fits' % out_name, verify=\"no\")\r\n iraf.delete('%s.err.fits' % out_name, verify=\"no\")\r\n iraf.delete('%s.snr.fits' % out_name, verify=\"no\")\r\n iraf.rspectext('%s.spec.txt' % out_name, '%s.spec.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n iraf.rspectext('%s.err.txt' % out_name, '%s.err.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n iraf.rspectext('%s.snr.txt' % out_name, '%s.snr.fits' % out_name, \r\n crval1 = hdr['CRVAL1'], cdelt1 = hdr['CDELT1'])\r\n # add keywords\r\n f = pyfits.open('%s.spec.fits' % out_name)\r\n for key in hdr_save.keys():\r\n #f[0].header.update(key, hdr_save[key])\r\n f[0].header[key]= hdr_save[key]\r\n #f[0].header.update('DATE-OBS', date_obs)\r\n #f[0].header.update('OBSERVAT', observat)\r\n #f[0].header.update('EPOCH', epoch)\r\n #f[0].header['DATE-OBS']= date_obs\r\n #f[0].header['OBSERVAT']= observat\r\n #f[0].header['EPOCH']= epoch\r\n if one_side:\r\n # exposure time and velocity error are only well-defined for\r\n # data combined from a single side\r\n #f[0].header.update('EXPTIME', exptime)\r\n #f[0].header.update('FWHM', seeing/len(spec_list_fits))\r\n #f[0].header.update('VERR', '%.2f' % np.sqrt(verr), 'Uncertainty in km/s')\r\n f[0].header['EXPTIME']= exptime\r\n f[0].header['FWHM']= seeing/len(spec_list_fits)\r\n f[0].header['VERR']= '%.2f' %np.sqrt(verr)\r\n #mjd += exptime/(2.*60.*60.*24.)\r\n else:\r\n # when combining sides, use the EXPTIME from the combined blue side\r\n #f[0].header.update('EXPTIME', exptime_blue)\r\n f[0].header['EXPTIME']= exptime_blue\r\n #del f[0].header['VERR'] #DaveC\r\n #f[0].header.update('MJD', np.round(mjd, decimals=6))\r\n #f[0].header['MJD']= np.round(mjd, decimals=6)\r\n\r\n f.writeto('%s.spec.fits' % out_name, clobber=True)\r\n f.close()",
"def substract_given_gaussian(wavelength, spectrum, centre, peak=0, sigma=0, flux=0, search_peak=False, allow_absorptions = False,\n lowlow= 20, lowhigh=10, highlow=10, highhigh = 20, \n lmin=0, lmax=0, fmin=0, fmax=0, plot=True, fcal=False, verbose = True, warnings=True): \n do_it = False\n # Check that we have the numbers!\n if peak != 0 and sigma != 0 : do_it = True\n\n if peak == 0 and flux != 0 and sigma != 0:\n #flux = peak * sigma * np.sqrt(2*np.pi)\n peak = flux / (sigma * np.sqrt(2*np.pi))\n do_it = True \n\n if sigma == 0 and flux != 0 and peak != 0 :\n #flux = peak * sigma * np.sqrt(2*np.pi)\n sigma = flux / (peak * np.sqrt(2*np.pi)) \n do_it = True \n \n if flux == 0 and sigma != 0 and peak != 0 :\n flux = peak * sigma * np.sqrt(2*np.pi)\n do_it = True\n\n if sigma != 0 and search_peak == True: do_it = True \n\n if do_it == False:\n print(\"> Error! We need data to proceed! Give at least two of [peak, sigma, flux], or sigma and force peak to f[centre]\")\n s_s = spectrum\n else:\n # Setup wavelength limits\n if lmin == 0 :\n lmin = centre-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = centre+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((spectrum[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to centre\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n \n # Linear Fit to continuum \n try: \n mm,bb = np.polyfit(w_cont, f_cont, 1)\n except Exception:\n bb = np.nanmedian(spectrum)\n mm = 0.\n if verbose or warnings: \n print(\" WARNING! Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n # c_cont = mm*np.array(w_cont)+bb \n # rms continuum\n # rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n if search_peak:\n # Search for index here w_spec(index) closest to line\n try:\n min_w = np.abs(np.array(w_spec)-centre)\n mini = np.nanmin(min_w)\n peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n flux = peak * sigma * np.sqrt(2*np.pi) \n if verbose: print(\" Using peak as f[\",np.round(centre,2),\"] = \",np.round(peak,2),\" and sigma = \", np.round(sigma,2), \" flux = \",np.round(flux,2))\n except Exception:\n if verbose or warnings: print(\" Error trying to get the peak as requested wavelength is \",np.round(centre,2),\"! Ignoring this fit!\")\n peak = 0.\n flux = -0.0001\n \n no_substract = False\n if flux < 0:\n if allow_absorptions == False:\n if np.isnan(centre) == False:\n if verbose or warnings : print(\" WARNING! This is an ABSORPTION Gaussian! 
As requested, this Gaussian is NOT substracted!\")\n no_substract = True\n if no_substract == False: \n if verbose: print(\" Substracting Gaussian at {:7.1f} with peak ={:10.4f} sigma ={:6.2f} and flux ={:9.4f}\".format(centre, peak,sigma,flux))\n \n gaussian_fit = gauss(w_spec, centre, peak, sigma)\n \n \n index=0\n s_s=np.zeros_like(spectrum)\n for wave in range(len(wavelength)):\n s_s[wave]=spectrum[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n if plot: \n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at line\n plt.axvline(x=centre, color='k', linestyle='-', alpha=0.8)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(centre+highlow, centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(centre-lowlow, centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical lines to emission line\n #plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n #plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n #plt.plot(w_spec, residuals, 'k')\n #plt.title('Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show() \n plt.close()\n \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,spectrum, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n else:\n s_s = spectrum\n return s_s",
"def changePeaks(self):\n # Change the number of peaks\n if self.minpeaks is not None and self.maxpeaks is not None:\n npeaks = len(self.peaks_function)\n u = self.random.random()\n r = self.maxpeaks - self.minpeaks\n if u < 0.5:\n # Remove n peaks or less depending on the minimum number of peaks\n u = self.random.random()\n n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n idx = self.random.randrange(len(self.peaks_function))\n self.peaks_function.pop(idx)\n self.peaks_position.pop(idx)\n self.peaks_height.pop(idx)\n self.peaks_width.pop(idx)\n self.last_change_vector.pop(idx)\n else:\n # Add n peaks or less depending on the maximum number of peaks\n u = self.random.random()\n n = min(self.maxpeaks - npeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n self.peaks_function.append(self.random.choice(self.pfunc_pool))\n self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])\n self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))\n self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))\n self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])\n\n for i in range(len(self.peaks_function)):\n # Change peak position\n shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]\n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n \n shift = [shift_length * (1.0 - self.lambda_) * s \\\n + self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]\n \n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n\n shift = [s*shift_length for s in shift]\n \n new_position = []\n final_shift = []\n for pp, s in zip(self.peaks_position[i], shift):\n new_coord = pp + s\n if new_coord < self.min_coord:\n new_position.append(2.0 * self.min_coord - pp - s)\n final_shift.append(-1.0 * s)\n elif new_coord > self.max_coord:\n new_position.append(2.0 * self.max_coord - pp - s)\n final_shift.append(-1.0 * s)\n else:\n new_position.append(new_coord)\n final_shift.append(s)\n\n self.peaks_position[i] = new_position\n self.last_change_vector[i] = final_shift\n\n # Change peak height\n change = self.random.gauss(0, 1) * self.height_severity\n new_value = change + self.peaks_height[i]\n if new_value < self.min_height:\n self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change\n elif new_value > self.max_height:\n self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change\n else:\n self.peaks_height[i] = new_value\n\n # Change peak width\n change = self.random.gauss(0, 1) * self.width_severity\n new_value = change + self.peaks_width[i]\n if new_value < self.min_width:\n self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change\n elif new_value > self.max_width:\n self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change\n else:\n self.peaks_width[i] = new_value\n\n self._optimum = None",
"def update_graph(input_values,show_fit,excel_name):\n fitted = new_file_name = 'fitted_data/fitted_'+excel_name[5:]+'.json'\n if os.path.isfile(fitted):\n with open(fitted) as json_file:\n peak_data = ujson.load(json_file)\n\n if not input_values:\n return {'data':[]}\n traces = []\n if show_fit: \n for val in input_values:\n yfit = peak_data[str(val)]['fit']\n legendgroup_name = 'group'+str(val)\n traces.append(dict(\n x=np.array(x_data)[:,0].tolist(),\n y=np.array(y_data)[:,val].tolist(),\n text=str(val),\n mode='markers',\n opacity=0.5,\n legendgroup=legendgroup_name,\n name='Data PointsX:'+str(val//13) + ', Y:' + str(val%13),\n marker = dict(color=str(val)),\n ))\n traces.append(dict(\n x=np.array(x_data)[:,0].tolist(),\n y=yfit,\n text=str(val),\n mode='line',\n opacity=0.7,\n legendgroup=legendgroup_name,\n name='Fitted, X:'+str(val//13) + ', Y:' + str(val%13),\n line = dict(color=str(val)),\n ))\n else: \n for val in input_values:\n traces.append(dict(\n x=np.array(x_data)[:,0].tolist(),\n y=np.array(y_data)[:,val].tolist(),\n text=str(val),\n mode='markers',\n opacity=0.7,\n name='Data Points X:'+str(val//13) + ', Y:' + str(val%13),\n ))\n\n return {\n 'data': traces,\n 'layout': dict(\n xaxis={ 'title': 'KE'},\n yaxis={'title': 'Data points'},\n margin={'l': 40, 'b': 40, 't': 10, 'r': 10},\n legend={'x': 0, 'y': 1},\n hovermode='closest',\n )\n }\n else:\n return {'data':[]}",
"def run_fit(self, optimize_opts=None):\n fit_range = self.config[\"fit\"].get(\"fit_range\")\n model = self.config[\"fit\"][\"model\"]\n\n for obs in self.extraction.spectrum_observations:\n if fit_range is not None:\n obs.mask_fit = obs.counts.energy_mask(fit_range[0], fit_range[1])\n obs.model = model\n\n self.fit = Fit(self.extraction.spectrum_observations)\n self.fit_result = self.fit.run(optimize_opts=optimize_opts)\n\n model = self.config[\"fit\"][\"model\"]\n modelname = model.__class__.__name__\n\n model.parameters.covariance = self.fit_result.parameters.covariance\n\n filename = make_path(self.config[\"outdir\"]) / \"fit_result_{}.yaml\".format(\n modelname\n )\n\n self.write(filename=filename)\n\n obs_stacker = SpectrumDatasetOnOffStacker(self.extraction.spectrum_observations)\n obs_stacker.run()\n\n datasets_fp = obs_stacker.stacked_obs\n datasets_fp.model = model\n self.flux_point_estimator = FluxPointsEstimator(\n e_edges=self.config[\"fp_binning\"], datasets=datasets_fp\n )\n fp = self.flux_point_estimator.run()\n fp.table[\"is_ul\"] = fp.table[\"ts\"] < 4\n self.flux_points = fp",
"def Perform_Fits(spec2, segment_a,segment_b, break_wavelength):\n\n\t# Read in spectrum to be corrected \n\twave2,flux2,error2 = np.loadtxt(spec2,unpack=True,usecols = [0,1,2])\n\t\n\t# Unpack segment; len(central_wave_sega) = number of points/lines fitted = len(shift_sega)\n\tcentral_wave_sega,shift_sega, fit_order_a, scale_sega = segment_a\n\tcentral_wave_segb,shift_segb, fit_order_b, scale_segb = segment_b\n\n\t# Get pivot for the fit. \n\tpivot_wave_b = np.mean(central_wave_segb)\n\tpivot_wave_a = np.mean(central_wave_sega)\n\n\t# Get the fitted parameters\n\tparams_segb = np.polyfit(central_wave_segb-pivot_wave_b, shift_segb, fit_order_b)\n\tparams_sega = np.polyfit(central_wave_sega-pivot_wave_a, shift_sega, fit_order_a)\n\n\t# Find all the wavelength for segment\n\tbest_fit_wave_segb = [w2 for w2 in wave2 if w2 <= break_wavelength]\t\n\tbest_fit_wave_sega = [w2 for w2 in wave2 if w2 > break_wavelength]\n\n\t# Get the delta lambda for all the wavelength in each segment \n\tbest_fit_shift_segb = np.polyval(params_segb, best_fit_wave_segb-pivot_wave_b)\n\tbest_fit_shift_sega = np.polyval(params_sega, best_fit_wave_sega-pivot_wave_a)\n\n\t# Add the offset to the original wavelength \n\twave_segb = best_fit_wave_segb + best_fit_shift_segb\n\twave_sega = best_fit_wave_sega + best_fit_shift_sega\n\tnew_wave = np.hstack((wave_segb,wave_sega))\n\n\t# Write rectified spec\n\tscalings = np.mean(scale_sega),np.mean(scale_segb),break_wavelength\n\tshift_spec(new_wave,scalings,spec2)\n\t#shiftspec(spec2,params_sega,params_segb,pivot_wave_a,pivot_wave_b,0,break_wavelength)\n\n\t# Write out points and fit \n\tWriteFitPoints(spec2, central_wave_segb, shift_segb, central_wave_sega, shift_sega)\n\tprint 'Written rectified spectrum %s\\n'\t% spec2[:-16] +'rect_' + spec2[-16:]\n\n\tsegment_a_fit = best_fit_wave_sega,best_fit_shift_sega,\n\tsegment_b_fit = best_fit_wave_segb,best_fit_shift_segb\n\treturn segment_a_fit, segment_b_fit",
"def addFit(self, fitdata, name='default'):\n if not hasattr(self, 'fits'):\n self.fits={}\n self.fits[name] = fitdata\n return",
"def fit_wave_soln(fnlist):\n \n #This bit takes care of the 's' to save shortcut in matplotlib.\n oldsavekey = plt.rcParams[\"keymap.save\"]\n plt.rcParams[\"keymap.save\"] = \"\"\n \n #Open all of the images\n imagelist = []\n arclist = []\n objlist = []\n for i in range(len(fnlist)):\n imagelist.append(openfits(fnlist[i]))\n if i == 0: filt = imagelist[0][0].header[\"FILTER\"]\n if imagelist[i][0].header[\"FILTER\"] != filt:\n print \"Error! Some of these images are in different filters!\"\n crash()\n if imagelist[i][0].header[\"OBJECT\"]==\"ARC\": arclist.append(imagelist[i])\n else:\n if not isfile(join(split(fnlist[i])[0],\"median.fits\")):\n print \"Error! No 'median.fits' file found.\"\n crash()\n medimage = openfits(join(split(fnlist[i])[0],\"median.fits\"))\n imagelist[i][0].data += -medimage[0].data\n medimage.close()\n objlist.append(imagelist[i])\n \n #Load wavelength libraries\n arclib, nightlib = get_libraries(filt)\n if arclib is None:\n print \"Error! Your filter isn't the wavelength library!\"\n crash()\n\n \n\n\n\n #This next bit fits all of the rings that the user marks\n\n #Fit rings in the object images\n radlists = []\n for i in range(len(objlist)):\n radlists.append([])\n i=0\n while True:\n xgrid, ygrid = np.meshgrid(np.arange(objlist[i][0].data.shape[1]), np.arange(objlist[i][0].data.shape[0]))\n xcen = objlist[i][0].header[\"FPXCEN\"]\n ycen = objlist[i][0].header[\"FPYCEN\"]\n axcen = objlist[i][0].header[\"FPAXCEN\"]\n aycen = objlist[i][0].header[\"FPAYCEN\"]\n arad = objlist[i][0].header[\"FPARAD\"]\n rgrid = np.sqrt((xgrid - xcen)**2 + (ygrid - ycen)**2)\n rbins = np.arange(arad-np.int(max(abs(axcen-xcen),abs(aycen-ycen))))+1\n intbins = np.empty_like(rbins)\n for j in range(len(rbins)):\n intbins[j] = np.median(objlist[i][0].data[np.logical_and(np.logical_and(objlist[i][0].data!=0,rgrid<rbins[j]),rgrid>rbins[j]-1)])\n ringplot = PlotRingProfile(objlist[i][0].data[aycen-arad:aycen+arad,axcen-arad:axcen+arad], #Data to be plotted. Only want stuff inside aperture\n rbins+(xcen-axcen)+arad, #Radii bins shifted to image center\n intbins*arad/np.percentile(np.abs(intbins),98)+(ycen-aycen)+arad, #Intensity bins, rescaled and shifted by image center\n xcen-axcen+arad, ycen-aycen+arad, #Shifted center\n radlists[i], #Previously fitted rings\n repr(i+1)+\"/\"+repr(len(objlist))) #numstring\n #Changing images and loop breakout conditions\n if ringplot.key == \"d\": i+=1\n if ringplot.key == \"a\": i+=-1\n if i == -1 or i == len(objlist):\n while True:\n yn = raw_input(\"Finished marking sky rings? 
(y/n) \")\n if \"n\" in yn or \"N\" in yn:\n if i == -1: i=0\n if i == len(objlist): i = len(objlist)-1\n break\n elif \"y\" in yn or \"Y\" in yn:\n break\n if i == -1 or i == len(objlist): break\n #Force-marking a ring\n if ringplot.key == \"e\" and ringplot.xcoo != None: radlists[i].append(ringplot.xcoo-arad-(xcen-axcen))\n #Deleting a ring\n if ringplot.key == \"s\" and ringplot.xcoo != None and len(radlists[i])>0:\n radlists[i].pop(np.argmin(np.array(radlists[i])-np.sqrt((ringplot.xcoo-arad-(xcen-axcen))**2 + (ringplot.ycoo-arad-(ycen-aycen))**2)))\n #Fitting a ring profile\n if ringplot.key == \"w\" and ringplot.xcoo != None:\n x = rbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]**2\n y = intbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]\n fit = GaussFit(x,y)\n fitplot = PlotRingFit(x,y,fit)\n if fitplot.key == \"w\": radlists[i].append(np.sqrt(fit[2]))\n zo = []\n to = []\n ro = []\n for i in range(len(objlist)):\n for j in range(len(radlists[i])):\n zo.append(objlist[i][0].header[\"ET1Z\"])\n to.append(objlist[i][0].header[\"JD\"])\n ro.append(radlists[i][j])\n \n #Fit rings in the ARC images\n xcen = objlist[0][0].header[\"FPXCEN\"]\n ycen = objlist[0][0].header[\"FPYCEN\"]\n radlists = []\n for i in range(len(arclist)):\n radlists.append([])\n i=0\n while True:\n xgrid, ygrid = np.meshgrid(np.arange(arclist[i][0].data.shape[1]), np.arange(arclist[i][0].data.shape[0]))\n axcen = arclist[i][0].header[\"FPAXCEN\"]\n aycen = arclist[i][0].header[\"FPAYCEN\"]\n arad = arclist[i][0].header[\"FPARAD\"]\n rgrid = np.sqrt((xgrid - xcen)**2 + (ygrid - ycen)**2)\n rbins = np.arange(arad-np.int(max(abs(axcen-xcen),abs(aycen-ycen))))+1\n intbins = np.empty_like(rbins)\n for j in range(len(rbins)):\n intbins[j] = np.median(arclist[i][0].data[np.logical_and(np.logical_and(arclist[i][0].data!=0,rgrid<rbins[j]),rgrid>rbins[j]-1)])\n ringplot = PlotRingProfile(arclist[i][0].data[aycen-arad:aycen+arad,axcen-arad:axcen+arad], #Data to be plotted. Only want stuff inside aperture\n rbins+(xcen-axcen)+arad, #Radii bins shifted to image center\n intbins*arad/np.percentile(np.abs(intbins),98)+(ycen-aycen)+arad, #Intensity bins, rescaled and shifted by image center\n xcen-axcen+arad, ycen-aycen+arad, #Shifted center\n radlists[i], #Previously fitted rings\n repr(i+1)+\"/\"+repr(len(arclist))) #numstring\n #Changing images and loop breakout conditions\n if ringplot.key == \"d\": i+=1\n if ringplot.key == \"a\": i+=-1\n if i == -1 or i == len(arclist):\n while True:\n yn = raw_input(\"Finished marking ARC rings? 
(y/n) \")\n if \"n\" in yn or \"N\" in yn:\n if i == -1: i=0\n if i == len(arclist): i = len(arclist)-1\n break\n elif \"y\" in yn or \"Y\" in yn:\n break\n if i == -1 or i == len(arclist): break\n #Force-marking a ring\n if ringplot.key == \"e\" and ringplot.xcoo != None: radlists[i].append(ringplot.xcoo-arad-(xcen-axcen))\n #Deleting a ring\n if ringplot.key == \"s\" and ringplot.xcoo != None and len(radlists[i])>0:\n radlists[i].pop(np.argmin(np.array(radlists[i])-np.sqrt((ringplot.xcoo-arad-(xcen-axcen))**2 + (ringplot.ycoo-arad-(ycen-aycen))**2)))\n #Fitting a ring profile\n if ringplot.key == \"w\" and ringplot.xcoo != None:\n x = rbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]**2\n y = intbins[max(ringplot.xcoo-arad-(xcen-axcen)-50,0):min(ringplot.xcoo-arad-(xcen-axcen)+50,len(rbins))]\n fit = GaussFit(x,y)\n fitplot = PlotRingFit(x,y,fit)\n if fitplot.key == \"w\": radlists[i].append(np.sqrt(fit[2]))\n za = []\n ta = []\n ra = []\n for i in range(len(arclist)):\n for j in range(len(radlists[i])):\n za.append(arclist[i][0].header[\"ET1Z\"])\n ta.append(arclist[i][0].header[\"JD\"])\n ra.append(radlists[i][j])\n \n# #Load previous ring fits from a text file - COMMENT THIS OUT LATER\n# rr,zz,tt = np.loadtxt(\"test.out\",unpack=True)\n# za = list(zz[zz>0])\n# ta = list(tt[zz>0])\n# ra = list(rr[zz>0])\n# zo = list(zz[zz<0])\n# to = list(tt[zz<0])\n# ro = list(rr[zz<0])\n\n #Now we try to get a good guess at the wavelengths\n \n #Get a good guess at which wavelengths are which\n Bguess = objlist[0][0].header[\"ET1B\"]\n Fguess = 5600\n \n #Figure out A by matching rings to the wavelength libraries\n master_r = np.array(ro+ra)\n master_z = np.array(zo+za)\n wavematch = np.zeros_like(master_r)\n isnight = np.array([True]*len(ro)+[False]*len(ra))\n oldrms = 10000 #Really high initial RMS for comparisons\n for i in range(len(master_r)):\n if isnight[i]: lib = nightlib\n else: lib = arclib\n for j in range(len(lib)):\n #Assume the i'th ring is the j'th line\n Aguess = lib[j]*np.sqrt(1+master_r[i]**2/Fguess**2)-Bguess*master_z[i]\n #What are all of the other rings, given this A?\n waveguess = (Aguess+Bguess*master_z)/np.sqrt(1+master_r**2/Fguess**2)\n for k in range(len(master_r)):\n if isnight[k]: wavematch[k] = nightlib[np.argmin(np.abs(nightlib-waveguess[k]))]\n else: wavematch[k] = arclib[np.argmin(np.abs(arclib-waveguess[k]))]\n rms = np.sqrt(np.average((waveguess-wavematch)**2))\n if rms < oldrms:\n #This is the new best solution. Keep it!\n oldrms = rms\n bestA = Aguess\n master_wave = wavematch.copy()\n \n #Make more master arrays for the plotting\n master_t = np.array(to+ta)\n t0 = np.min(master_t)\n master_t += -t0\n master_t *= 24*60 #Convert to minutes\n master_color = np.array(len(ro)*[\"blue\"]+len(ra)*[\"red\"]) #Colors for plotting\n toggle = np.ones(len(master_r),dtype=\"bool\")\n dotime = False\n time_dividers = []\n \n #Do the interactive plotting\n while True:\n rplot = master_r[toggle]\n zplot = master_z[toggle]\n tplot = master_t[toggle]\n colorplot = master_color[toggle]\n waveplot = master_wave[toggle]\n fitplot = np.zeros(len(waveplot))\n xs = np.zeros((3,len(rplot)))\n xs[0] = rplot\n xs[1] = zplot\n xs[2] = tplot\n fit = [0]*(len(time_dividers)+1)\n time_dividers = sorted(time_dividers)\n if len(time_dividers)>1: print \"Warning: Too many time divisions is likely unphysical. 
Be careful!\"\n for i in range(len(time_dividers)+1):\n #Create a slice for all of the wavelengths before this time divider\n #but after the one before it\n if len(time_dividers)==0: tslice = tplot==tplot\n elif i == 0: tslice = tplot<time_dividers[i]\n elif i==len(time_dividers): tslice = tplot>time_dividers[i-1]\n else: tslice = np.logical_and(tplot<time_dividers[i],tplot>time_dividers[i-1])\n if dotime:\n fit[i] = curve_fit(fpfunc_for_curve_fit_with_t, xs[:,tslice], waveplot[tslice], p0=(bestA,Bguess,0,Fguess))[0]\n fitplot[tslice] = fpfunc_for_curve_fit_with_t(xs[:,tslice], fit[i][0], fit[i][1], fit[i][2], fit[i][3])\n else:\n fit[i] = curve_fit(fpfunc_for_curve_fit, xs[:,tslice], waveplot[tslice], p0=(bestA,Bguess,Fguess))[0]\n fitplot[tslice] = fpfunc_for_curve_fit(xs[:,tslice], fit[i][0], fit[i][1], fit[i][2])\n resid = waveplot - fitplot\n solnplot = WaveSolnPlot(rplot,zplot,tplot,waveplot,resid,colorplot,time_dividers)\n #Breakout case\n if solnplot.key == \"a\":\n while True:\n for i in range(len(time_dividers)+1):\n if dotime: print \"Solution 1: A = \"+str(fit[i][0])+\", B = \"+str(fit[i][1])+\", E = \"+str(fit[i][2])+\", F = \"+str(fit[i][3])\n else: print \"Solution 1: A = \"+str(fit[i][0])+\", B = \"+str(fit[i][1])+\", F = \"+str(fit[i][2])\n print \"Residual rms=\"+str(np.sqrt(np.average(resid**2)))+\" for \"+repr(len(time_dividers)+1)+\" independent \"+repr(3+dotime)+\"-parameter fits to \"+repr(len(rplot))+\" rings.\"\n yn = raw_input(\"Accept wavelength solution? (y/n) \")\n if \"n\" in yn or \"N\" in yn:\n break\n elif \"y\" in yn or \"Y\" in yn:\n solnplot.key = \"QUIT\"\n break\n if solnplot.key == \"QUIT\": break\n #Restore all points case\n if solnplot.key == \"r\": toggle = np.ones(len(master_r),dtype=\"bool\")\n #Delete nearest point case\n if solnplot.key == \"d\" and solnplot.axis != None:\n #Figure out which plot was clicked in\n if solnplot.axis == 1:\n #Resid vs. z plot\n z_loc = solnplot.xcoo\n resid_loc = solnplot.ycoo\n dist2 = ((zplot-z_loc)/(np.max(zplot)-np.min(zplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2\n elif solnplot.axis == 2:\n #Resid vs. R plot\n r_loc = solnplot.xcoo\n resid_loc = solnplot.ycoo\n dist2 = ((rplot-r_loc)/(np.max(rplot)-np.min(rplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2\n elif solnplot.axis == 3:\n #Resit vs. T plot\n t_loc = solnplot.xcoo\n resid_loc = solnplot.ycoo\n dist2 = ((tplot-t_loc)/(np.max(tplot)-np.min(tplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2\n elif solnplot.axis == 4:\n #Resid vs. 
Wave plot\n wave_loc = solnplot.xcoo\n resid_loc = solnplot.ycoo\n dist2 = ((waveplot-wave_loc)/(np.max(waveplot)-np.min(waveplot)))**2 + ((resid-resid_loc)/(np.max(resid)-np.min(resid)))**2\n #Get the radius and time of the worst ring\n r_mask = rplot[dist2 == np.min(dist2)][0]\n t_mask = tplot[dist2 == np.min(dist2)][0]\n toggle[np.logical_and(master_r == r_mask, master_t == t_mask)] = False\n #Fit for time case\n if solnplot.key == \"t\": dotime = not dotime\n #Add time break\n if solnplot.key == \"w\":\n timeplot = TimePlot(tplot,resid,colorplot,time_dividers)\n if timeplot.xcoo != None: time_dividers.append(timeplot.xcoo)\n #Remove time breaks\n if solnplot.key == \"q\":\n time_dividers = []\n\n #Close all images\n for i in range(len(fnlist)):\n imagelist[i].close()\n \n #For each image, write the central wavelength and F to the image header\n for i in range(len(fnlist)):\n image = openfits(fnlist[i],mode=\"update\")\n image_t = (image[0].header[\"JD\"]-t0)*24*60\n #Figure out which time division it's in\n div_index = np.where(np.array(time_dividers)>image_t)[0]\n if len(div_index>0): div_index = div_index[0]\n else: div_index = len(time_dividers)\n image_fit = fit[div_index]\n if dotime:\n image_wave0 = image_fit[0]+image_fit[1]*image[0].header[\"ET1Z\"]+image_fit[2]*image_t\n image_F = image_fit[3]\n else:\n image_wave0 = image_fit[0]+image_fit[1]*image[0].header[\"ET1Z\"]\n image_F = image_fit[2]\n image[0].header[\"FPWAVE0\"] = image_wave0\n image[0].header[\"FPCALF\"] = image_F\n image.close()\n \n #Restore the old keyword shortcut\n plt.rcParams[\"keymap.save\"] = oldsavekey\n \n \n \n return",
"def resonator_fit(self, sweep_points, linecut_mag):\n min_index = np.argmin(linecut_mag)\n max_index = np.argmax(linecut_mag)\n\n min_frequency = sweep_points[min_index]\n max_frequency = sweep_points[max_index]\n\n measured_powers_smooth = a_tools.smooth(linecut_mag,\n window_len=11)\n peaks = a_tools.peak_finder(sweep_points,\n measured_powers_smooth,\n window_len=0)\n\n # Search for peak\n if peaks['dip'] is not None: # look for dips first\n f0 = peaks['dip']\n amplitude_factor = -1.\n elif peaks['peak'] is not None: # then look for peaks\n f0 = peaks['peak']\n amplitude_factor = 1.\n else: # Otherwise take center of range\n f0 = np.median(sweep_points)\n amplitude_factor = -1.\n logging.warning('No peaks or dips in range')\n # If this error is raised, it should continue the analysis but\n # not use it to update the qubit object\n # N.B. This not updating is not implemented as of 9/2017\n\n # f is expected in Hz but f0 in GHz!\n Model = fit_mods.SlopedHangerAmplitudeModel\n # added reject outliers to be robust agains CBox data acq bug.\n # this should have no effect on regular data acquisition and is\n # only used in the guess.\n amplitude_guess = max(\n dm_tools.reject_outliers(np.sqrt(linecut_mag)))\n\n # Creating parameters and estimations\n S21min = (min(dm_tools.reject_outliers(np.sqrt(linecut_mag))) /\n max(dm_tools.reject_outliers(np.sqrt(linecut_mag))))\n\n Q = f0 / abs(min_frequency - max_frequency)\n Qe = abs(Q / abs(1 - S21min))\n\n # Note: input to the fit function is in GHz for convenience\n Model.set_param_hint('f0', value=f0 * 1e-9,\n min=min(sweep_points) * 1e-9,\n max=max(sweep_points) * 1e-9)\n Model.set_param_hint('A', value=amplitude_guess)\n Model.set_param_hint('Q', value=Q, min=1, max=50e6)\n Model.set_param_hint('Qe', value=Qe, min=1, max=50e6)\n # NB! Expressions are broken in lmfit for python 3.5 this has\n # been fixed in the lmfit repository but is not yet released\n # the newest upgrade to lmfit should fix this (MAR 18-2-2016)\n Model.set_param_hint('Qi', expr='abs(1./(1./Q-1./Qe*cos(theta)))',\n vary=False)\n Model.set_param_hint('Qc', expr='Qe/cos(theta)', vary=False)\n Model.set_param_hint('theta', value=0, min=-np.pi / 2,\n max=np.pi / 2)\n Model.set_param_hint('slope', value=0, vary=True)\n\n params = Model.make_params()\n\n data_x = sweep_points\n data_y = np.sqrt(linecut_mag)\n\n # # make sure that frequencies are in Hz\n # if np.floor(data_x[0]/1e8) == 0: # frequency is defined in GHz\n # data_x = data_x*1e9\n\n fit_res = Model.fit(data=data_y,\n f=data_x, verbose=False)\n return fit_res",
"def stack_plot(spec_list, offset = False, alpha=1.):\r\n\r\n import matplotlib.pyplot as plt\r\n\r\n offset_val = 0.\r\n for spec in spec_list:\r\n dat, errdat = read_spectrum(spec)\r\n plt.plot(dat['wave'], dat['flux']+offset_val, label = spec, alpha=alpha)\r\n if offset:\r\n offset_val -= np.median(dat['flux'])\r\n print spec\r\n plt.legend()\r\n plt.show()",
"def removeRunningPeaks(self):\n if not self.no_fitting:\n self.mfitter.removeRunningPeaks()",
"def plot_peaks(wavelists: list, title: str, save: bool, plot_path: str):\n\n # if a single WaveList is passed, package it in a list so the method works\n if isinstance(wavelists, WaveList):\n wavelists = [wavelists]\n\n columns = [{'desc': ' Before Algorithm', 'source': 'peaks_initial'},\n {'desc': ' After Sub Algorithm A', 'source': 'peaks_sub_a'},\n {'desc': ' After Sub Algorithm B', 'source': 'peaks_sub_b'},\n {'desc': ' After Sub Algorithm C&D', 'source': 'peaks_sub_c'}]\n\n fig, axs = plt.subplots(nrows=len(wavelists), ncols=len(columns), sharex=True, figsize=(14, 7))\n plt.suptitle(title)\n\n for i, wavelist in enumerate(wavelists):\n for j, column in enumerate(columns):\n peaks = getattr(wavelist, column['source'])['location'].values\n axs[i, j].set_title(wavelist.series_name + column['desc'])\n axs[i, j].plot(wavelist.raw_data.values)\n axs[i, j].scatter(peaks, wavelist.raw_data.values[peaks.astype(int)], color='red', marker='o')\n axs[i, j].get_xaxis().set_visible(False)\n axs[i, j].get_yaxis().set_visible(False)\n\n fig.tight_layout()\n\n if save:\n plt.savefig(os.path.join(plot_path, title + '.png'))\n plt.close('all')",
"def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n 
guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# 
plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', 
alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado",
"def add_X_NNLO_all_fit(axe, xran, values, errors, fill=False, save=False):\n B = values[\"B\"]\n F_0 = values[\"F_0\"]\n LAMBDA4 = values[\"Lambda4\"]\n LAMBDA3 = values[\"Lambda3\"]\n # LAMBDA12 = values[\"Lambda12\"]\n km = values[\"km\"]\n kf = values[\"kf\"]\n\n x = np.linspace(xran[0], xran[1], num=500)\n\n Msqr = x * (8 * (np.pi**2) * (F_0**2))\n arg4 = LAMBDA4**2 / Msqr\n arg3 = LAMBDA3**2 / Msqr\n # arg12 = LAMBDA12**2 / Msqr\n\n l1 = -0.4\n l2 = 4.3\n\n Lambda1sqr = (phys_pion**2) * np.exp(l1)\n Lambda2sqr = (phys_pion**2) * np.exp(l2)\n\n lnLambda12sqr = (7.0 * np.log(Lambda1sqr) + 8.0 * np.log(Lambda2sqr)) / 15.0\n lambda12sqr = np.exp(lnLambda12sqr)\n\n arg12 = lambda12sqr / Msqr\n\n lm = 1.0 / 51.0 * (60.0 * np.log(arg12) - 9.0 * np.log(arg3) + 49.0)\n lf = 1.0 / 30.0 * (30.0 * np.log(arg12) + 6.0 * np.log(arg3) - 6.0 * np.log(arg4) + 23.0)\n\n y = F_0 * (1.0 + x * np.log(arg4) - 5.0 / 4.0 * (x**2) * (lf)**2 + kf * x**2)\n\n plots = []\n paramstring = \" \".join(\"${}={}$\".format(format_parameters(k), print_paren_error(float(v), float(errors[k])))\n for k, v in sorted(values.iteritems()))\n paramstring = \"$ M_\\pi<{}$\".format(values[\" M_\\pi<\"])\n plabel = \"NNLO Mss=0 fit: {}\".format(paramstring)\n plabel = \"NNLO $a\\\\to 0$ $\\Delta Mss=0$ \"\n plabel = \"NNLO\"\n\n if \"cutoff\" in values:\n plabel += \" $M_\\pi < {}$\".format(values[\"cutoff\"])\n addplot(plots, axe, fill, save, x=x, y=y, params={\"label\":plabel, \"ls\":\"--\", \"lw\":4})\n\n return plots",
"def add_measurement(self):\n key, ok = QInputDialog.getText(self, 'Add measurement', 'Enter the parameter to measure:')\n if ok:\n if key in self.mgr.obj.measurements:\n print(\"parameter {:s} is already measured by the survey\".format(key))\n return\n idx = self.measurementsListWidget.currentRow()+1\n self.mgr.obj.add_measurement(key, idx=idx)\n self.load_measurements()\n self.measurementsListWidget.setCurrentRow(idx)",
"def fit_and_write_json(excel_file):\n print(excel_file)\n # These variables are used subsequently in the code\n x_data,y_data = read_data(excel_file)\n \n # Create a dictionary to store peaks for now\n data = {}\n \n height = []\n for i in range(169):\n peaks,_ = find_peaks(y_data[:,i],height=5000,distance=50)\n data[i] = np.array(peaks,dtype=int).tolist()\n \n # Currently the dictionary should look like {'1': 1, '2': 2, '3':2 ...} and so on\n peak_data = data\n \n # Iterating over all 13 X and 13 Ys \n for i in range(169):\n \n # If scipy.signal.find_peaks finds only one peak\n if len(peak_data[i]) == 1:\n gmodel = Model(gaussian)\n peak = x_data[peak_data[i][0],0]\n \n # Initialize appropriate singal from the peak data\n # center \"c1\" comes from the peak data itself\n c1 = peak\n a1 = y_data[peak_data[i][0],i]\n if peak <= 850:\n w1 = 20\n elif peak <= 900:\n w1 = 15\n else:\n w1 = 10\n\n # Fit using these initial estimates\n result = gmodel.fit(y_data[:,i], x=x_data[:,0],amp=a1,cen=c1,width=w1)\n y1 = gaussian(x_data,result.best_values['amp'],result.best_values['cen'],result.best_values['width'])\n new_dict = {'peak':1,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),\n 'y1':y1.tolist(),'mu1':result.best_values['cen']}\n \n elif len(peak_data[i]) == 2:\n # For two peaks\n peak1 = x_data[peak_data[i][0],0]\n peak2 = x_data[peak_data[i][1],0]\n \n c1 = peak1\n a1 = y_data[peak_data[i][0],i]\n c2 = peak2\n a2 = y_data[peak_data[i][1],i]\n if peak1<= 850:\n w1 = 20\n elif peak1 <= 900:\n w1 = 15\n else:\n w1 = 10\n \n if peak2<= 850:\n w2 = 20\n elif peak2 <= 900:\n w2 = 15\n else:\n w2 = 10\n\n # Fit two peaks\n gmodel = Model(gauss2)\n result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2)\n y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])\n y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])\n new_dict = {'peak':2,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),\n 'y1':y1.tolist(),'y2':y2.tolist(),\n 'mu1':result.best_values['c1'],'mu2':result.best_values['c2']}\n \n else:\n peak1 = x_data[peak_data[i][0],0]\n peak2 = x_data[peak_data[i][1],0]\n peak3 = x_data[peak_data[i][2],0]\n \n c1 = peak1\n a1 = y_data[peak_data[i][0],i]\n c2 = peak2\n a2 = y_data[peak_data[i][1],i]\n c3 = peak3\n a3 = y_data[peak_data[i][2],i]\n \n if peak1<= 850:\n w1 = 20\n elif peak1 <= 900:\n w1 = 15\n else:\n w1 = 10\n \n if peak2<= 850:\n w2 = 20\n elif peak2 <= 900:\n w2 = 15\n else:\n w2 = 10\n \n if peak3<= 850:\n w3 = 20\n elif peak3 <= 900:\n w3 = 15\n else:\n w3 = 10 \n \n # Fit three peaks\n gmodel = Model(gauss3)\n result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2,a3=a3,c3=c3,w3=w3)\n y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])\n y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])\n y3 = gaussian(x_data[:,0],result.best_values['a3'],result.best_values['c3'],result.best_values['w3'])\n new_dict = {'peak':3,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),'y1':y1.tolist(),\n 'y2':y2.tolist(),'y3':y3.tolist(),\n 'mu1':result.best_values['c1'],'mu2':result.best_values['c2'],\n 'mu3':result.best_values['c3']}\n peak_data[i] = new_dict\n \n \n # At this point all the fitting is completed\n # Write the data into a json file\n new_file_name = 'fitted_data/fitted_'+excel_file[5:]+'.json'\n with 
open(new_file_name, 'w') as outfile:\n ujson.dump(peak_data, outfile)",
"def PlotCollectionAddOneTwoThree(title, frit, ulist, addvalues):\n\tfig = pp.figure( figsize=printsize )\n\tnxpoints = 907\n\tfig.suptitle = \"TEST SUP TITLE\" #title\n\tplotspots = [231,232,233,234,235,236]\n\tfor i,(uvalue,ulabel,xrange,yrange) in enumerate(ulist):\n\t print('making sub-plot for u=%g' % (uvalue,)\t\n\t\tPlotOneSubplot( fig.add_subplot(plotspots[i]), frit, xrange, nxpoints, yrange, uvalue,ulabel, addvalues)\n\t#pp.show() \n\tfig.savefig(title+\".eps\", format='eps', pad_inches=0.02)\n\n\naddvalues = [\n # value to \"add\", color\n\t[ -2, \"#6688ee\" ],\n\t[ -1, \"#66aacc\" ],\n\t[ 0, \"#000000\" ],\n\t[ 1, \"#ee6688\" ],\n\t[ 2, \"#dd9966\" ],\n\t[ 3, \"#bbaa66\" ],",
"def fit_psp(data, search_window, clamp_mode, sign=0, exp_baseline=True, baseline_like_psp=False, refine=True, init_params=None, fit_kws=None, ui=None): \n import pyqtgraph as pg\n prof = pg.debug.Profiler(disabled=True, delayed=False)\n prof(\"args: %s %s %s %s %s %s %s %s\" % (search_window, clamp_mode, sign, exp_baseline, baseline_like_psp, refine, init_params, fit_kws))\n \n if ui is not None:\n ui.clear()\n ui.console.setStack()\n ui.plt1.plot(data.time_values, data.data)\n ui.plt1.addLine(x=search_window[0], pen=0.3)\n ui.plt1.addLine(x=search_window[1], pen=0.3)\n prof('plot')\n\n if fit_kws is None:\n fit_kws = {}\n if init_params is None:\n init_params = {}\n\n method = 'leastsq'\n fit_kws.setdefault('maxfev', 500)\n\n # good fit, slow\n # method = 'Nelder-Mead'\n \n # fit_kws.setdefault('options', {\n # 'maxiter': 300, \n \n # # 'disp': True,\n # })\n \n # good fit\n # method = 'Powell'\n # fit_kws.setdefault('options', {'maxfev': 200, 'disp': True})\n\n # bad fit\n # method = 'CG'\n # fit_kws.setdefault('options', {'maxiter': 100, 'disp': True})\n\n # method = 'L-BFGS-B'\n # fit_kws.setdefault('options', {'maxiter': 100, 'disp': True})\n\n # take some measurements to help constrain fit\n data_min = data.data.min()\n data_max = data.data.max()\n data_mean = data.mean()\n \n baseline_mode = float_mode(data.time_slice(None, search_window[0]).data)\n \n # set initial conditions depending on whether in voltage or current clamp\n # note that sign of these will automatically be set later on based on the \n # the *sign* input\n if clamp_mode == 'ic':\n amp_init = init_params.get('amp', .2e-3)\n amp_max = min(100e-3, 3 * (data_max-data_min))\n rise_time_init = init_params.get('rise_time', 5e-3)\n decay_tau_init = init_params.get('decay_tau', 50e-3)\n exp_tau_init = init_params.get('exp_tau', 50e-3)\n exp_amp_max = 100e-3\n elif clamp_mode == 'vc':\n amp_init = init_params.get('amp', 20e-12)\n amp_max = min(500e-12, 3 * (data_max-data_min))\n rise_time_init = init_params.get('rise_time', 1e-3)\n decay_tau_init = init_params.get('decay_tau', 4e-3)\n exp_tau_init = init_params.get('exp_tau', 4e-3)\n exp_amp_max = 10e-9\n else:\n raise ValueError('clamp_mode must be \"ic\" or \"vc\"')\n\n # Set up amplitude initial values and boundaries depending on whether *sign* are positive or negative\n if sign == -1:\n amp = (-amp_init, -amp_max, 0)\n elif sign == 1:\n amp = (amp_init, 0, amp_max)\n elif sign == 0:\n amp = (0, -amp_max, amp_max)\n else:\n raise ValueError('sign must be 1, -1, or 0')\n \n # initial condition, lower boundary, upper boundary\n base_params = {\n 'yoffset': (init_params.get('yoffset', baseline_mode), data_min, data_max),\n 'rise_time': (rise_time_init, rise_time_init/10., rise_time_init*10.),\n 'decay_tau': (decay_tau_init, decay_tau_init/10., decay_tau_init*10.),\n 'rise_power': (2, 'fixed'),\n 'amp': amp,\n }\n \n # specify fitting function and set up conditions\n psp = StackedPsp()\n if exp_baseline:\n if baseline_like_psp:\n exp_min = 0 if sign == 1 else -exp_amp_max \n exp_max = 0 if sign == -1 else exp_amp_max \n base_params['exp_tau'] = 'decay_tau'\n else:\n exp_min = -exp_amp_max \n exp_max = exp_amp_max \n base_params['exp_tau'] = (exp_tau_init, exp_tau_init / 10., exp_tau_init * 20.)\n base_params['exp_amp'] = (0.01 * sign * amp_init, exp_min, exp_max)\n else:\n base_params.update({'exp_amp': (0, 'fixed'), 'exp_tau': (1, 'fixed')})\n \n # print(clamp_mode, base_params, sign, amp_init)\n \n # if weight is None: #use default weighting\n # weight = np.ones(len(y))\n # 
else: #works if there is a value specified in weight\n # if len(weight) != len(y):\n # raise Exception('the weight and array vectors are not the same length') \n # fit_kws['weights'] = weight\n\n # Round 1: coarse fit\n\n # Coarse search xoffset\n n_xoffset_chunks = max(1, int((search_window[1] - search_window[0]) / 1e-3))\n xoffset_chunks = np.linspace(search_window[0], search_window[1], n_xoffset_chunks+1)\n xoffset = [{'xoffset': ((a+b)/2., a, b)} for a,b in zip(xoffset_chunks[:-1], xoffset_chunks[1:])]\n \n prof('prep for coarse fit')\n\n # Find best coarse fit \n search = SearchFit(psp, [xoffset], params=base_params, x=data.time_values, data=data.data, fit_kws=fit_kws, method=method)\n for i,result in enumerate(search.iter_fit()):\n pass\n # prof(' coarse fit iteration %d/%d: %s %s' % (i, len(search), result['param_index'], result['params']))\n fit = search.best_result.best_values\n prof(\"coarse fit done (%d iter)\" % len(search))\n\n if ui is not None:\n br = search.best_result\n ui.plt1.plot(data.time_values, br.best_fit, pen=(0, 255, 0, 100))\n\n if not refine:\n return search.best_result\n\n # Round 2: fine fit\n \n # Fine search xoffset\n fine_search_window = (max(search_window[0], fit['xoffset']-1e-3), min(search_window[1], fit['xoffset']+1e-3))\n n_xoffset_chunks = max(1, int((fine_search_window[1] - fine_search_window[0]) / .2e-3))\n xoffset_chunks = np.linspace(fine_search_window[0], fine_search_window[1], n_xoffset_chunks + 1)\n xoffset = [{'xoffset': ((a+b)/2., a, b)} for a,b in zip(xoffset_chunks[:-1], xoffset_chunks[1:])]\n\n # Search amp / rise time / decay tau to avoid traps\n rise_time_inits = base_params['rise_time'][0] * 1.2**np.arange(-1,6)\n rise_time = [{'rise_time': (x,) + base_params['rise_time'][1:]} for x in rise_time_inits]\n\n decay_tau_inits = base_params['decay_tau'][0] * 2.0**np.arange(-1,2)\n decay_tau = [{'decay_tau': (x,) + base_params['decay_tau'][1:]} for x in decay_tau_inits]\n\n search_params = [\n rise_time, \n decay_tau, \n xoffset,\n ]\n \n # if 'fixed' not in base_params['exp_amp']:\n # exp_amp_inits = [0, amp_init*0.01, amp_init]\n # exp_amp = [{'exp_amp': (x,) + base_params['exp_amp'][1:]} for x in exp_amp_inits]\n # search_params.append(exp_amp)\n\n # if no sign was specified, search from both sides \n if sign == 0:\n amp = [{'amp': (amp_init, -amp_max, amp_max)}, {'amp': (-amp_init, -amp_max, amp_max)}]\n search_params.append(amp)\n\n prof(\"prepare for fine fit %r\" % base_params)\n\n # Find best fit \n search = SearchFit(psp, search_params, params=base_params, x=data.time_values, data=data.data, fit_kws=fit_kws, method=method)\n for i,result in enumerate(search.iter_fit()):\n pass\n prof(' fine fit iteration %d/%d: %s %s' % (i, len(search), result['param_index'], result['params']))\n fit = search.best_result\n prof('fine fit done (%d iter)' % len(search))\n\n return fit",
"def add_X_NNLO_fixa0_fit(axe, xran, values, errors, fill=False, save=False):\n B = values[\"B\"]\n F_0 = values[\"F_0\"]\n\n LAMBDA4 = values[\"Lambda4\"]\n LAMBDA3 = values[\"Lambda3\"]\n # LAMBDA12 = values[\"Lambda12\"]\n km = values[\"km\"]\n kf = values[\"kf\"]\n\n x = np.linspace(xran[0], xran[1], num=500)\n\n gamma_2 = values[\"gamma_2\"]\n\n Msqr = x * (8 * (np.pi**2) * (F_0**2))\n arg4 = LAMBDA4**2 / Msqr\n arg3 = LAMBDA3**2 / Msqr\n # arg12 = LAMBDA12**2 / Msqr\n\n l1 = -0.4\n l2 = 4.3\n\n Lambda1sqr = (phys_pion**2) * np.exp(l1)\n Lambda2sqr = (phys_pion**2) * np.exp(l2)\n\n lnLambda12sqr = (7.0 * np.log(Lambda1sqr) + 8.0 * np.log(Lambda2sqr)) / 15.0\n lambda12sqr = np.exp(lnLambda12sqr)\n\n arg12 = lambda12sqr / Msqr\n\n lm = 1.0 / 51.0 * (60.0 * np.log(arg12) - 9.0 * np.log(arg3) + 49.0)\n lf = 1.0 / 30.0 * (30.0 * np.log(arg12) + 6.0 * np.log(arg3) - 6.0 * np.log(arg4) + 23.0)\n\n y = F_0 * (1.0 + x * np.log(arg4) - 5.0 / 4.0 * (x**2) * (lf)**2 + kf * x**2) / (1 + gamma_2 * (0.05**2))\n\n plots = []\n paramstring = \" \".join(\"${}={}$\".format(format_parameters(k), print_paren_error(float(v), float(errors[k])))\n for k, v in sorted(values.iteritems()))\n paramstring = \"$ M_\\pi<{}$\".format(values[\" M_\\pi<\"])\n plabel = \"NNLO Mss=0 fit: {}\".format(paramstring)\n plabel = \"NNLO\"\n if \"cutoff\" in values:\n plabel += \" $M_\\pi < {}$\".format(values[\"cutoff\"])\n addplot(plots, axe, fill, save, x=x, y=y, params={\"label\":plabel, \"ls\":\"--\", \"lw\":4})\n\n return plots",
"def append_basic_fit(self, param ,basic_fit):\n if param == SHAPE_STRING:\n self._shape_value[0].append(basic_fit.shape)\n self._shape_value[1].append(basic_fit.loc)\n self._shape_value[2].append(basic_fit.scale)\n self.shape_samples.append(basic_fit.samples)\n elif param == LOCATION_STRING:\n self._loc_value[0].append(basic_fit.shape)\n self._loc_value[1].append(basic_fit.loc)\n self._loc_value[2].append(basic_fit.scale)\n self.loc_samples.append(basic_fit.samples)\n elif param == SCALE_STRING:\n self._scale_value[0].append(basic_fit.shape)\n self._scale_value[1].append(basic_fit.loc)\n self._scale_value[2].append(basic_fit.scale)\n self.scale_samples.append(basic_fit.samples)\n else:\n err_msg = \"Parameter '{}' is unknown.\".format(param)\n raise ValueError(err_msg)",
"def params(self, ra, dec, ra_psf = None, dec_psf =None, r_cut=100, add_mask=5, pick_choice=False,\n multi_band_type='joint-linear', kwargs_numerics={}, img_name='prodata_psf.pdf',img_id=0, if_plot=True):\n kwargs_numerics = self.numerics(**kwargs_numerics)\n kwargs_psf = self.pick_psf(ra=ra_psf, dec=dec_psf)\n multi_band_list =[]\n x_detector = []\n y_detector = []\n xylenslight_list = []\n data_mask_list = []\n lens_mask_list = []\n plu_mask_out_list = []\n for i in range(len(ra)):\n xy = self.radec2detector(ra[i], dec[i])\n x_detector.append(xy[0])\n y_detector.append(xy[1])\n cutsize = self.cutsize(xy[0], xy[1], r_cut=r_cut)\n kwargs_data, _, xylenslight= self.data_assemble(x=xy[0], y=xy[1],r_cut=cutsize, add_mask=add_mask,pick_choice=pick_choice)\n multi_band_list.append([kwargs_data, kwargs_psf, kwargs_numerics])\n xylenslight_list.append(xylenslight)\n data_mask_list.append(self.data_mask)\n lens_mask_list.append(self.lens_mask)\n plu_mask_out_list.append(self.plu_mask)\n #plot data, lens light, pollution\n if if_plot:\n self.plot_prodata_psf(img_name=img_name,img_id=img_id)\n kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': multi_band_type}\n self.data_mask_list = data_mask_list\n self.lens_mask_list = lens_mask_list\n self.plu_mask_out_list = plu_mask_out_list\n\n return x_detector, y_detector, xylenslight, kwargs_data_joint",
"def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak",
"def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak",
"def addFakes(v):\n try:\n fakes = input(\" Enter IP addresses separated with a space: \")\n except KeyboardInterrupt:\n return\n\n fakes = fakes.split(\" \")\n tmp = True\n\n for x in fakes:\n if validIPAddress(x):\n bash = (\"ip addr add \" + x + \"/0 dev dummy label dummy:\" + str(len(v.fakes)))\n os.system(bash)\n if len(v.fakes) == 0:\n v.fakes.append(x)\n else:\n for y in v.fakes:\n if y == x:\n tmp = False\n\n if tmp:\n v.fakes.append(x)\n else:\n print(\" \" + bcolors.WARNING + x + \" already in list\" + bcolors.ENDC)\n tmp = True\n time.sleep(1)\n\n else:\n print(\" \" + bcolors.WARNING + x + \" is not a valid IP\" + bcolors.ENDC)\n time.sleep(1)\n\n return",
"def fluxes(wavelength, s, line, lowlow= 14, lowhigh=6, highlow=6, highhigh = 14, lmin=0, lmax=0, fmin=0, fmax=0, \n broad=2.355, plot=True, verbose=True, plot_sus = False, fcal = True, fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1,\n # s must be an array, no a list\n try: \n index_maximo_del_rango = s.tolist().index(np.nanmax(s))\n #print \" is AN ARRAY\"\n except Exception:\n #print \" s is A LIST -> must be converted into an ARRAY\" \n s = np.array(s)\n \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n if np.isnan(np.nanmedian(f_spec)): \n # The data are NAN!! Nothing to do\n if verbose or warnings: print(\" There is no valid data in the wavelength range [{},{}] !!\".format(lmin,lmax))\n \n resultado = [0, line, 0, 0, 0, 0, 0, 0, 0, 0, 0, s ] \n\n return resultado\n \n else: \n \n ## 20 Sep 2020\n f_spec_m=signal.medfilt(f_spec,median_kernel) # median_kernel = 35 default\n \n \n # Remove nans\n median_value = np.nanmedian(f_spec)\n f_spec = [median_value if np.isnan(x) else x for x in f_spec] \n \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # We have to find some \"guess numbers\" for the Gaussian. Now guess_centre is line\n guess_centre = line\n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n \n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n #print line #f_cont\n # if line == 8465.0:\n # print w_cont\n # print f_cont_filtered\n # plt.plot(w_cont,f_cont_filtered)\n # plt.show()\n # plt.close()\n # warnings=True\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value b = \",bb,\": cont = 0 * w_spec + \", bb)\n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n \n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < 
guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = a + b*np.array(w_cont) \n \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n \n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line)\n mini = np.nanmin(min_w)\n # guess_peak = f_spec[min_w.tolist().index(mini)] # WE HAVE TO SUSTRACT CONTINUUM!!!\n guess_peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n \n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a \n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n low_limit = ws[sorted_by_flux[0]]\n except Exception:\n plot=True\n low_limit = 0\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n high_limit = ws[sorted_by_flux[0]] \n except Exception:\n plot=True\n high_limit = 0 \n \n # Guess centre will be the highest value in the range defined by [low_limit,high_limit]\n \n try: \n rango = np.where((high_limit >= wavelength ) & (low_limit <= wavelength)) \n index_maximo_del_rango = s.tolist().index(np.nanmax(s[rango]))\n guess_centre = wavelength[index_maximo_del_rango]\n except Exception:\n guess_centre = line #### It was 0 before\n \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre, guess_peak, broad/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(gauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n \n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n \n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n # if low_limit < fit[0] < high_limit:\n if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:\n # if verbose: print \" Fitted center wavelength\", fit[0],\"is NOT in the range [\",low_limit,\",\",high_limit,\"]\"\n if verbose: print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n # print \"Re-do fitting fixing center wavelength\"\n # p01 = [guess_peak, broad]\n # fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...\n # fit_error1 = np.sqrt(np.diag(pcov1))\n # fit[0]=guess_centre\n # fit_error[0] = 0.\n # fit[1] = fit1[0]\n # fit_error[1] = fit_error1[0]\n # fit[2] = fit1[1]\n # fit_error[2] = fit_error1[1] \n \n fit[0]=guess_centre\n fit_error[0] = 0.000001\n fit[1]=guess_peak\n fit_error[1] = 0.000001\n fit[2] = broad/2.355\n fit_error[2] = 0.000001 \n else:\n if verbose: print(\" Fitted center wavelength\", fit[0],\"IS in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n \n if verbose: print(\" Fit parameters = \", fit[0], fit[1], fit[2])\n if fit[2] == broad and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.\") \n gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])\n \n \n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations\n gaussian_flux = gauss_flux(fit[1],fit[2])\n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n index=0\n s_s=np.zeros_like(s)\n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n \n # Plotting \n ptitle = 'Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit)\n if plot :\n plt.figure(figsize=(10, 4))\n # Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.8)\n # Plot median input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec_m), \"orange\", lw=3, alpha = 0.5) # 2021: era \"g\"\n # Plot spectrum - gauss subtracted\n plt.plot(wavelength,s_s,\"g\",lw=3, alpha = 0.6)\n \n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$ ]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.3)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n plt.plot(w_spec, residuals, 'k')\n plt.title(ptitle)\n plt.show()\n \n # Printing results\n if verbose :\n print(\"\\n - Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( 
%.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # Plot independent figure with substraction if requested \n if plot_sus: plot_plot(wavelength,[s,s_s], xmin=lmin, xmax=lmax, ymin=fmin, ymax=fmax, fcal=fcal, frameon=True, ptitle=ptitle)\n \n # 0 1 2 3 4 5 6 7 8 9 10 11\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s ]\n return resultado \n except Exception:\n if verbose: \n print(\" - Gaussian fit failed!\")\n print(\" However, we can compute the integrated flux and the equivalent width:\")\n \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n \n if verbose:\n print(\" Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n \n resultado = [0, guess_centre, 0, 0, 0, 0, 0, flux, flux_error, ew, ew_error, s ] # guess_centre was identified at maximum value in the [low_limit,high_limit] range but Gaussian fit failed\n \n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n # plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n # plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n # plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n \n \n return resultado",
"def collapse_to_spectrum(self, add_data=True, **kwargs):\n # get glue Data objects for the spectral cube and uncertainties\n flux_viewer = self._app.get_viewer(\n self._app._jdaviz_helper._default_flux_viewer_reference_name\n )\n uncert_viewer = self._app.get_viewer(\n self._app._jdaviz_helper._default_uncert_viewer_reference_name\n )\n [spectral_cube] = flux_viewer.data()\n [uncert_cube] = uncert_viewer.data()\n\n # This plugin collapses over the *spatial axes* (optionally over a spatial subset,\n # defaults to ``No Subset``). Since the Cubeviz parser puts the fluxes\n # and uncertainties in different glue Data objects, we translate the spectral\n # cube and its uncertainties into separate NDDataArrays, then combine them:\n if self.spatial_subset_selected != self.spatial_subset.default_text:\n nddata = spectral_cube.get_subset_object(\n subset_id=self.spatial_subset_selected, cls=NDDataArray\n )\n uncertainties = uncert_cube.get_subset_object(\n subset_id=self.spatial_subset_selected, cls=StdDevUncertainty\n )\n else:\n nddata = spectral_cube.get_object(cls=NDDataArray)\n uncertainties = uncert_cube.get_object(cls=StdDevUncertainty)\n\n # Use the spectral coordinate from the WCS:\n if '_orig_spec' in spectral_cube.meta:\n wcs = spectral_cube.meta['_orig_spec'].wcs.spectral\n else:\n wcs = spectral_cube.coords.spectral\n\n flux = nddata.data << nddata.unit\n mask = nddata.mask\n\n nddata_reshaped = NDDataArray(\n flux, mask=mask, uncertainty=uncertainties, wcs=wcs, meta=nddata.meta\n )\n\n # by default we want to use operation_ignores_mask=True in nddata:\n kwargs.setdefault(\"operation_ignores_mask\", True)\n # by default we want to propagate uncertainties:\n kwargs.setdefault(\"propagate_uncertainties\", True)\n\n # Collapse an e.g. 3D spectral cube to 1D spectrum, assuming that last axis\n # is always wavelength. This may need adjustment after the following\n # specutils PR is merged: https://github.com/astropy/specutils/pull/1033\n spatial_axes = (0, 1)\n\n collapsed_nddata = getattr(nddata_reshaped, self.function_selected.lower())(\n axis=spatial_axes, **kwargs\n ) # returns an NDDataArray\n\n # Convert to Spectrum1D, with the spectral axis in correct units:\n if hasattr(spectral_cube.coords, 'spectral_wcs'):\n target_wave_unit = spectral_cube.coords.spectral_wcs.world_axis_units[0]\n else:\n target_wave_unit = spectral_cube.coords.spectral.world_axis_units[0]\n\n flux = collapsed_nddata.data << collapsed_nddata.unit\n mask = collapsed_nddata.mask\n uncertainty = collapsed_nddata.uncertainty\n\n collapsed_spec = _return_spectrum_with_correct_units(\n flux, wcs, collapsed_nddata.meta, 'flux',\n target_wave_unit=target_wave_unit,\n uncertainty=uncertainty,\n mask=mask\n )\n\n if add_data:\n self.add_results.add_results_from_plugin(\n collapsed_spec, label=self.results_label, replace=False\n )\n\n snackbar_message = SnackbarMessage(\n \"Spectrum extracted successfully.\",\n color=\"success\",\n sender=self)\n self.hub.broadcast(snackbar_message)\n\n return collapsed_spec",
"def delete_fit(self):\n self.fft_fit_plotter.delete_plot(self.ax)\n plt.draw()",
"def addData(self, data, model, limits,l=1024, **kwargs):\n n = len(data) # Number of data points\n data = array(data)\n (ll,ul) = limits #limits for the parameter space\n step = (ul-ll)/float(l)\n \n if model == 'normal': # In this case, L is a function of the mean. SD is set to the SD(data)\n sd = std(data) #standard deviation of data\n prec = 1/sd #precision of the data\n res = array([exp(like.Normal(data,mu,prec)) for mu in arange(ll,ul,step)]) \n lik = res/max(res) # Likelihood function \n print max(lik), min(lik)\n elif model == 'exponential':\n res = [lamb**n*exp(-lamb*sum(data)) for lamb in arange(ll,ul,step)]\n lik = array(res)/max(array(res))\n elif model == 'beta':\n # TODO: Make sure pars is passed as an extra parameter\n res = [exp(like.Beta(data,*kwargs['pars'])) for i in arange(ll,ul,step)]\n lik = array(res)/max(array(res))\n elif model == 'bernoulli':\n if ll<0 or ul>1:\n print \"Parameter p of the bernoulli is out of range[0,1]\"\n res = [exp(like.Bernoulli(data,p)) for p in arange(ll,ul,step)]\n lik = array(res)/max(array(res))\n \n elif model == 'poisson':\n res = [exp(like.Poisson(data,lb)) for lb in arange(ll,ul,step)]\n lik = array(res)/max(array(res))\n \n elif model == 'lognormal':\n sd = std(data) #standard deviation of data\n prec = 1/sd #precision of the data\n res = [exp(like.Lognormal(data,mu,prec)) for mu in arange(ll,ul,step)]\n lik = array(res)/max(array(res)) \n else:\n print 'Invalid distribution type. Valid distributions: normal,lognormal, exponential, bernoulli and poisson'\n self.likelist.append(lik)\n return lik",
"def add_detectors(self, detect_list):\n if self.barrier is None:\n raise RuntimeError(\"You need to call setup_processes() first\")\n try:\n if 'KoopaTroopaBeach' not in self.variables[0]['course']:\n # Find SHORTCUT and remove it\n for detector in detect_list:\n if isinstance(detector, detection.Shortcut):\n detect_list.remove(detector)\n break\n except:\n # Assume phase 0\n pass\n\n self.manager.set_detectors(detect_list)\n self.manager.start_workers()"
] | [
"0.56082404",
"0.5501705",
"0.53077525",
"0.52218217",
"0.50149703",
"0.4998173",
"0.4938318",
"0.49304774",
"0.48862016",
"0.48674485",
"0.48613563",
"0.4812949",
"0.48021144",
"0.47739965",
"0.4761532",
"0.4754908",
"0.47529873",
"0.47505748",
"0.4730679",
"0.4723984",
"0.46782076",
"0.46744406",
"0.46407452",
"0.4634632",
"0.46307972",
"0.46295026",
"0.4627918",
"0.46120805",
"0.45967075",
"0.45911536"
] | 0.6284394 | 0 |
This function prints out a display of the contents of any hdf5 file. It prints the filename followed by a list of the groups and datasets in a familiar directory/file format. Groups (folders) appear in bold while datasets (files) appear in a standard font. | def view_hdf5(filename):
# handling input errors
if not isinstance(filename, str):
raise TypeError('Passed value of `filename` is not a string! Instead, it is: '
+ str(type(filename)))
    if filename.split('/')[-1].split('.')[-1] != 'hdf5':
raise TypeError('`filename` is not type = .hdf5! Instead, it is: '
+ filename.split('/')[-1].split('.')[-1])
    # print groups and datasets in the first three layers
print('**** {} ****'.format(filename))
hdf5 = h5py.File(filename, 'r')
for _, layer_1 in enumerate(list(hdf5.keys())):
if isinstance(hdf5[layer_1], h5py.Group):
print('\033[1m{}\033[0m'.format(layer_1))
for _, layer_2 in enumerate(list(hdf5[layer_1].keys())):
if isinstance(hdf5['{}/{}'.format(layer_1, layer_2)], h5py.Group):
print('| \033[1m{}\033[0m'.format(layer_2))
for _, layer_3 in enumerate(list(hdf5['{}/{}'.format(layer_1, layer_2)])):
if isinstance(hdf5['{}/{}/{}'.format(layer_1, layer_2, layer_3)],
h5py.Group):
print('| | \033[1m{}\033[0m/...'.format(layer_3))
else:
print('| | {}'.format(layer_3))
else:
print('| {}'.format(layer_2))
else:
print('{}'.format(layer_1))
hdf5.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_h5(fname: str) -> None:\n try:\n with h5py.File(fname, 'r') as h:\n print(fname)\n recursively_print_structure(h, ' ')\n except IOError as e:\n print(f\"Cannot open HDF5 file {fname}\")\n print(f\"IOError: {e}\")",
"def printAllColumnsInH5(pathToData):\n\n #Check if a correct path is given\n\n if not os.path.isfile(pathToData):\n raise ValueError(\"h5 file not found. Wrong path given?\")\n elif os.path.isfile(pathToData):\n Data = h5.File(pathToData, 'r')\n\n\n Files = Data.keys()\n\n for File in Files:\n print()\n print('Filename = %s' %(File))\n print('----------------------')\n\n #Every time you see Xr*' '\n #It means I add X spaces to line it\n print('\\t column name%sunit%slength'%(29*' ',16*' '))\n print('\\t '+'-----------------'*4)\n \n #In this file give me all the column names\n columns = Data[File].keys()\n \n #for every column in the columns\n for nrc,column in enumerate(columns):\n #always want the column name printed in 40 char\n spaces = ' '*(40 - len(column))\n length = Data[File][column].shape[0]\n #always want the unit name printed over 20 chars\n unit = Data[File][column].attrs['units']\n spaces2 = ' '*(20 - len(unit))\n #--\n length = Data[File][column].shape[0]\n\n print('\\t %s%s%s%s%s'%(column,spaces, unit,spaces2, length))\n #Every 4 lines print a dashed line to read output easier\n if (nrc%5==4):\n print('\\t '+'-----------------'*4)\n Data.close()",
"def print_h5(g, offset = '\\t\\t'):\r\n import h5py\r\n if isinstance(g,h5py.File) :\r\n print g.file, '(File)', g.name\r\n\r\n elif isinstance(g,h5py.Dataset) :\r\n print '(Dataset)', g.name, ' len =', g.shape #, g.dtype\r\n\r\n elif isinstance(g,h5py.Group) :\r\n print '(Group)', g.name\r\n\r\n if isinstance(g, h5py.File) or isinstance(g, h5py.Group) :\r\n for key,val in dict(g).iteritems() :\r\n subg = val\r\n print offset, key, #,\" \", subg.name #, val, subg.len(), type(subg),\r\n print_h5(subg, offset + ' ')",
"def display_dataset(path, save, dset='sum'):\n # List datasets\n files_surf = os.listdir(path[0])\n files_surf.sort()\n files_deep = os.listdir(path[1])\n files_deep.sort()\n files_calc = os.listdir(path[2])\n files_calc.sort()\n\n # Corrected names\n files = os.listdir(r'Y:\\3DHistoData\\Subvolumes_2mm')\n files.sort()\n\n k = 0\n # Loop for displaying images\n for fsurf, fdeep, fcalc in zip(files_surf, files_deep, files_calc):\n # Load images\n im_surf = loadh5(path[0], fsurf, dset)\n im_deep = loadh5(path[1], fdeep, dset)\n im_calc = loadh5(path[2], fcalc, dset)\n # Create figure\n fig = plt.figure(dpi=300)\n ax1 = fig.add_subplot(131)\n ax1.imshow(im_surf, cmap='gray')\n plt.title(fsurf + ', Surface')\n ax2 = fig.add_subplot(132)\n ax2.imshow(im_deep, cmap='gray')\n plt.title('Deep')\n ax3 = fig.add_subplot(133)\n ax3.imshow(im_calc, cmap='gray')\n plt.title('Calcified')\n if save is not None:\n while files[k] == 'Images' or files[k] == 'MeanStd':\n k += 1\n\n # Save figure\n if not os.path.exists(save):\n os.makedirs(save, exist_ok=True)\n plt.tight_layout()\n fig.savefig(os.path.join(save, files[k]), bbox_inches=\"tight\", transparent=True)\n plt.close()\n\n # Save h5\n if not os.path.exists(save + '\\\\MeanStd\\\\'):\n os.makedirs(save + '\\\\MeanStd\\\\', exist_ok=True)\n\n h5 = h5py.File(save + \"\\\\MeanStd\\\\\" + files[k] + '.h5', 'w')\n h5.create_dataset('surf', data=im_surf)\n h5.create_dataset('deep', data=im_deep)\n h5.create_dataset('calc', data=im_calc)\n h5.close()\n else:\n plt.show()\n k += 1",
"def show_contents(self):\n print(self.filename, 'loaded')\n\n table = [['group', 'parameter']]\n for group in self.file:\n table.append([group, self.dict[group]])\n display(HTML(tabulate.tabulate(table, tablefmt='html')))\n\n print('Call directly as an attribute or call (parameter) or (group, parameter) to retrieve data')\n print('Use .show_info(group) to show parameter shapes')",
"def display(self, contents=False, recurse=False): # FileObj.display\n print '# File\\t\\t' + str(self.deleted) + '\\t' + str(self.ignore) + '\\t' + str(self.depth) + '\\t' + self.hexdigest + ' ' + self.pathname + ' '",
"def print_structure(weight_file_path):\r\n f = h5py.File(\"./mnist_nn_quantized_zeroone_FC.h5\")\r\n file = open(\"datafile.txt\",\"a\")\r\n\r\n try:\r\n if len(f.attrs.items()):\r\n print(\"{} contains: \".format(weight_file_path))\r\n print(\"Root attributes:\")\r\n for key, value in f.attrs.items():\r\n print(\" {}: {}\".format(key, value))\r\n\r\n if len(f.items())==0:\r\n return \r\n\r\n for layer, g in f.items():\r\n print(\" {}\".format(layer))\r\n print(\" Attributes:\")\r\n for key, value in g.attrs.items():\r\n print(\" {}: {}\".format(key, value))\r\n\r\n print(\" Dataset:\")\r\n for p_name in g.keys():\r\n param = g[p_name]\r\n subkeys = param.keys()\r\n for k_name in param.keys():\r\n file.write(\" {}/{}: {}\".format(p_name, k_name, (param.get(k_name)[:]+1)/2))\r\n #print(\" {}/{}: {}\".format(p_name, k_name, param.get(k_name)[:]))\r\n \r\n finally:\r\n f.close()",
"def print_structure(weight_file_path):\n f = h5py.File(weight_file_path)\n try:\n if len(f.attrs.items()):\n print(\"{} contains: \".format(weight_file_path))\n print(\"Root attributes:\")\n for key, value in f.attrs.items():\n print(\" {}: {}\".format(key, value))\n\n if len(f.items())==0:\n return \n\n for layer, g in f.items():\n print(\" {}\".format(layer))\n print(\" Attributes:\")\n for key, value in g.attrs.items():\n print(\" {}: {}\".format(key, value))\n\n print(\" Dataset:\")\n for p_name in g.keys():\n param = g[p_name]\n subkeys = param.keys()\n for k_name in param.keys():\n print(\" {}/{}: {}\".format(p_name, k_name, len(param.get(k_name))))\n print(\" {}/{}: {}\".format(p_name, k_name, param.get(k_name)[:]))\n\t\t\t\t\t#if(k_name == \"kernel\"):\n \n #for k_whatever in param.get(k_name):\n\t\t\t\t\t\t\t#print(\"\t\t {}/{}: {}\".format(p_name, k_name, len(k_whatever)))\n \n finally:\n f.close()",
"def show_hdf(self):\n self._walk()",
"def visualize_h5_dataset(\n h5_file,\n dataset_key):\n r_cmd= (\n \"plot-h5.dataset.R {0} {1}\").format(\n h5_file,\n dataset_key)\n print r_cmd\n os.system(r_cmd)\n \n return None",
"def showFileTree():\n\treturn 0",
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata",
"def do_tree(self, args, opts=None):\n global __groupcount\n global __datasetcount\n __groupcount = 0\n __datasetcount = 0\n\n def children(item):\n if isinstance(item, h5py.Dataset):\n return []\n else:\n return [i[1] for i in item.items()]\n\n def format(item):\n name = os.path.basename(item.name)\n if name == '':\n name = '/'\n if isinstance(item, h5py.Dataset):\n if opts.shape:\n name = name + ' ' + str(item.shape)\n global __datasetcount\n __datasetcount += 1\n elif isinstance(item, h5py.Group):\n global __groupcount\n __groupcount += 1\n return name\n\n if len(args) == 0:\n args.append('')\n group = self.explorer.group(args[0])\n tree_format.print_tree(group, format, children)\n print('{} groups, {} datasets'.format(__groupcount - 1, __datasetcount))",
"def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()",
"def report(self):\n print()\n print(\"%-15s %-25s %s\" % (\"Class\", \"Name\", \"File\"))\n print(\"%-15s %-25s %s\" % (\"-----\", \"----\", \"----\"))\n for m in sorted(self.flatten(), key=lambda n: n.identifier):\n print(\"%-15s %-25s %s\" % (type(m).__name__, m.identifier, m.filename or \"\"))",
"def tabular_print(files_dict: dict):\r\n # create a list of file extensions\r\n file_extensions = []\r\n for filename in files_dict.keys():\r\n for file_ext in files_dict[filename].keys():\r\n # print(\"debug:::\", file_ext)\r\n file_extensions.append(file_ext)\r\n break\r\n # go through all the files and print them in a table with the file extension as the top row\r\n sep_line_len = 40 + 10 * len(file_extensions) # separator line length = max_filename_len [35] + 10*number of ext\r\n # print the first row\r\n print(\"filename\".ljust(40), end='')\r\n for ext in file_extensions:\r\n print(\"|\" + ext.center(9), end='')\r\n print()\r\n print(''.center(sep_line_len, '='))\r\n # print the rest of the files\r\n for filename, ext_dict in files_dict.items():\r\n print(filename.ljust(40), end='')\r\n for ext in ext_dict.keys():\r\n if ext_dict[ext]:\r\n print(\"|\" + \"V\".center(9), end='')\r\n else:\r\n print(\"|\" + \" \".center(9), end='')\r\n print()\r\n print(''.center(sep_line_len, '-'))",
"def print_tree(tree):\n if not tree:\n print None\n return\n \n if tree.children:\n print 'Directory hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))\n print 'Contents:'\n for name, subtree in tree.children.iteritems():\n print\n print name\n print_tree(subtree)\n \n else:\n print 'File hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))",
"def print_structure(file_path):\n pprint(read_or_exit(file_path), width=140)",
"def display(self, contents=False, recurse=False): # DirObj.display\n if recurse:\n for name, entry in self.subdirs.iteritems():\n entry.display(contents, recurse)\n if contents:\n for name, entry in self.files.iteritems():\n entry.display(contents, recurse);\n print '# Directory\\t' + str(self.deleted) + '\\t' + str(self.ignore) + '\\t' + str(self.depth) + '\\t' + self.hexdigest + ' ' + self.pathname",
"def load_and_print_file_list(file_list):\n for file in file_list:\n hdu_list = load_file(file)\n print(\"'{0}' has {1} hdus in it\".format(file, len(hdu_list)))\n for ii in range(len(hdu_list)):\n hdu1 = hdu_list[ii] # FITS HDU counting is from 1\n print('BITPIX type of HDU{0} = {1}'.format(ii + 1,\n hdu1.header['BITPIX']))\n # be sure to close the file handle\n hdu_list.close()",
"def display(items):\n\n # LOC, COMMENT, ...\n # (same as keys of TYPE_OF_LINE, but better to only rely on items here)\n what = next(iter(items))[1]\n\n # Headers\n print(bcolors.BOLD\n +(\"{:<30}\"+\":{:>10}\"*len(what)).format(\"path\", *what)\n +bcolors.ENDC)\n\n # Lines\n for k,v in items:\n print((bcolors.OKGREEN if v[\"LOC\"] == 0\n else bcolors.FAIL if v[\"COMMENTS\"] == 0\n else bcolors.WARNING if v[\"COMMENTS\"]/v[\"LOC\"] < 0.2\n else bcolors.OKGREEN )\n +(\"{:<30}\"+\":{:>10}\"*len(v)).format(k, *v.values())\n + bcolors.ENDC)",
"def visualize(stuff, **options):\n separate = r\"\\newpage\" #by default, a new tupel is put on a new page\n name = \"some_text_file\" #by default this file is used\n for key in options:\n if key == \"separate\":\n separate = options[key]\n if key == \"name\":\n name = options[key]\n works = True\n totallines = [r\"\\documentclass{article}\", r\"\\usepackage{xcolor}\", r\"\\usepackage{tikz,pgf}\", r\"\\usepackage[left = 0 cm, top = 0cm, bottom = 0cm, right = 2cm]{geometry}\", r\"\\begin{document}\", r\"\\pagestyle{empty}\"]\n for description in stuff:\n data = stuff[description]\n if checkdataformat(description, data):\n if description == \"config\":\n lines = gentikz(data)\n elif description == \"movelist\":\n lines = showmoveslist(data[0], data[1], data[2])\n elif description == \"movelists\":\n lines = compareshowmoveslists(data[0], data[1], data[2])\n elif description == \"list\":\n lines = showlist(data)\n elif description == \"configurations\":\n lines = showconfigurations(data)\n elif description == \"movetable\":\n lines = nktable(data[0], data[1], sort = 'value')\n elif description == \"incrementtable\":\n lines = nktable(data[0], data[1], sort = 'increment')\n elif description == \"totalptable\":\n lines = nktable(data[0], data[1], sort = 'totalpossibilities')\n elif description == \"ptable\":\n lines = nktable(data[0], data[1], sort = 'adjustedpossibilities')\n elif description == \"bfptable\":\n lines = nktable(data[0], data[1], sort = 'bfadjustedpossibilities')\n else:\n print(\"unknown description\")\n lines = []\n for line in lines:\n totallines.append(line)\n totallines.append(separate)\n else:\n print(description, \":\", data, \"don't match, please read help(visualization)\")\n works = False\n totallines.append(r\"\\end{document}\")\n if works:\n compile(totallines, name)",
"def get_lh5_header(in_file, verbose=False):\n hf = h5py.File(in_file)\n\n # pretty print the raw structure, with all attributes\n if verbose:\n def print_groups(name, obj):\n if isinstance(obj, h5py.Group):\n print(f\"GROUP /{name}\")\n indent = \" \"\n if isinstance(obj, h5py.Dataset):\n print(\" DATASET\", obj.shape, obj.name)\n indent = \" \"\n for att, val in obj.attrs.items():\n print(f\"{indent}ATTRIBUTE {att}:\", val)\n print(\" \")\n hf.visititems(print_groups) # accesses __call__\n \n # find each LH5 \"Table\" contained in the file, and create a DataFrame header\n tables = {}\n for g_top in hf.keys():\n \n h5group = hf[f\"/{g_top}\"]\n attrs = {att:val for att, val in h5group.attrs.items()}\n \n # LH5 table condition\n if \"datatype\" in attrs.keys() and \"table{\" in attrs[\"datatype\"]:\n \n # call our nice iterator at this group level\n table = {g_top:[]}\n for (path, name, size, dtype, units, spec) in get_datasets(h5group):\n table[g_top].append((name, size, dtype, units, spec))\n \n hdr = pd.DataFrame(table[g_top], columns=['name','size','dtype',\n 'units','spec'])\n \n # fix waveform datatype to match flattened_data\n if 'waveform' in hdr['name'].values:\n wf_dt = h5group['waveform/values/flattened_data'].dtype\n hdr.loc[hdr['name'] == 'waveform', ['dtype']] = wf_dt\n \n tables[g_top] = hdr\n\n return tables",
"def display_headers(model_file, model_data):\n # netCDF header\n print('\\n\\nnetCDF header information:\\n\\n', flush=True)\n\n # dimension information.\n nc_dims = [dim for dim in model_data.dimensions] # list of netCDF dimensions\n print ('\\tdimensions:', flush=True)\n for dim in nc_dims:\n print('\\t\\t{} {}'.format(model_data.dimensions[dim].name, model_data.dimensions[dim].size), flush=True)\n\n # variable information.\n nc_vars = [var for var in model_data.variables] # list of nc variables\n\n print('\\n\\tvariables:', flush=True)\n for var in nc_vars:\n if var not in nc_dims:\n print('\\t\\t{}:'.format(var), flush=True)\n for attr, value in vars(model_data.variables[var]).items():\n print('\\t\\t\\t{} = {}'.format(attr, value), flush=True)\n\n # global attributes\n print('\\n\\tglobal attributes:', flush=True)\n for attr, value in vars(model_data).items():\n if isinstance(value, str):\n value = value.replace('\\n', ' ')\n print('\\t\\t\\t{} = {}'.format(attr, value), flush=True)\n\n # GeoCSV header\n print('\\n\\nGeoCSV header information:\\n\\n{}\\n\\n'.format(get_model_header(model_file, model_data)), flush=True)",
"def print_all_files(self):\n\n print(\"db path/name (filesize, md5sum) F disk path/name (filesize, md5sum)\")\n allfiles = set(self.files_from_db).union(set(self.files_from_disk))\n fdisk_str = \"\"\n # loop over all found files\n for fname in allfiles:\n # if the file name is in the DB list\n if fname in self.files_from_db:\n finfo = self.files_from_db[fname]\n fullname = f\"{finfo['path']}/{fname}\"\n filesize = None\n if 'filesize' in finfo:\n filesize = finfo['filesize']\n md5sum = None\n if 'md5sum' in finfo:\n md5sum = finfo['md5sum']\n\n fdb_str = f\"{fullname} ({filesize}, {md5sum})\"\n else:\n fdb_str = \"\"\n # if the file name is in the disk list\n if fname in self.files_from_disk:\n finfo = self.files_from_disk[fname]\n fullname = f\"{finfo['relpath']}/{fname}\"\n filesize = None\n if 'filesize' in finfo:\n filesize = finfo['filesize']\n md5sum = None\n if 'md5sum' in finfo:\n md5sum = finfo['md5sum']\n\n fdisk_str = f\"{fullname} ({filesize}, {md5sum})\"\n else:\n fdisk_str = \"\"\n # not whether they are the same or not\n comp = 'X'\n if fname in self.comparison_info['equal']:\n comp = '='\n\n print(f\"{fdb_str:-140s} {comp} {fdisk_str:-140s}\")",
"def __repr__(self):\n spacing = ' '*2\n if not hasattr(self, 'hdu_list'):\n self.update_hdu_list()\n\n rep = ['']\n rep.append(\"%sfile: %s\" % (spacing, self._filename))\n rep.append(\"%smode: %s\" % (spacing, _modeprint_map[self.intmode]))\n\n rep.append('%sextnum %-15s %s' % (spacing, \"hdutype\", \"hduname[v]\"))\n for i, hdu in enumerate(self.hdu_list):\n t = hdu._info['hdutype']\n name = hdu.get_extname()\n if name != '':\n ver = hdu.get_extver()\n if ver != 0:\n name = '%s[%s]' % (name, ver)\n\n rep.append(\n \"%s%-6d %-15s %s\" % (spacing, i, _hdu_type_map[t], name))\n\n rep = '\\n'.join(rep)\n return rep",
"def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)",
"def summarize_hdf5(hdf5_file):\n # characterize the h5file in a mostly content-agnostic way\n summary = {\n 'sums': {},\n 'shapes': {}\n }\n\n def characterize_object(obj_name, obj_data):\n \"\"\"retain some properties of each dataset in an hdf5 file\"\"\"\n if isinstance(obj_data, h5py.Dataset):\n summary['shapes'][obj_name] = obj_data.shape\n # note that this will break if the hdf5 file contains non-numeric datasets\n summary['sums'][obj_name] = obj_data[...].sum()\n print(\"dataset %s version = %s\" % (obj_name, hdf5_file.get_version(obj_name)))\n\n hdf5_file.visititems(characterize_object)\n\n return summary",
"def display_content_hashtable(self):\n\n file = open(\"../util/HashTable File\", \"r+\")\n file.truncate(0)\n file.close()\n for i in range(0, len(self.objects_list)):\n\n if self.objects_list[i].display_content() != None:\n lines = []\n lines = self.objects_list[i].display_content()\n file = open(\"../util/HashTable File\", \"a+\")\n for j in lines:\n file.write(str(j) + ' ')\n\n file.close()\n\n file = open(\"../util/HashTable File\", \"r\")\n for i in file:\n print(i)",
"def main(argv):\n\n csvPath = r\"F:\\Projects\\NationalAtlas\\src\\htmGenerate\\serviceFolderHierarchy.csv\"\n inHtmTemplate = r\"F:\\Projects\\NationalAtlas\\src\\htmGenerate\\index.html\"\n outHtm = r\"F:\\Projects\\NationalAtlas\\src\\NationalAtlas_HTML.git\\DataFactSheets\\index.html\"\n \n rows = csv.reader(open(csvPath, 'rb'), dialect='excel')\n\n hft = HtmlFolderTree(rows)\n hft.write(open(r'c:\\temp\\out\\out.htm','w'))"
] | [
"0.6818465",
"0.6535027",
"0.64906377",
"0.63089246",
"0.60178846",
"0.5999033",
"0.5991033",
"0.5967706",
"0.5928091",
"0.59176594",
"0.58546895",
"0.57313806",
"0.571641",
"0.56861824",
"0.5656443",
"0.5614497",
"0.55950373",
"0.5567397",
"0.55614275",
"0.5533533",
"0.5508945",
"0.5489333",
"0.54857886",
"0.545143",
"0.54499483",
"0.5448615",
"0.5442059",
"0.5431497",
"0.54264456",
"0.5378597"
] | 0.74592173 | 0 |
cast sha256 to int | def sha256(cls, value):
assert type(value) is str
return int(sha256(value.encode()).hexdigest(), 16) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hashToInt(h):\n orderBits = Curve.N.bit_length()\n orderBytes = (orderBits + 7) // 8\n if len(h) > orderBytes:\n h = h[:orderBytes]\n\n ret = int.from_bytes(h, byteorder=\"big\")\n excess = len(h) * 8 - orderBits\n if excess > 0:\n ret = ret >> excess\n return ret",
"def hash_message_as_int(message, hashfunction=sha256):\n return int(hashfunction(message).hexdigest(), 16)",
"def hex2int(r: str) -> int:",
"def phash2int(phash):\n phash.hash[-1] = False\n phash_as_bigint = struct.unpack('Q', np.packbits(phash.hash))[0]\n return phash_as_bigint",
"def bytes_to_int(s):\n # int type casts may return a long type\n return int(s.encode('hex'), 16)",
"def hash_int(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n else:\n raise ValueError(f\"numpy.nan expected, not {c}\")\n else:\n b = struct.pack(\"i\", c)\n m = hashlib.sha256()\n m.update(b)\n r = m.hexdigest()\n if len(r) >= hash_length:\n r = r[:hash_length]\n return int(r, 16) % (10 ** 8)",
"def hash_string_to_int(\r\n k: bytes,\r\n e: str,\r\n) -> int:\r\n return int.from_bytes(hash_string(k, e), 'big')",
"def __hex2int(_hex_str):\n return int(\"0x\"+_hex_str, 16)",
"def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result",
"def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result",
"def bin2int(r: str) -> int:",
"def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256",
"def hash2dec(hash_str: str) -> int:\n length = len(hash_str)\n bases = [32 ** i for i in range(length)][::-1]\n\n dec = 0\n for i, d in enumerate(hash_str):\n dec += ch2int[d] * bases[i]\n return dec",
"def compute_phash_int(im):\n return phash2int(compute_phash(im))",
"def castle_counter_to_int(castle_counter_val):\n return int(struct.unpack('q', castle_counter_val)[0])",
"def convertBytesToInt(self, bytes):\r\n result = 0\r\n for idx in range(len(bytes)):\r\n if idx == 0:\r\n result = int(bytes[0])\r\n else:\r\n result = (result << 8) + bytes[idx]\r\n\r\n return result",
"def _bytes_to_int32(b):\n\n\treturn b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)",
"def to_int(buf: bytes) -> int:\n return int.from_bytes(buf, byteorder=\"little\")",
"def hash_int(\r\n k: bytes,\r\n e: int,\r\n) -> bytes:\r\n return hash_string(k, str(e))",
"def SHA256(self) -> _n_0_t_3[_n_0_t_9]:",
"def bytes_to_int(obj):\n return functools.reduce(lambda x, y: x << 8 | y, obj)",
"def _convert_to_int(backing: List[int]) -> int:\n return int.from_bytes(backing, byteorder=\"little\", signed=True)",
"def test_right_bytes_to_int(self):\n byte_string = b'\\x00\\x00\\xFA\\xFF'\n result = utils.bytes_to_int(byte_string, little_endian=False)\n expected_result = 64255\n self.assertEqual(result, expected_result)",
"def get_hash(hash_function, x: str):\n hash_function.update(x.encode())\n return int.from_bytes(hash_function.digest(), byteorder=\"big\")",
"def bytes2int(raw_bytes: bytes) -> int:\n return int.from_bytes(raw_bytes, \"big\", signed=False)",
"def hash(x) -> int:\n pass",
"def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()",
"def IntDecode(int_bytes: bytes) -> int:\n return ed25519_lib.int_decode(int_bytes)",
"def hex_to_int(hex_string):\r\n return int(hex_string, 16)",
"def bytes_to_int(bs):\n v = 0\n p = 0\n for b in reversed(bs):\n v += b * (2 ** p)\n p += 8\n return v"
] | [
"0.7448692",
"0.72365516",
"0.7121622",
"0.7021218",
"0.68548185",
"0.68216866",
"0.6709854",
"0.66627985",
"0.66617006",
"0.66617006",
"0.6653154",
"0.6538216",
"0.64865804",
"0.6485043",
"0.64513963",
"0.6443496",
"0.64202505",
"0.6406009",
"0.64018744",
"0.6398126",
"0.6376543",
"0.636221",
"0.63583994",
"0.6327398",
"0.63238853",
"0.6309113",
"0.6308729",
"0.62959796",
"0.62686",
"0.625738"
] | 0.7255353 | 1 |
Process all examples in the input directory. Filenames should be of the form CLASSNAMEEXAMPLENAME.yaml E.g Person001.yaml | def process_examples(self):
input_dir = self.input_directory
counter_example_dir = self.counter_example_input_directory
if input_dir is None:
input_dir = Path.cwd() / "examples"
if counter_example_dir is None:
counter_example_dir = Path.cwd() / "counter_examples"
for fmt in self.input_formats:
input_examples = glob.glob(os.path.join(str(input_dir), f"*.{fmt}"))
input_counter_examples = glob.glob(os.path.join(str(counter_example_dir), f"*.{fmt}"))
if not input_counter_examples:
logging.warning(
f"No counter examples found in {self.counter_example_input_directory}"
)
self.process_examples_from_list(input_examples, fmt, False)
self.process_examples_from_list(input_counter_examples, fmt, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_yamls(folder):\n for item in iglob(folder + \"/*.yaml\"):\n data_file = os.path.join(folder, item)\n data = yaml.load(open(data_file))\n load_data(data)",
"def generate_yaml_tests(directory):\n for yml_file in directory.glob(\"*/*.yml\"):\n data = yaml.safe_load(yml_file.read_text())\n assert \"cases\" in data, \"A fixture needs cases to be used in testing\"\n\n # Strip the parts of the directory to only get a name without\n # extension and resolver directory\n base_name = str(yml_file)[len(str(directory)) + 1:-4]\n\n base = data.get(\"base\", {})\n cases = data[\"cases\"]\n\n for i, case_template in enumerate(cases):\n case = base.copy()\n case.update(case_template)\n\n case[\":name:\"] = base_name\n if len(cases) > 1:\n case[\":name:\"] += \"-\" + str(i)\n\n if case.pop(\"skip\", False):\n case = pytest.param(case, marks=pytest.mark.xfail)\n\n yield case",
"def _generate_examples(self, data_dir_path):\n\n for class_name in tf.io.gfile.listdir(data_dir_path):\n class_dir_path = os.path.join(data_dir_path, class_name)\n for image_name in tf.io.gfile.listdir(class_dir_path):\n image = os.path.join(class_dir_path, image_name)\n yield image, {\n \"image\": image,\n \"label\": class_name,\n }",
"def discover_examples():\n root = './examples'\n for filename in os.listdir(root):\n if os.path.splitext(filename)[1] == '.py':\n yield os.path.join(root, filename)",
"def process_example(self):\n\n name_files, transition_funcs = self.treat.get_transition_functions()\n for name_file, transition_func in zip(name_files, transition_funcs):\n print(f\"Name file: {name_file}\")\n self.afd(transition_func, self.q0, self.qfs, self.words)\n print('-'*50)",
"def generate_yaml_tests(directory):\n for yml_file in directory.glob(\"*.yml\"):\n data = yaml.safe_load(yml_file.read_text())\n assert \"cases\" in data, \"A fixture needs cases to be used in testing\"\n\n # Strip the parts of the directory to only get a name without\n # extension and resolver directory\n base_name = str(yml_file)[len(str(directory)) + 1:-4]\n\n base = data.get(\"base\", {})\n cases = data[\"cases\"]\n\n for resolver in 'legacy', '2020-resolver':\n for i, case_template in enumerate(cases):\n case = base.copy()\n case.update(case_template)\n\n case[\":name:\"] = base_name\n if len(cases) > 1:\n case[\":name:\"] += \"-\" + str(i)\n case[\":name:\"] += \"*\" + resolver\n case[\":resolver:\"] = resolver\n\n skip = case.pop(\"skip\", False)\n assert skip in [False, True, 'legacy', '2020-resolver']\n if skip is True or skip == resolver:\n case = pytest.param(case, marks=pytest.mark.xfail)\n\n yield case",
"def test_examples():\n tests = [d for d in listdir(ex) if path.isdir(path.join(ex, d))]\n for d in tests:\n yield check_examples, d",
"def example_source_inputs(self, class_name: str = None) -> List[str]:\n input_dir = self.input_directory\n if input_dir is None:\n return []\n all_inputs = []\n for fmt in self.input_formats:\n glob_expr = f\"*.{fmt}\"\n if class_name is not None:\n glob_expr = f\"{class_name}-{glob_expr}\"\n input_examples = glob.glob(os.path.join(str(input_dir), glob_expr))\n all_inputs.extend(input_examples)\n return all_inputs",
"def main():\n args = utils.read_arguments(__doc__)\n documents = []\n filenames = list(traverse_directory(args[\"input_dirpath\"],'*clean*.txt'))\n labels_dirname = args[\"labels_dirpath\"]\n labels_from_json = get_all_labels_from_json(labels_dirname)\n for filename in tqdm(filenames):\n with AnnotatedIBMFactory(filename) as instance_extractor:\n filename_key = filename.split(\"/\")[-1]\n document = instance_extractor.build_document(\n labels_from_json[filename_key])\n documents.append(document)\n utils.pickle_to_file(documents, args['output_file'])",
"def generate_examples(self, docname):\n fake_factory = faker.Factory.create()\n for name, language, parent in self.data['examples']:\n props = self.get_object(name)\n sample_data = props.generate_sample_data(self.data['all_objects'],\n fake_factory)\n if language == 'yaml' and yaml is not None:\n title = 'YAML Example'\n code_text = yaml.safe_dump(sample_data, indent=4,\n default_flow_style=False,\n explicit_start=True,\n version=(1, 2))\n else:\n if language == 'yaml':\n self.env.warn(docname,\n 'YAML support is disabled, pip install yaml '\n 'to enable.')\n title = 'JSON Example'\n language = 'json'\n code_text = json.dumps(sample_data, indent=4,\n ensure_ascii=False)\n\n example = nodes.literal_block(code_text, code_text)\n example['language'] = language\n parent.append(nodes.strong(title, title))\n parent.append(example)",
"def _generate_examples(self, folders, split):\n raise NotImplementedError(\"TODO\")",
"def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)",
"def main(root_dir):\n # load annotations\n print('Loading instances and annotations...')\n captions_file = json.load(open('{}/annotations/captions_train2017.json'.format(root_dir), 'r'))\n categories_file = json.load(open('{}/annotations/instances_train2017.json'.format(root_dir), 'r'))\n print('Done.')\n\n # group categories by image\n image_categories = group_categories(categories_file)\n\n # group captions by image\n image_captions = group_captions(captions_file['annotations'])\n\n # get filename of each image\n image_file = get_filename(captions_file['images'])\n\n # assign each category an id.\n # we are not using the default ids given in the dataset because\n # the id ranges are not continuous.\n category_id, id_category = map_category_id(categories_file['categories'])\n \n # save parsed coco dataset\n save_dataset(image_categories, image_captions, image_file, category_id, id_category, root_dir)",
"def run_step(context):\n logger.debug(\"started\")\n context.assert_keys_have_values(__name__,\n 'fileFormatYamlIn',\n 'fileFormatYamlOut')\n\n in_path = context.get_formatted('fileFormatYamlIn')\n out_path = context.get_formatted('fileFormatYamlOut')\n\n logger.debug(f\"opening yaml source file: {in_path}\")\n with open(in_path) as infile:\n payload = yaml.load(infile, Loader=yaml.RoundTripLoader)\n\n logger.debug(f\"opening destination file for writing: {out_path}\")\n os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)\n with open(out_path, 'w') as outfile:\n formatted_iterable = context.get_formatted_iterable(payload)\n yaml.dump(formatted_iterable,\n outfile,\n Dumper=yaml.RoundTripDumper,\n allow_unicode=True,\n width=50)\n\n logger.info(\n f\"Read {in_path} yaml, formatted contents and wrote to {out_path}\")\n logger.debug(\"done\")",
"def _generate_examples(self, files):\n idx = 0\n for filename in files:\n with open(filename) as file:\n for line in file:\n yield idx, {\"text\": line}\n idx += 1",
"def generate_examples(file_name):\n for line in read_file(file_name):\n yield json.loads(line)",
"def _generate_examples(self, data_dir_path):\n for file_name in tf.io.gfile.listdir(data_dir_path):\n if file_name.endswith(\".png\"):\n image = os.path.join(data_dir_path, file_name)\n angle_label = file_name.split(\"_\")[2].split(\".\")[0]\n object_id = file_name.split(\"_\")[0]\n yield file_name, {\n \"image\": image,\n \"angle_label\": angle_label,\n \"object_id\": object_id,\n \"angle\": int(angle_label),\n }",
"def generate_labels(cfg, split_files):\n for file_name in split_files:\n file_name = join(cfg.data_dir, file_name)\n\n for example in generate_examples(file_name):\n yield from example['labels']",
"def main():\n parser = ArgumentParser(description=\"pre-process nexus templates\")\n parser.add_argument(\n \"nexus_templates\",\n nargs=\"+\",\n help=\"Nexus template files to process\",\n )\n args = parser.parse_args()\n\n for template_file in args.nexus_templates:\n preprocess_template(template_file)",
"def read_examples(data_dir, data_sign):\n examples = []\n # read src data\n with open(data_dir / f'{data_sign}/sentences.txt', \"r\", encoding='utf-8') as f_sen, \\\n open(data_dir / f'{data_sign}/tags.txt', 'r', encoding='utf-8') as f_tag:\n for sen, tag in zip(f_sen, f_tag):\n example = InputExample(sentence=sen.strip().split(' '), tags=tag.strip().split(' '))\n examples.append(example)\n print(\"InputExamples:\", len(examples))\n return examples",
"def examplereader(path, lower=False):\n for line in filereader(path):\n line = line.lower() if lower else line\n tokens = tokens_from_treestring(line)\n tree = Tree.fromstring(line) # use NLTK's Tree\n label = int(line[1])\n trans = transitions_from_treestring(line)\n yield Example(tokens=tokens, tree=tree, label=label, transitions=trans)",
"def all(self):\n for path in sorted(self.directory.glob(\"*.yaml\"), key = lambda p: p.stem):\n yield self.from_path(path)",
"def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)",
"def process_input_files(list_input_files):\n global dict_models_results\n global list_spacy_docs\n \n for input_file in list_input_files:\n prefix = prefix_from_filename(input_file)\n \n with open(input_file) as f:\n list_cases = json.load(f)\n dict_models_results[prefix] = list_cases\n \n \n #extract list of questions from all vignettes and create a mapping page -> vignette question\n dict_questions = {}\n for prefix, list_cases in dict_models_results.items():\n for vignette in list_cases:\n dict_questions[vignette[\"book_page\"]] = vignette[\"question\"]\n \n \n for book_page,question in dict_questions.items():\n doc_q = load_bner_onto_tokens_extension(question, book_page)\n list_spacy_docs.append(doc_q)\n \n return",
"def main(input_params):\n\n store = kgenlib.BaseStore()\n\n input_files = input_params[\"files\"]\n output_file = input_params.get(\"output_file\")\n\n for file in input_files:\n store.add(kgenlib.BaseStore.from_yaml_file(file))\n\n mutations = input_params.get(\"mutations\", {})\n store.process_mutations(mutations)\n return store.dump(output_filename=output_file)",
"def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)",
"def main(cls, **kwargs):\n try:\n import file_transformer\n except Exception as e:\n sys.exit(\"{}\\nSee https://github.com/benkehoe/file-transformer\".format(e))\n \n def loader(input_stream, args):\n return yaml.load(input_stream)\n \n def processor(input, args):\n transform = cls(input, vars(args))\n transform.apply()\n return transform.template\n \n def dumper(output, output_stream, args):\n yaml.dump(output, output_stream)\n \n return file_transformer.main(processor, loader, dumper, **kwargs)",
"def run(self):\n args = self._parse_args(self._argv)\n with open(args['yaml']) as yaml_file:\n yaml_dict = yaml.safe_load(yaml_file) # returns list<dict>\n yaml_dict = yaml_dict[0]['machine_learning_setup']\n data = DataIngest(yaml_dict['data']).get()\n return PipelineWrapper(yaml_dict['pipeline']).fit_transform(data)",
"def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")",
"def process_collection_tests(pathname, ctx):\n ctx.enter_tests()\n for dirname, dirpath in os_listdir(pathname):\n if dirname == \"integration\" and os.path.isdir(dirpath):\n process_integration_tests(dirpath, ctx)\n elif os.path.isfile(os.path.join(dirpath, \"tests_default.yml\")):\n ctx.enter_role(dirname, dirpath)\n process_role_tests_path(dirpath, ctx)\n ctx.exit_role()\n elif os.path.isdir(dirpath) and dirname in SKIP_COLLECTION_TEST_DIRS:\n continue\n elif os.path.isfile(dirpath):\n process_ansible_file(dirpath, ctx)\n elif os.path.isdir(dirpath):\n # don't know what this is - process like ansible yml files\n process_ansible_yml_path(dirpath, ctx)\n\n ctx.exit_tests()"
] | [
"0.6632809",
"0.62900645",
"0.62750703",
"0.61933035",
"0.6179182",
"0.6156422",
"0.60064507",
"0.5971663",
"0.5965593",
"0.5941546",
"0.59226394",
"0.59129375",
"0.5911364",
"0.58914095",
"0.58837914",
"0.58785766",
"0.5834951",
"0.583384",
"0.5827961",
"0.58051383",
"0.5797755",
"0.57788974",
"0.577525",
"0.5769297",
"0.57536703",
"0.57504874",
"0.57476246",
"0.57102674",
"0.567799",
"0.5659997"
] | 0.74208486 | 0 |
Get the list of example source inputs. | def example_source_inputs(self, class_name: str = None) -> List[str]:
input_dir = self.input_directory
if input_dir is None:
return []
all_inputs = []
for fmt in self.input_formats:
glob_expr = f"*.{fmt}"
if class_name is not None:
glob_expr = f"{class_name}-{glob_expr}"
input_examples = glob.glob(os.path.join(str(input_dir), glob_expr))
all_inputs.extend(input_examples)
return all_inputs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_train_inputs(self, example):\n return example",
"def inputs(self) -> List[str]:\n return self._model.inputs",
"def get_inputs(self):\n return self.inputs",
"def prepare_inputs(example):\n return example['input_ids'], example['label_ids']",
"def prepare_inputs(example):\n return example['input_ids'], example['label_ids']",
"def getListOfInputs(self, *args):\n return _libsbml.Transition_getListOfInputs(self, *args)",
"def extract_inputs(self, data):\n inputs_vec = []\n for input_name in g_conf.INPUTS:\n inputs_vec.append(data[input_name])\n\n return torch.cat(inputs_vec, 1)",
"def inputs(self):\n return self._inputs",
"def _get_inputs(self):\n return self.__inputs",
"def _get_inputs(self):\n return self.__inputs",
"def _get_inputs(self):\n return self.__inputs",
"def get_inputs(self):\n return self.attributes[\"inputs\"]",
"def inputs(self):\n return self.inputs",
"def inputs() -> List[str]:\n return Invocation.current.required",
"def inputs(self):\n\n inputs = []\n for arg in self.arguments:\n if arg.IN:\n inputs.append(arg)\n\n return inputs",
"def get_inputs(self):\r\n raise NotImplementedError",
"def get_inputs(self):\r\n raise NotImplementedError",
"def inputs(self):\n return self._inputs",
"def inputs(self):\n return self._inputs",
"def inputs(self):\n return self._inputs",
"def get(self, *args):\n return _libsbml.ListOfInputs_get(self, *args)",
"def get_inputs(step):\n params = step.get('parameters', {})\n inputs = params.get('inputs', [])\n if inputs and isinstance(inputs[0], list):\n # Some steps may have sublists as inputs\n inputs = [item for sublist in inputs for item in sublist]\n for single_input in ['input', 'src_input', 'tgt_input']:\n if single_input in params:\n inputs.append(params[single_input])\n return inputs",
"def _get_inputs(self):\n return [InputDesc(tf.float32, (None, IMAGE_SIZE), 'input_sensor_1'),\n InputDesc(tf.float32, (None, IMAGE_SIZE), 'input_sensor_2'),\n InputDesc(tf.int32, (None,), 'label')]",
"def get_input_names(self):\n return self._input_names",
"def source_list(self):\n return self._source_list",
"def source_list(self):\n return self._source_list",
"def inputs(self):\n pass",
"def get_inputs(self) -> List[NodeValue]:\n\n return self.inputs_",
"def input_features(self) -> List[str]:\n return self._input_features",
"def get_inputs(list_labels, title):\n\n # your code\n print(title)\n\n inputs = []\n for i in list_labels:\n inputs.append(input(i))\n\n return inputs"
] | [
"0.7428947",
"0.7054078",
"0.6997518",
"0.6961838",
"0.6961838",
"0.6924607",
"0.6878051",
"0.6812642",
"0.6788958",
"0.6788958",
"0.6788958",
"0.6773123",
"0.6742478",
"0.67293483",
"0.67270637",
"0.6706153",
"0.6706153",
"0.6660394",
"0.6660394",
"0.6660394",
"0.66560566",
"0.66050583",
"0.65486264",
"0.65103245",
"0.6458501",
"0.6458501",
"0.6440875",
"0.64292943",
"0.6423265",
"0.6352991"
] | 0.7485938 | 0 |
Load an object from a dict, using the target class to determine the type of object to create. | def _load_from_dict(self, dict_obj: Any, target_class: Union[str, ElementName] = None) -> Any:
if not self.use_type_designators:
return dict_obj
sv = self.schemaview
if target_class is None:
target_class_names = [c.name for c in sv.all_classes().values() if c.tree_root]
if len(target_class_names) != 1:
raise ValueError(
f"Cannot determine single target class, found: {target_class_names}"
)
target_class = target_class_names[0]
if isinstance(dict_obj, dict):
if target_class not in sv.all_classes():
raise ValueError(f"No such class as {target_class}")
td_slot = sv.get_type_designator_slot(target_class) if target_class else None
if td_slot:
if td_slot.name in dict_obj:
target_class = dict_obj[td_slot.name]
elif "@type" in dict_obj:
target_class = dict_obj["@type"]
del dict_obj["@type"]
if ":" in target_class:
target_classes = [c for c in sv.all_classes() if sv.get_uri(c) == target_class]
if len(target_classes) != 1:
raise ValueError(
f"Cannot find unique class for URI {target_class}; got: {target_classes}"
)
target_class = target_classes[0]
new_dict_obj = {}
for k, v in dict_obj.items():
if v is not None:
islot = sv.induced_slot(k, target_class)
v2 = self._load_from_dict(v, target_class=islot.range)
new_dict_obj[k] = v2
py_target_class = getattr(self.python_module, camelcase(target_class))
return py_target_class(**new_dict_obj)
elif isinstance(dict_obj, list):
return [self._load_from_dict(x, target_class) for x in dict_obj]
else:
return dict_obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_obj_by_type_from_dict(self):\n test_obj = {}\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIsInstance(returned_obj, self.tested_class)",
"def from_dict(cls, obj):\r\n raise NotImplementedError",
"def load(d):\n\n def _load(d):\n if isinstance(d, list):\n li = []\n for item in d:\n li.append(_load(item))\n return li\n elif isinstance(d, dict) and \"type\" in d: # object\n t = d[\"type\"]\n if t == \"datetime\":\n if hasattr(datetime, \"fromisoformat\"):\n return datetime.fromisoformat(d[\"value\"])\n else:\n return datetime.strptime(d[\"value\"], date_format_str)\n if t == \"Severity\":\n return Severity.from_str(d[\"value\"])\n try:\n del d[\"type\"]\n clazz = getattr(importlib.import_module(\"vulndb.lib\"), t)\n if hasattr(clazz, \"from_dict\"):\n o = clazz.from_dict(d)\n else:\n o = clazz(**d)\n except KeyError:\n raise ClassNotFoundError(\n \"Class '%s' not found in the given module!\" % t\n )\n except TypeError as te:\n print(te)\n raise TypeError(\n \"Make sure there is an constuctor that doesn't take any arguments (class: %s)\"\n % t\n )\n return o\n elif isinstance(d, dict): # dict\n rd = {}\n for key in d:\n rd[key] = _load(d[key])\n return rd\n else:\n return d\n\n return _load(d)",
"def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n return attr_cls._from_dict(d)",
"def load(cls, data):\n if isinstance(data, dict):\n print('>>> dict')\n else:\n print('>>> obj')\n # cls_fields = fields(cls)\n init()",
"def from_dict(cls, d):\n clsname = d['type']\n\n if clsname == cls.__name__:\n raise Exception('Cannot instantiate abstract class \"Node\"')\n\n clstype = getattr(sys.modules[__name__], clsname)\n return clstype.from_dict(d)",
"def from_dict(cls, dikt) -> 'ModelClass':\n return util.deserialize_model(dikt, cls)",
"def from_json_dict(j_dict):\n # determine the class it is.\n obj_type = j_dict[\"__obj_type\"]\n obj = Saveable.ALL_SAVEABLES[obj_type].from_json_dict(j_dict)\n return obj",
"def from_dict(cls, dict_object):\n\n return cls(**dict_object)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)"
] | [
"0.70920396",
"0.6989815",
"0.6780296",
"0.6534313",
"0.65280795",
"0.650231",
"0.64138657",
"0.6346336",
"0.625308",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886",
"0.62314886"
] | 0.7511082 | 0 |
Finds fused batch norm layers and folds them into preceding layers. | def _FoldFusedBatchNorms(graph):
for match in _FindFusedBatchNorms(graph):
scope, sep, _ = match.layer_op.name.rpartition('/')
# Make sure new ops are added to `graph` and put on the same device as
# `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
# named `scope`. Otherwise, TF creates a unique scope whose name starts with
# `scope`.
with graph.as_default(), graph.name_scope(scope + sep), ops.device(
match.bn_op.device):
with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):
# new weights = old weights * gamma / sqrt(variance + epsilon)
# new biases = -mean * gamma / sqrt(variance + epsilon) + beta
multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(
match.variance_tensor + match.bn_op.get_attr('epsilon'))
bias_tensor = math_ops.subtract(
match.beta_tensor,
match.mean_tensor * multiplier_tensor,
name='bias')
# The shape of depthwise weights is different, so we need to reshape the
# multiplier_tensor to ensure that the scaled_weight_tensor has the
# expected shape.
if match.layer_op.type == 'DepthwiseConv2dNative':
new_shape = [
match.weight_tensor.get_shape().as_list()[2],
match.weight_tensor.get_shape().as_list()[3]
]
multiplier_tensor = array_ops.reshape(
multiplier_tensor, new_shape, name='scale_reshape')
# TODO(suharshs): This naming of the following ops needs to carefully
# follow the naming expected by quantize.py. Generalize the quantize code
# to not require these delicate naming conventions.
scaled_weight_tensor = math_ops.multiply(
match.weight_tensor, multiplier_tensor, name='mul_fold')
new_layer_tensor = _CloneWithNewOperands(
match.layer_op, match.input_tensor, scaled_weight_tensor)
bias_add_tensor = math_ops.add(
new_layer_tensor, bias_tensor, name='add_fold')
nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,
match.output_tensor)
if nodes_modified_count != 1:
raise ValueError(
'Unexpected inputs to op: %s' % match.output_tensor.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)",
"def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)",
"def test_batch_norm_fold(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n conv = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True)(conv, training=False)\n relu = tf.nn.relu(bn)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n\n baseline_output = model(numpy_data)\n\n _, model = fold_all_batch_norms(model)\n output_after_fold = model(numpy_data)\n\n assert np.allclose(baseline_output, output_after_fold, atol=1.e-4)",
"def DerefBatchNormLayers(network, batch_norm_names, layers_dict, suffix='_fold', \n lr_mult=1.0, decay_mult=1.0):\n for bn_layer_name in batch_norm_names:\n index = layers_dict[bn_layer_name]\n bn_layer = network.layer[index]\n \n if (len(bn_layer.bottom) != 1) or (len(bn_layer.top) != 1):\n raise AssertionError('Expected bn layer to have one top and bottom')\n \n prev_layer_idx = index - 1\n next_layer_idx = index + 1\n prev_layer, next_layer = network.layer[prev_layer_idx], network.layer[next_layer_idx]\n \n if not (prev_layer.top == bn_layer.bottom and bn_layer.top == next_layer.bottom):\n raise AssertionError(\"Could not find previous and next nodes for\"\n \"batch norm layer\")\n \n if next_layer.type != 'Scale':\n print bn_layer_name, next_layer.type, next_layer.name\n raise AssertionError('Expected Scale layer to follow batch norm layer')\n \n if not (len(prev_layer.top) == 1 and len(next_layer.bottom) == 1):\n raise AssertionError(\"Expected previous and next blobs to have\" \n \"only one input and output\")\n \n next_layer.bottom[0] = prev_layer.top[0]\n next_layer.name = next_layer.name + suffix\n\n if lr_mult != 1.0 or decay_mult != 1.0:\n while len(next_layer.param) < 2:\n next_layer.param.add()\n for i in range(len(next_layer.param)):\n next_layer.param[i].lr_mult = lr_mult\n next_layer.param[i].decay_mult = decay_mult",
"def _FindFusedBatchNorms(graph):\n input_pattern = graph_matcher.OpTypePattern('*')\n weight_pattern = graph_matcher.OpTypePattern('*')\n gamma_pattern = graph_matcher.OpTypePattern('*')\n beta_pattern = graph_matcher.OpTypePattern('*')\n mean_pattern = graph_matcher.OpTypePattern('*')\n variance_pattern = graph_matcher.OpTypePattern('*')\n\n conv_pattern = graph_matcher.OpTypePattern(\n 'Conv2D|DepthwiseConv2dNative', inputs=[input_pattern, weight_pattern])\n # MatMul has a Reshape between it and FusedBatchNorm.\n matmul_pattern = graph_matcher.OpTypePattern(\n 'MatMul', inputs=[input_pattern, weight_pattern])\n matmul_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape', inputs=[matmul_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n conv_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n matmul_reshape_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape',\n inputs=[matmul_batch_norm_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_matcher = graph_matcher.GraphMatcher(conv_batch_norm_pattern)\n matmul_matcher = graph_matcher.GraphMatcher(matmul_bn_output_reshape_pattern)\n\n def _GetCommonTensors(match_result, bn_op, bn_input_tensor):\n \"\"\"Gets tensors needed for FusedBatchNormMatch from match_result.\"\"\"\n input_tensor = match_result.get_tensor(input_pattern)\n weight_tensor = match_result.get_tensor(weight_pattern)\n gamma_tensor = match_result.get_tensor(gamma_pattern)\n beta_tensor = match_result.get_tensor(beta_pattern)\n # FusedBatchNorm in training is different from that in inference. It takes\n # empty 'mean' and empty 'variance', and produces the mean and the variance\n # of the batch. Therefore, when is_training is true, mean_tensor and\n # variance_tensor point to 1st and 2nd (0-based) output of bn_op,\n # respectively; when is_training is false, they point to bn_op's inputs.\n is_training = bn_op.get_attr('is_training')\n if is_training:\n # FusedBatchNormGrad doesn't compute gradients of the batch_mean and\n # batch_variance outputs, so we need to substitute our own custom\n # gradient.\n # TODO(suharshs, raghuramank): Find a way to avoid needing this hack.\n # pylint: disable=protected-access\n bn_op._set_attr(\n '_gradient_op_type',\n attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))\n # pylint: enable=protected-access\n mean_tensor = bn_op.outputs[1]\n # The batch variance used during forward and backward prop is biased,\n # i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average\n # calculation, the variance is corrected by the term N/N-1 (Bessel's\n # correction). 
The variance tensor read from FuseBatchNorm has bessel's\n # correction applied, so we undo it here.\n n = math_ops.cast(\n array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),\n dtypes.float32)\n variance_tensor = bn_op.outputs[2] * (n - 1) / n\n else:\n mean_tensor = match_result.get_tensor(mean_pattern)\n variance_tensor = match_result.get_tensor(variance_pattern)\n return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor)\n\n for match_result in conv_matcher.match_graph(graph):\n layer_op = match_result.get_op(conv_pattern)\n layer_tensor = match_result.get_tensor(conv_pattern)\n bn_op = match_result.get_op(conv_batch_norm_pattern)\n # In the case of convolution the output_tensor is the output of bn_op.\n output_tensor = bn_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)\n\n for match_result in matmul_matcher.match_graph(graph):\n layer_op = match_result.get_op(matmul_pattern)\n layer_tensor = match_result.get_tensor(matmul_pattern)\n bn_op = match_result.get_op(matmul_batch_norm_pattern)\n # In the MatMul case, the output of batch norm is reshaped back into a\n # 2D tensor, so the output_tensor is the output of the Reshape op.\n output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)\n output_tensor = output_reshape_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)",
"def test_batch_norm_layers():\n layers = [[\"gru\", 20], [\"lstm\", 3], [\"linear\", 4], [\"linear\", 10]]\n rnn = RNN(layers_info=layers, hidden_activations=\"relu\", input_dim=5,\n output_activation=\"relu\", initialiser=\"xavier\", batch_norm=True)\n assert len(rnn.batch_norm_layers) == 3\n assert rnn.batch_norm_layers[0].num_features == 20\n assert rnn.batch_norm_layers[1].num_features == 3\n assert rnn.batch_norm_layers[2].num_features == 4",
"def forward(self, data_batch):\n\n x = data_batch[0]\n im_info = data_batch[1]\n gt_boxes = data_batch[2]\n num_boxes = data_batch[3]\n rel_mat = data_batch[4]\n\n if self.training:\n self.iter_counter += 1\n\n input_imgs = x.clone()\n\n sources = list()\n loc = list()\n conf = list()\n\n self.batch_size = x.size(0)\n\n # apply vgg up to conv4_3 relu\n if isinstance(self.base, nn.ModuleList):\n for k,v in enumerate(self.base):\n x = v(x)\n else:\n x = self.base(x)\n\n s = self.L2Norm(x)\n sources.append(s)\n base_feat = s\n\n # apply vgg up to fc7\n if isinstance(self.conv5, nn.ModuleList):\n for k,v in enumerate(self.conv5):\n x = v(x)\n else:\n x = self.conv5(x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n loc = loc.view(loc.size(0), -1, 4)\n conf = conf.view(conf.size(0), -1, self.num_classes)\n\n SSD_loss_cls = 0\n SSD_loss_bbox = 0\n if self.training:\n predictions = (\n loc,\n conf,\n self.priors.type_as(loc)\n )\n # targets = torch.cat([gt_boxes[:,:,:4] / self.size, gt_boxes[:,:,4:5]],dim=2)\n targets = gt_boxes\n SSD_loss_bbox, SSD_loss_cls = self.criterion(predictions, targets, num_boxes)\n\n conf = self.softmax(conf)\n\n # online data\n if self.training:\n if self.iter_counter > cfg.TRAIN.VMRN.ONLINEDATA_BEGIN_ITER:\n obj_rois, obj_num = self._obj_det(conf, loc, self.batch_size, im_info)\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n else:\n obj_rois = torch.FloatTensor([]).type_as(gt_boxes)\n obj_num = torch.LongTensor([]).type_as(num_boxes)\n obj_labels = None\n else:\n # when testing, this is object detection results\n # TODO: SUPPORT MULTI-IMAGE BATCH\n obj_rois, obj_num = self._obj_det(conf, loc, self.batch_size, im_info)\n if obj_rois.numel() > 0:\n obj_labels = obj_rois[:, 5]\n obj_rois = obj_rois[:, :5]\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n else:\n # there is no object detected\n obj_labels = torch.Tensor([]).type_as(gt_boxes).long()\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n\n if self.training:\n # offline data\n for i in range(self.batch_size):\n obj_rois = torch.cat([obj_rois,\n torch.cat([(i * torch.ones(num_boxes[i].item(), 1)).type_as(gt_boxes),\n (gt_boxes[i][:num_boxes[i]][:, 0:4])], 1)\n ])\n obj_num = torch.cat([obj_num, torch.Tensor([num_boxes[i]]).type_as(obj_num)])\n\n\n obj_rois = Variable(obj_rois)\n\n VMRN_rel_loss_cls = 0\n rel_cls_prob = torch.Tensor([]).type_as(obj_rois)\n if (obj_num > 1).sum().item() > 0:\n\n obj_pair_feat = self.VMRN_obj_pair_feat_extractor(input_imgs, obj_rois, self.batch_size, obj_num)\n # obj_pair_feat = obj_pair_feat.detach()\n rel_cls_score = self.VMRN_rel_cls_score(obj_pair_feat)\n\n rel_cls_prob = F.softmax(rel_cls_score)\n\n self.rel_batch_size = obj_pair_feat.size(0)\n\n if self.training:\n obj_pair_rel_label = self._generate_rel_labels(obj_rois, gt_boxes, obj_num, rel_mat)\n obj_pair_rel_label = obj_pair_rel_label.type_as(gt_boxes).long()\n\n rel_not_keep = (obj_pair_rel_label == 0)\n # no relationship is kept\n if (rel_not_keep == 0).sum().item() > 
0:\n rel_keep = torch.nonzero(rel_not_keep == 0).view(-1)\n\n rel_cls_score = rel_cls_score[rel_keep]\n\n obj_pair_rel_label = obj_pair_rel_label[rel_keep]\n obj_pair_rel_label -= 1\n VMRN_rel_loss_cls = F.cross_entropy(rel_cls_score, obj_pair_rel_label)\n else:\n if (not cfg.TEST.VMRN.ISEX) and cfg.TRAIN.VMRN.ISEX:\n rel_cls_prob = rel_cls_prob[::2, :]\n\n rel_result = None\n if not self.training:\n if obj_rois.numel() > 0:\n pred_boxes = obj_rois.data[:,1:5]\n pred_boxes[:, 0::2] /= im_info[0][3].item()\n pred_boxes[:, 1::2] /= im_info[0][2].item()\n rel_result = (pred_boxes, obj_labels, rel_cls_prob.data)\n else:\n rel_result = (obj_rois.data, obj_labels, rel_cls_prob.data)\n\n return loc, conf, rel_result, SSD_loss_bbox, SSD_loss_cls, VMRN_rel_loss_cls",
"def _find_all_batch_norms_to_fold(connected_graph: ConnectedGraph) -> Tuple[\n List[Tuple[LayerType, BatchNormType]], List[Tuple[BatchNormType, LayerType]]]:\n conv_bn_pairs, bn_conv_pairs, bn_to_fold = _find_foldable_bn_pair_and_bn_picked_for_folding(connected_graph)\n return conv_bn_pairs, bn_conv_pairs, bn_to_fold",
"def keras_model_functional_with_non_fused_batchnorms():\n is_training = tf.compat.v1.placeholder_with_default(tf.constant(True), shape=(), name='is_training')\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65, fused=False)(x, training=True)\n with tf.compat.v1.variable_scope(\"scope_1\"):\n x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25, fused=False)(x, training=is_training)\n x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.5, epsilon=.35, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax,\n name=\"keras_model_functional_with_non_fused_batchnorms\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model",
"def test_cnn_starts_with_batchnorm(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n assert str(type(model.layers[0])) \\\n == \"<class 'keras.layers.normalization.BatchNormalization'>\", \\\n 'Wrong layer type.'",
"def test_bn_fold_auto_rules_bn_before_conv(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,), name=\"inputs\")\n bn_op = tf.keras.layers.BatchNormalization(fused=True)(inputs)\n conv_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op)\n relu = tf.nn.relu(conv_op)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n assert 1 == len(conv_bn_pairs) + len(bn_conv_pairs)",
"def test_cnn_enough_batchnorm(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n batch_norm_layers = len(\n [l for l in model.layers if 'BatchNormalization' in str(l)])\n activation_layers = len(\n [l for l in model.layers if 'Activation' in str(l)])\n assert batch_norm_layers == activation_layers",
"def residual_net_old(total_depth, data_layer_params, num_classes = 1000, acclayer = True):\n # figure out network structure\n net_defs = {\n 18:([2, 2, 2, 2], \"standard\"),\n 34:([3, 4, 6, 3], \"standard\"),\n 50:([3, 4, 6, 3], \"bottleneck\"),\n 101:([3, 4, 23, 3], \"bottleneck\"),\n 152:([3, 8, 36, 3], \"bottleneck\"),\n }\n assert total_depth in net_defs.keys(), \"net of depth:{} not defined\".format(total_depth)\n\n nunits_list, unit_type = net_defs[total_depth] # nunits_list a list of integers indicating the number of layers in each depth.\n nouts = [64, 128, 256, 512] # same for all nets\n\n # setup the first couple of layers\n n = caffe.NetSpec()\n n.data, n.label = L.Python(module = 'beijbom_caffe_data_layers', layer = 'ImageNetDataLayer',\n ntop = 2, param_str=str(data_layer_params))\n n.conv1, n.bn1, n.lrn1 = conv_bn(n.data, ks = 7, stride = 2, nout = 64, pad = 3)\n n.relu1 = L.ReLU(n.lrn1, in_place=True)\n n.pool1 = L.Pooling(n.relu1, stride = 2, kernel_size = 3)\n \n # make the convolutional body\n for nout, nunits in zip(nouts, nunits_list): # for each depth and nunits\n for unit in range(1, nunits + 1): # for each unit. Enumerate from 1.\n s = str(nout) + '_' + str(unit) + '_' # layer name prefix\n if unit_type == \"standard\":\n residual_standard_unit_old(n, nout, s, newdepth = unit is 1 and nout > 64)\n else:\n residual_bottleneck_unit_old(n, nout, s, newdepth = unit is 1)\n \n # add the end layers \n n.global_pool = L.Pooling(n.__dict__['tops'][n.__dict__['tops'].keys()[-1]], pooling_param = dict(pool = 1, global_pooling = True))\n n.score = L.InnerProduct(n.global_pool, num_output = num_classes,\n param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])\n n.loss = L.SoftmaxWithLoss(n.score, n.label)\n if acclayer:\n n.accuracy = L.Accuracy(n.score, n.label)\n\n return n",
"def test_cnn_batchnorm_dim(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n batchnormlay = model.layers[2]\n assert batchnormlay.output_shape == (None, 20, 32)",
"def _fold_given_batch_norms(model,\n conv_bn_pairs: Iterable[Tuple[torch.nn.Module, torch.nn.Module]],\n bn_conv_pairs: Iterable[Tuple[torch.nn.Module, torch.nn.Module]]):\n # pylint: disable=protected-access\n for bn, conv in bn_conv_pairs:\n if isinstance(conv, QcQuantizeWrapper):\n raise RuntimeError(f\"Forward folding to scale is not possible. Got {conv}\")\n\n bn_modules = []\n\n def _fold(conv, bn, fold_backward):\n is_wrapped = isinstance(conv, QcQuantizeWrapper) or isinstance(bn, QcQuantizeWrapper)\n try:\n if is_wrapped:\n assert isinstance(conv, QcQuantizeWrapper) and isinstance(bn, QcQuantizeWrapper)\n _fold_to_scale(conv, bn)\n bn_modules.append(bn._module_to_wrap)\n else:\n _fold_to_weight(conv, bn, fold_backward=fold_backward)\n except _BatchNormFoldingNotSupported as e:\n bn_name = utils.get_layer_name(model, bn)\n conv_name = utils.get_layer_name(model, conv)\n _logger.warning(\n \"Failed to fold %s to %s. [Reason] %s\", bn_name, conv_name, str(e)\n )\n else:\n bn_modules.append(bn._module_to_wrap if is_wrapped else bn)\n\n\n with utils.in_eval_mode(model), torch.no_grad():\n for conv, bn in conv_bn_pairs:\n _fold(conv, bn, fold_backward=True)\n\n for bn, conv in bn_conv_pairs:\n _fold(conv, bn, fold_backward=False)\n\n _delete_bn_from_model(model, bn_modules)",
"def test_batch_norm_fold_with_random_data(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n conv = tf.keras.layers.Conv2D(32, (3, 3),\n kernel_initializer=tf.random_uniform_initializer(-1, 1),\n bias_initializer='random_uniform')(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True,\n beta_initializer='random_uniform',\n gamma_initializer='random_uniform',\n moving_mean_initializer='random_uniform',\n moving_variance_initializer='ones')(conv, training=False)\n relu = tf.nn.relu(bn)\n\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n baseline_output = model(numpy_data)\n\n _, model = fold_all_batch_norms(model)\n\n output_after_fold = model(numpy_data)\n\n assert not np.allclose(baseline_output, output_after_fold, atol=0)\n assert np.allclose(baseline_output, output_after_fold, atol=1e-4)",
"def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):\n if axes != [0,2,3]:\n raise Exception('unsupported')\n batch_mean, batch_var = tf.nn.moments(inputs, axes, keep_dims=True)\n shape = batch_mean.get_shape().as_list() # shape is [1,n,1,1]\n offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))\n scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))\n offset = tf.nn.embedding_lookup(offset_m, labels)\n # offset = tf.Print(offset,['offset',offset])\n scale = tf.nn.embedding_lookup(scale_m, labels)\n # scale = tf.Print(scale,['scale',scale])\n\n moving_mean = lib.param(name + '.moving_mean', np.zeros(batch_mean.get_shape(), dtype='float32'), trainable=False)\n moving_variance = lib.param(name + '.moving_variance', np.ones(batch_var.get_shape(), dtype='float32'),trainable=False)\n\n def _batch_norm_training():\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)\n\n def _batch_norm_inference():\n # Version which blends in the current item's statistics\n mean = moving_mean[None, :, None, None]\n var = moving_variance[None, :, None, None]\n '''\n batch_size = tf.cast(tf.shape(inputs)[0], 'float32')\n mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)\n mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]\n var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]\n '''\n return tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None],\n 1e-5), mean, var\n\n if is_training is None:\n outputs = _batch_norm_training()\n else:\n if is_training:\n outputs = _batch_norm_training()\n else:\n outputs = _batch_norm_inference()\n\n if update_moving_stats:\n no_updates = lambda: outputs\n\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)\n\n if is_training:\n outputs = _force_updates()\n else:\n outputs = no_updates()\n\n return outputs",
"def test_bn_fold_auto_rules_bn_after_conv(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,), name=\"inputs\")\n conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)\n relu = tf.nn.relu(bn_op)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n assert 1 == len(conv_bn_pairs) + len(bn_conv_pairs)",
"def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x",
"def test_cnn_enough_batchnorm(self):\n model_type = CNN((None, 20, 3), 2)\n model = model_type.create_model(**{\"filters\": [32, 32],\n \"fc_hidden_nodes\": 100})\n\n batch_norm_layers = len([layer for layer in model.layers if 'BatchNormalization' in str(layer)])\n activation_layers = len([layer for layer in model.layers if 'Activation' in str(layer)])\n assert batch_norm_layers == activation_layers",
"def test_cnn_batchnorm_dim(self):\n model_type = CNN((None, 20, 3), 2)\n model = model_type.create_model(**{\"filters\": [32, 32],\n \"fc_hidden_nodes\": 100})\n\n batchnormlay = model.layers[2]\n assert batchnormlay.output_shape == (None, 20, 32)",
"def inference(images_placeholder, is_training,\r\n depth1, depth2, depth3, dense1_units, dense2_units,\r\n dropout_rate=0.5):\r\n training_mode = is_training is not None\r\n\r\n # layer1:bn-conv-relu(depth1)-pool\r\n with tf.name_scope('conv1'):\r\n print('images_placeholder.shape:', images_placeholder.shape)\r\n bn = tf.layers.batch_normalization(inputs=images_placeholder, training=training_mode)\r\n tf.summary.histogram('batch norm', bn)\r\n\r\n conv = tf.layers.conv2d(\r\n inputs=bn,\r\n filters=depth1,\r\n kernel_size=[3, 3],\r\n padding=\"same\",\r\n activation=tf.nn.relu\r\n )\r\n tf.summary.histogram('conv layer:', conv)\r\n\r\n pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)\r\n tf.summary.histogram('pool', pool)\r\n\r\n # layer2:bn-conv-relu(depth2)-pool\r\n with tf.name_scope('conv2'):\r\n bn = tf.layers.batch_normalization(inputs=pool, training=training_mode)\r\n tf.summary.histogram('batch norm', bn)\r\n\r\n conv = tf.layers.conv2d(\r\n inputs=bn,\r\n filters=depth2,\r\n kernel_size=[3, 3],\r\n padding=\"same\",\r\n activation=tf.nn.relu\r\n )\r\n tf.summary.histogram('conv layer:', conv)\r\n\r\n pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)\r\n tf.summary.histogram('pool', pool)\r\n\r\n # layer3:bn-conv-relu(depth3)-pool\r\n with tf.name_scope('conv3'):\r\n bn = tf.layers.batch_normalization(inputs=pool, training=training_mode)\r\n tf.summary.histogram('batch norm', bn)\r\n\r\n conv = tf.layers.conv2d(\r\n inputs=bn,\r\n filters=depth3,\r\n kernel_size=[3, 3],\r\n padding=\"same\",\r\n activation=tf.nn.relu\r\n )\r\n tf.summary.histogram('conv layer:', conv)\r\n\r\n pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)\r\n tf.summary.histogram('pool', pool)\r\n\r\n with tf.name_scope('dense1'):\r\n pool_flat = tf.reshape(pool, [-1, 3 * 3 * 64])\r\n dense = tf.layers.dense(inputs=pool_flat, units=dense1_units, activation=tf.nn.relu)\r\n tf.summary.histogram('dense', dense)\r\n\r\n # dropout\r\n with tf.name_scope('dropout'):\r\n dropout = tf.layers.dropout(\r\n inputs=dense, rate=dropout_rate, training=training_mode)\r\n\r\n # dense2 58 output units\r\n with tf.name_scope('dense2'):\r\n logits = tf.layers.dense(inputs=dropout, units=58)\r\n tf.summary.histogram('dense2', dense)\r\n\r\n return logits",
"def convnet_layers( inputs, widths, mode ):\n\n training = (mode == \"train\")\n \n with tf.variable_scope( \"convnet\" ): # h,w\n \n #print(inputs.shape)\n x = conv_layer( inputs, layer_params[0], training ) \n #print(x.shape)\n x = conv_layer( x, layer_params[1], training ) \n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool2' )\n #print(x.shape)\n x = conv_layer( x, layer_params[2], training ) \n x = conv_layer( x, layer_params[3], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool4' )\n #print(x.shape)\n x = conv_layer( x, layer_params[4], training ) \n x = conv_layer( x, layer_params[5], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool6') \n #print(x.shape)\n x = conv_layer( x, layer_params[6], training ) \n x = conv_layer( x, layer_params[7], training )\n \n x = tf.layers.max_pooling2d( x, [2, 1], [2, 1], \n padding='valid', \n name='pool8' ) \n\n #print(x.shape)\n\n # squeeze row dim\n x = tf.squeeze( x, axis=1, name='features' )\n\n #print(x.shape)\n\n sequence_length = get_sequence_lengths( widths ) \n\n return x, sequence_length",
"def _batch_norm(inputs, decay = 0.999, center = True, scale = False, epsilon = 0.001, \n\t\t\t\tmoving_vars = 'moving_vars', activation = None, is_training = None, \n\t\t\t\ttrainable = True, restore = True, scope = None, reuse = None):\n inputs_shape = inputs.get_shape()\n with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse = reuse):\n axis = list(range(len(inputs_shape) - 1))\n params_shape = inputs_shape[-1:]\n beta, gamma = None, None\n\n if center:\n beta = _variable_on_cpu('beta', params_shape, tf.zeros_initializer)\n if scale:\n gamma = _variable_on_cpu('gamma', params_shape, tf.ones_initializer)\n\n # moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]\n moving_mean = _variable_on_cpu('moving_mean', params_shape,tf.zeros_initializer, trainable = False)\n # tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, moving_mean)\n moving_variance = _variable_on_cpu('moving_variance', params_shape, tf.ones_initializer, trainable = False)\n # tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, moving_variance)\n \n def train_phase():\n mean, variance = tf.nn.moments(inputs, axis)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)\n update_moving_variance = moving_averages.assign_moving_average(moving_variance, \n variance, decay)\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(mean), tf.identity(variance)\n\n def test_phase():\n return moving_mean, moving_variance\t\n\n mean, variance = tf.cond(is_training, train_phase, test_phase)\n outputs = tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)\n outputs.set_shape(inputs.get_shape()) \n\n if activation:\n outputs = activation(outputs)\n\n return outputs",
"def inference(images_placeholder, is_training,\r\n depth1, depth2, depth3, dense1_units, dense2_units,\r\n dropout_rate=0.5):\r\n training_mode = is_training is not None\r\n\r\n # layer1:bn-conv-relu(depth1)-pool\r\n with tf.name_scope('conv1'):\r\n bn = tf.layers.batch_normalization(inputs=images_placeholder, training=training_mode)\r\n tf.summary.histogram('batch norm', bn)\r\n\r\n conv = tf.layers.conv2d(\r\n inputs=bn,\r\n filters=depth1,\r\n kernel_size=[3, 3],\r\n padding=\"same\",\r\n activation=tf.nn.relu\r\n )\r\n tf.summary.histogram('conv layer:', conv)\r\n\r\n pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)\r\n tf.summary.histogram('pool', pool)\r\n\r\n # layer2:bn-conv-relu(depth2)-pool\r\n with tf.name_scope('conv2'):\r\n bn = tf.layers.batch_normalization(inputs=pool, training=training_mode)\r\n tf.summary.histogram('batch norm', bn)\r\n\r\n conv = tf.layers.conv2d(\r\n inputs=bn,\r\n filters=depth2,\r\n kernel_size=[3, 3],\r\n padding=\"same\",\r\n activation=tf.nn.relu\r\n )\r\n tf.summary.histogram('conv layer:', conv)\r\n\r\n pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)\r\n tf.summary.histogram('pool', pool)\r\n\r\n # layer3:bn-conv-relu(depth3)-pool\r\n with tf.name_scope('conv3'):\r\n bn = tf.layers.batch_normalization(inputs=pool, training=training_mode)\r\n tf.summary.histogram('batch norm', bn)\r\n\r\n conv = tf.layers.conv2d(\r\n inputs=bn,\r\n filters=depth3,\r\n kernel_size=[3, 3],\r\n padding=\"same\",\r\n activation=tf.nn.relu\r\n )\r\n tf.summary.histogram('conv layer:', conv)\r\n\r\n pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)\r\n tf.summary.histogram('pool', pool)\r\n\r\n with tf.name_scope('dense1'):\r\n pool_flat = tf.reshape(pool, [-1, 4 * 4 * depth3])\r\n dense = tf.layers.dense(inputs=pool_flat, units=dense1_units, activation=tf.nn.relu)\r\n tf.summary.histogram('dense', dense)\r\n\r\n # dropout\r\n with tf.name_scope('dropout'):\r\n dropout = tf.layers.dropout(\r\n inputs=dense, rate=dropout_rate, training=training_mode)\r\n\r\n # dense2 58 output units\r\n with tf.name_scope('dense2'):\r\n logits = tf.layers.dense(inputs=dropout, units=58)\r\n tf.summary.histogram('dense2', dense)\r\n\r\n return logits",
"def keras_model_functional_with_non_fused_batchnorms_for_tf2():\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65, fused=False)(x, training=True)\n with tf.compat.v1.variable_scope(\"scope_1\"):\n x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.5, epsilon=.35, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax,\n name=\"keras_model_functional_with_non_fused_batchnorms\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model",
"def norm_layer( x, training, name):\n top = tf.layers.batch_normalization( x, \n axis=3, # channels last \n training=training,\n name=name )\n return top",
"def test_bn_fold_with_linear_layer(self):\n inputs = tf.keras.Input(shape=(1, 1, 4,))\n bn = tf.keras.layers.BatchNormalization(fused=True)(inputs, training=False)\n x = tf.keras.layers.Flatten()(bn)\n dense = tf.keras.layers.Dense(2, activation=tf.nn.relu, name=\"linear_layer\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=dense)\n\n # get baseline output\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n baseline_output = model(numpy_data)\n weight_before_fold = model.layers[3].kernel.numpy()\n\n _, model = fold_all_batch_norms(model)\n after_fold_output = model(numpy_data)\n weight_after_fold = model.layers[2].kernel.numpy()\n\n # check that weight got updated\n assert not np.allclose(weight_before_fold, weight_after_fold, atol=1e-4)\n\n # check outputs are close\n assert np.allclose(baseline_output, after_fold_output, atol=1e-3)",
"def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n\tout, cache = None, None\n\n\tN, C, H, W = x.shape\n\ty = x.transpose(0,2,3,1).reshape((N*H*W,C))\n\tout, cache = batchnorm_forward(y, gamma, beta, bn_param)\n\tout = out.reshape((N,H,W,C)).transpose(0,3,1,2)\n\t###########################################################################\n\t# END OF YOUR CODE #\n\t###########################################################################\n\n\treturn out, cache",
"def test_bn_fold_layer_selection_looped_network(self):\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a',\n kernel_initializer=tf.random_uniform_initializer(-1, 1),\n bias_initializer='random_uniform')(input1)\n\n bn_op_1 = tf.keras.layers.BatchNormalization(fused=True)(x1)\n bn_op_2 = tf.keras.layers.BatchNormalization(fused=True)(x1)\n\n add = tf.keras.layers.add([bn_op_1, bn_op_2])\n\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b',\n kernel_initializer=tf.random_uniform_initializer(-1, 1),\n bias_initializer='random_uniform')(add)\n\n model = tf.keras.Model(inputs=input1, outputs=x2)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n\n assert 0 == len(conv_bn_pairs) + len(bn_conv_pairs)"
] | [
"0.72084844",
"0.70753294",
"0.6888734",
"0.6833401",
"0.65570444",
"0.63252455",
"0.6297912",
"0.6279781",
"0.62624854",
"0.6249418",
"0.62053025",
"0.61905295",
"0.6186012",
"0.6178468",
"0.6162257",
"0.6147891",
"0.61445254",
"0.60991395",
"0.6081668",
"0.6066008",
"0.6028096",
"0.60242534",
"0.5985344",
"0.5976863",
"0.59753096",
"0.59631115",
"0.5957688",
"0.59533477",
"0.5938677",
"0.5909502"
] | 0.81622905 | 0 |
Clones layer_op with input_tensor and weight_tensor as new inputs. | def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):
new_layer_name = layer_op.name.split('/')[-1] + '_Fold'
if layer_op.type == 'Conv2D':
return nn_ops.conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
use_cudnn_on_gpu=layer_op.get_attr('use_cudnn_on_gpu'),
data_format=layer_op.get_attr('data_format'),
name=new_layer_name)
elif layer_op.type == 'MatMul':
return math_ops.matmul(
input_tensor,
weight_tensor,
transpose_a=layer_op.get_attr('transpose_a'),
transpose_b=layer_op.get_attr('transpose_b'),
name=new_layer_name)
elif layer_op.type == 'DepthwiseConv2dNative':
return nn.depthwise_conv2d(
input_tensor,
weight_tensor,
strides=layer_op.get_attr('strides'),
padding=layer_op.get_attr('padding'),
name=new_layer_name)
else:
raise ValueError('Cannot handle operation of type: %s' % layer_op.type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)",
"def build(self, input_layer, trainable=True):\n\n with tf.variable_scope(self.name):\n # Determine the size of the input when flattened\n input_layer_shape = input_layer.get_shape()[1:].dims\n flattened_dimension = reduce(lambda x,y: x*y, input_layer_shape, tf.Dimension(1))\n\n # Create the layer\n self.layer = tf.reshape(input_layer, [-1, flattened_dimension.value])\n\n return self.layer, None, None",
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def __init__(self, shape, input_var=None):\n\n self.output = layers.InputLayer(shape, input_var=input_var)",
"def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone",
"def copy(self, **kwargs):\n return Tensor(self.data, **kwargs)",
"def __init__(self, input, weight_init=None):\n n_in = input.get_shape()[1].value\n \n self.input = input\n \n # Initiate the weight for the input layer\n r = 4*np.sqrt(3.0/n_in)\n\n if weight_init is None:\n self.w = tf.Variable(tf.random_uniform([n_in,],-r, r), name='w')\n else: \n self.w = tf.Variable(weight_init, name='w')\n\n self.output = self.w * self.input",
"def copy(tensor):\n raise NotImplementedError",
"def build(self, input_layer, trainable=True):\n\n with tf.variable_scope(self.name):\n # Get the number of input channels\n input_shape = input_layer.get_shape()\n num_input_channels = input_shape[-1].value\n\n # Create the weights and convolutional layer\n weight_shape = [self.kernel_shape[0], self.kernel_shape[1], num_input_channels, self.num_kernels]\n\n# if self.name:\n# self.weights = weight_variable(weight_shape, 'W_'+self.name)\n# else:\n# self.weights = weight_variable(weight_shape)\n\n self.weights = weight_variable(weight_shape, 'weights', trainable)\n self.bias = bias_variable([self.num_kernels], 'bias', trainable)\n\n self.layer = tf.nn.conv2d(input_layer, self.weights, strides=[1, self.stride, self.stride, 1], padding=self.padding) + self.bias\n\n if self.activation_function:\n self.layer = self.activation_function(self.layer)\n\n return self.layer, self.weights, self.bias",
"def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model",
"def __init__(self,layer_def,input,input_shape,rs,clone_from=None):\n batch_size = int(layer_def.find(\"batchsize\").text)\n image_size = int(layer_def.find(\"imagesize\").text)\n image_channels = int(layer_def.find(\"imagechannels\").text)\n self.layer_name = layer_def.attrib[\"name\"]\n self.init(input, input_shape, batch_size,image_size,image_channels)",
"def build(self, input_layer, trainable=True):\n\n with tf.variable_scope(self.name):\n\n # Create a weight matrix\n input_size = input_layer.get_shape()[-1].value\n\n self.weights = weight_variable([input_size, self.output_size], 'weights', trainable, False)\n self.bias = bias_variable([self.output_size], 'bias', trainable)\n\n # Create the ReLU layer\n self.layer = tf.matmul(input_layer, self.weights) + self.bias\n\n if self.activation_function:\n self.layer = self.activation_function(self.layer)\n\n return self.layer, self.weights, self.bias",
"def build(self, input_shape):\n shape = np.ones(len(input_shape), dtype=np.int32)\n shape[self._axis] = input_shape[self._axis]\n self._rand_shape = tf.constant(shape, dtype=tf.dtypes.int32)",
"def make_fully_connected_layer(input_layer,\n layer_size,\n activation=tf.nn.relu,\n layer_name='',\n logs=False):\n if not layer_name:\n layer_name = ''.join(str(x) for x in np.random.randint(9, size=10)) # assign random name\n w = tf.Variable(tf.truncated_normal([int(input_layer.shape[1]), layer_size]), name='w_' + layer_name)\n if logs: tf.summary.histogram('weights', w)\n b = tf.Variable(tf.truncated_normal([1, layer_size]), name='b_' + layer_name)\n if logs: tf.summary.histogram('biases', b)\n z = tf.add(tf.matmul(input_layer, w), b, name='z_' + layer_name)\n if logs: tf.summary.histogram('pre-activations', z)\n a = activation(z, name='a_' + layer_name)\n if logs: tf.summary.histogram('activations', a)\n return a, w, b",
"def __init__(\n self,\n tensor_type: Type,\n dynamic_sizes: Sequence[Value],\n copy: Value,\n size_hint: Value,\n escape: BoolAttr,\n *,\n loc=None,\n ip=None\n ):\n context = get_default_loc_context(loc)\n attributes = {}\n if escape:\n attributes[\"escape\"] = escape\n op = self.build_generic(\n results=[tensor_type],\n operands=[dynamic_sizes, copy, size_hint],\n attributes=attributes,\n loc=loc,\n ip=ip,\n )\n OpView.__init__(self, op)",
"def __init__(self, input, init_w, init_b, activation='sigmoid'):\n\n n_in = input.get_shape()[1].value\n self.input = input\n\n # Initiate the weight for the input layer\n \n w = tf.Variable(init_w, name='w')\n b = tf.Variable(init_b, name='b')\n\n output = tf.add(tf.matmul(input, w), b)\n output = activate(output, activation)\n \n self.w = w\n self.b = b\n self.output = output\n self.params = [w]",
"def clone(self, **kwargs):\n new_inst = MetaTensor(self.as_tensor().clone(**kwargs))\n new_inst.__dict__ = deepcopy(self.__dict__)\n return new_inst",
"def construct_layer(\n self,\n input_layer: \"NeuralNetworkLayer\",\n output_layer: \"NeuralNetworkLayer\",\n **kwargs\n ):\n # Add Nodes\n for node_number in range(self.num_nodes):\n node_object = Circle(\n radius=self.node_radius,\n color=self.node_color,\n stroke_width=self.node_stroke_width,\n )\n self.node_group.add(node_object)\n # Space the nodes\n # Assumes Vertical orientation\n for node_index, node_object in enumerate(self.node_group):\n location = node_index * self.node_spacing\n node_object.move_to([0, location, 0])\n # Create Surrounding Rectangle\n self.surrounding_rectangle = SurroundingRectangle(\n self.node_group,\n color=self.rectangle_color,\n fill_color=self.rectangle_fill_color,\n fill_opacity=1.0,\n buff=self.layer_buffer,\n stroke_width=self.rectangle_stroke_width,\n )\n self.surrounding_rectangle.set_z_index(1)\n # Add the objects to the class\n self.add(self.surrounding_rectangle, self.node_group)\n\n self.construct_activation_function()\n super().construct_layer(input_layer, output_layer, **kwargs)",
"def initial_layer(input_layer):\n INITIAL_LAYER_FILTER = 32\n INITIAL_KERNEL_SIZE = (4, 4)\n return intermediate_layer(input_layer, INITIAL_LAYER_FILTER, INITIAL_KERNEL_SIZE)",
"def clone(tensor):\n cloned = tensor.clone()#tensor.detach().clone()\n # cloned.requires_grad = tensor.requires_grad\n # if tensor.grad is not None:\n # cloned.grad = clone(tensor.grad)\n return cloned",
"def new_layer(self, nodes, inputs, alpha=0.1):\n weights = [[random.uniform(-0.1, 0.1) for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.append(Layer(weights, alphas))",
"def copy_conv(sess, tftensor, layer):\n\n W = sess.graph.get_tensor_by_name('{}/conv2d_params:0'.format(tftensor)).eval()\n W = W.transpose((3, 2, 0, 1))\n\n assert W.shape == layer.W.data.shape\n\n layer.W.data = W",
"def build(self, input_shape):\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError(\"Unable to build `Dense` layer with non-floating point \"\n \"dtype %s\" % (dtype,))\n input_shape = tf.TensorShape(input_shape)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\"The last dimension of the inputs to `Dense` \"\n \"should be defined. Found `None`.\")\n self.last_dim = tf.compat.dimension_value(input_shape[-1])\n self.input_spec = tf.keras.layers.InputSpec(\n min_ndim=3, axes={-1: self.last_dim})\n # Determines variable shapes.\n if self.backward_compatible:\n kernel_shape = self.compatible_kernel_shape\n bias_shape = self.compatible_bias_shape\n else:\n kernel_shape = self.kernel_shape\n bias_shape = self.bias_shape\n\n self.kernel = self.add_weight(\n \"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n \"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n super(Dense3D, self).build(input_shape)",
"def _create_train_input(self, input_batch):\n self.raw_image = input_batch\n self.image = tf.reshape(self.raw_image, (-1, self._im_size[0], self._im_size[1]))\n self.lr = tf.placeholder(tf.float32, name='lr')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')",
"def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Sequence[int]],\n stride: Union[int, Sequence[int]] = 1,\n padding: PaddingArgType = PaddingMode.DEFAULT,\n output_padding: Union[int, Sequence[int]] = 0,\n dilation: Union[int, Sequence[int]] = 1,\n resize_at_exit: bool = False,\n use_shortcut: Optional[bool] = None,\n shortcut: Optional[LayerOrLayerFactory] = None,\n conv0: Optional[LayerOrLayerFactory] = None,\n conv1: Optional[LayerOrLayerFactory] = None,\n merge_context0: Optional[Module] = None,\n merge_context1: Optional[Module] = None,\n activation: Optional[LayerFactory] = None,\n normalizer: Optional[NormalizerFactory] = None,\n dropout: Optional[Union[float, LayerOrLayerFactory]] = None,\n weight_norm: WeightNormArgType = False,\n gated: bool = False,\n gate_bias: float = DEFAULT_GATE_BIAS,\n use_bias: Optional[bool] = None,\n weight_init: TensorInitArgType = DEFAULT_WEIGHT_INIT,\n bias_init: TensorInitArgType = DEFAULT_BIAS_INIT,\n data_init: Optional[DataInitArgType] = None,\n device: Optional[str] = None,\n ):\n def use_bias_or_else(default_val: bool):\n if use_bias is None:\n return default_val\n return use_bias\n\n def compile_layer_list(layers: List[Module]) -> Module:\n if len(layers) == 0:\n return Identity()\n elif len(layers) == 1:\n return layers[0]\n else:\n return Sequential(layers)\n\n spatial_ndims = self._get_spatial_ndims()\n is_deconv = self._is_deconv()\n\n # validate arguments\n in_channels = int(in_channels)\n out_channels = int(out_channels)\n\n kernel_size = validate_conv_size('kernel_size', kernel_size, spatial_ndims)\n stride = validate_conv_size('strides', stride, spatial_ndims)\n dilation = validate_conv_size('dilation', dilation, spatial_ndims)\n padding = validate_padding(padding, kernel_size, dilation, spatial_ndims)\n\n if output_padding != 0 and not is_deconv:\n raise ValueError(f'The `output_padding` argument is not allowed '\n f'by {self.__class__.__qualname__}.')\n output_padding = validate_output_padding(\n output_padding, stride, dilation, spatial_ndims)\n\n if conv0 is None:\n conv0 = self._default_conv_factory()\n\n if conv1 is None:\n conv1 = self._default_conv_factory()\n\n orig_merge_context0 = merge_context0\n if merge_context0 is None:\n merge_context0 = IgnoreContext()\n else:\n merge_context0 = validate_layer('merge_context0', merge_context0)\n\n if merge_context1 is None:\n merge_context1 = IgnoreContext()\n else:\n merge_context1 = validate_layer('merge_context1', merge_context1)\n\n if shortcut is not None:\n use_shortcut = True\n if use_shortcut is None:\n use_shortcut = (\n any(s != 1 for s in stride) or\n any(p[0] + p[1] != (k - 1) * d\n for p, k, d in zip(padding, kernel_size, dilation)) or\n in_channels != out_channels)\n\n if activation is not None:\n activation_factory = validate_layer_factory('activation', activation)\n else:\n activation_factory = None\n\n if normalizer is not None:\n normalizer_factory = validate_layer_factory('normalizer', normalizer)\n else:\n normalizer_factory = None\n\n if isinstance(dropout, float):\n dropout = Dropout(p=dropout)\n elif dropout is not None:\n dropout = get_layer_from_layer_or_factory('dropout', dropout)\n\n conv0_weight_norm = weight_norm\n if conv0_weight_norm is True:\n conv0_weight_norm = (\n WeightNormMode.FULL if normalizer is None or dropout is not None\n else WeightNormMode.NO_SCALE\n )\n\n kwargs = {'weight_init': weight_init, 'bias_init': bias_init,\n 'data_init': data_init, 'device': device}\n\n # build the shortcut path\n if 
use_shortcut:\n if shortcut is None:\n shortcut = self._default_conv_factory()\n if not isinstance(shortcut, Module):\n shortcut = get_layer_from_layer_or_factory(\n 'shortcut', shortcut, kwargs=dict(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n use_bias=use_bias_or_else(gated),\n weight_norm=weight_norm,\n **self._add_output_padding_to_kwargs(output_padding, kwargs)\n )\n )\n else:\n shortcut = Identity()\n\n # prepare the arguments for the residual path\n if resize_at_exit:\n conv0_out_channels = in_channels\n conv0_stride = 1\n conv0_padding = PaddingMode.HALF # such that it can keep the output shape\n conv0_kwargs = kwargs\n conv1_stride = stride\n conv1_padding = padding\n conv1_kwargs = self._add_output_padding_to_kwargs(output_padding, kwargs)\n else:\n conv0_out_channels = out_channels\n conv0_stride = stride\n conv0_padding = padding\n conv0_kwargs = self._add_output_padding_to_kwargs(output_padding, kwargs)\n conv1_stride = 1\n conv1_padding = PaddingMode.HALF # such that it can keep the output shape\n conv1_kwargs = kwargs\n\n conv1_out_channels = out_channels\n if gated:\n conv1_out_channels *= 2\n\n # pre_conv0\n pre_conv0 = []\n if normalizer_factory is not None:\n pre_conv0.append(normalizer_factory(in_channels))\n if activation_factory is not None:\n pre_conv0.append(activation_factory())\n pre_conv0 = compile_layer_list(pre_conv0)\n\n # conv0\n conv0 = get_layer_from_layer_or_factory( # conv0\n 'conv0', conv0, kwargs=dict(\n in_channels=in_channels,\n out_channels=conv0_out_channels,\n kernel_size=kernel_size,\n stride=conv0_stride,\n padding=conv0_padding,\n dilation=dilation,\n use_bias=use_bias_or_else(normalizer_factory is None or\n dropout is not None or\n orig_merge_context0 is not None),\n weight_norm=conv0_weight_norm,\n **conv0_kwargs,\n )\n )\n\n # pre_conv1\n pre_conv1 = []\n if dropout is not None:\n pre_conv1.append(dropout)\n if normalizer_factory is not None:\n pre_conv1.append(normalizer_factory(conv0_out_channels))\n if activation_factory is not None:\n pre_conv1.append(activation_factory())\n pre_conv1 = compile_layer_list(pre_conv1)\n\n # conv1\n conv1 = get_layer_from_layer_or_factory(\n 'conv1', conv1, kwargs=dict(\n in_channels=conv0_out_channels,\n out_channels=conv1_out_channels,\n kernel_size=kernel_size,\n stride=conv1_stride,\n padding=conv1_padding,\n dilation=dilation,\n use_bias=use_bias_or_else(True),\n weight_norm=weight_norm,\n **conv1_kwargs,\n )\n )\n\n # post_conv1\n if gated:\n post_conv1 = Gated(\n feature_axis=-(spatial_ndims + 1),\n num_features=out_channels,\n gate_bias=gate_bias,\n )\n else:\n post_conv1 = Identity()\n\n # construct the layer\n super().__init__()\n self.shortcut = shortcut\n self.pre_conv0 = pre_conv0\n self.merge_context0 = merge_context0\n self.conv0 = conv0\n self.pre_conv1 = pre_conv1\n self.merge_context1 = merge_context1\n self.conv1 = conv1\n self.post_conv1 = post_conv1",
"def __init__(self, input_size, output_size, activation=torch.nn.functional.relu, left_to_right=True):\n super(GraphConvolutionalLayer, self).__init__()\n self.w = torch.nn.Parameter(torch.rand([input_size, output_size]))\n self.activation = activation\n self.left_to_right = left_to_right",
"def build(self, input_shape: tf.Tensor):\n self.dense = tf.keras.layers.Dense(self.channels, input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)",
"def __init__(self, incoming, shape, name='ReshapeLayer'):\n super(ReshapeLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.shape = shape\n self.out = tf.zeros(self.get_output_shape())\n self.name = name",
"def copy(self, shareWeights):\n newNode = SparseLinear(self.inputDim, self.outputDim, self.stdv)\n #newNode.receiveGradFrom = self.receiveGradFrom[:]\n #newNode.receiveInputFrom = self.receiveInputFrom[:]\n if shareWeights:\n newNode.weight = self.weight\n newNode.gradWeight = self.gradWeight\n newNode.bias = self.bias\n newNode.gradBias = self.gradBias\n return newNode",
"def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone"
] | [
"0.62645614",
"0.6237233",
"0.6156408",
"0.61453366",
"0.5968321",
"0.58285654",
"0.5824187",
"0.58094114",
"0.58049345",
"0.5748741",
"0.5666261",
"0.5660313",
"0.5655159",
"0.5610911",
"0.5606715",
"0.55708444",
"0.55697495",
"0.5538788",
"0.5504511",
"0.547193",
"0.54517174",
"0.544682",
"0.5427598",
"0.54218245",
"0.5410881",
"0.5403577",
"0.5396121",
"0.5394196",
"0.53777",
"0.536819"
] | 0.7447388 | 0 |
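A minimal NumPy sketch of the identity behind the row above: `_CloneWithNewOperands` rebuilds a Conv2D/MatMul/DepthwiseConv2dNative op around a weight tensor that has already been scaled per output channel, and for the MatMul branch this is equivalent to scaling the original layer's output. Every name, shape, and value below is an illustrative assumption, not part of the dataset.

# Illustrative sketch only: why cloning a MatMul layer with per-output-channel
# scaled weights matches scaling the original layer's output.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 8)).astype(np.float32)   # hypothetical batch of inputs
w = rng.standard_normal((8, 3)).astype(np.float32)   # hypothetical MatMul weights
gamma = rng.standard_normal(3).astype(np.float32)    # hypothetical per-channel scale
variance = rng.random(3).astype(np.float32) + 0.1
eps = 1e-3

multiplier = gamma / np.sqrt(variance + eps)

# Scaling the layer output per output channel ...
scaled_after = (x @ w) * multiplier
# ... equals running the cloned layer whose weights were pre-multiplied.
scaled_before = x @ (w * multiplier)

assert np.allclose(scaled_after, scaled_before, atol=1e-4)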
Finds all ops and tensors related to found FusedBatchNorms. | def _FindFusedBatchNorms(graph):
input_pattern = graph_matcher.OpTypePattern('*')
weight_pattern = graph_matcher.OpTypePattern('*')
gamma_pattern = graph_matcher.OpTypePattern('*')
beta_pattern = graph_matcher.OpTypePattern('*')
mean_pattern = graph_matcher.OpTypePattern('*')
variance_pattern = graph_matcher.OpTypePattern('*')
conv_pattern = graph_matcher.OpTypePattern(
'Conv2D|DepthwiseConv2dNative', inputs=[input_pattern, weight_pattern])
# MatMul has a Reshape between it and FusedBatchNorm.
matmul_pattern = graph_matcher.OpTypePattern(
'MatMul', inputs=[input_pattern, weight_pattern])
matmul_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape', inputs=[matmul_pattern,
graph_matcher.OpTypePattern('*')])
conv_batch_norm_pattern = graph_matcher.OpTypePattern(
'FusedBatchNorm',
inputs=[
conv_pattern, gamma_pattern, beta_pattern, mean_pattern,
variance_pattern
])
matmul_batch_norm_pattern = graph_matcher.OpTypePattern(
'FusedBatchNorm',
inputs=[
matmul_reshape_pattern, gamma_pattern, beta_pattern, mean_pattern,
variance_pattern
])
matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(
'Reshape',
inputs=[matmul_batch_norm_pattern,
graph_matcher.OpTypePattern('*')])
conv_matcher = graph_matcher.GraphMatcher(conv_batch_norm_pattern)
matmul_matcher = graph_matcher.GraphMatcher(matmul_bn_output_reshape_pattern)
def _GetCommonTensors(match_result, bn_op, bn_input_tensor):
"""Gets tensors needed for FusedBatchNormMatch from match_result."""
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in training is different from that in inference. It takes
# empty 'mean' and empty 'variance', and produces the mean and the variance
# of the batch. Therefore, when is_training is true, mean_tensor and
# variance_tensor point to 1st and 2nd (0-based) output of bn_op,
# respectively; when is_training is false, they point to bn_op's inputs.
is_training = bn_op.get_attr('is_training')
if is_training:
# FusedBatchNormGrad doesn't compute gradients of the batch_mean and
# batch_variance outputs, so we need to substitute our own custom
# gradient.
# TODO(suharshs, raghuramank): Find a way to avoid needing this hack.
# pylint: disable=protected-access
bn_op._set_attr(
'_gradient_op_type',
attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))
# pylint: enable=protected-access
mean_tensor = bn_op.outputs[1]
      # The batch variance used during forward and backward prop is biased,
      # i.e. it is calculated as V = sum((x(k) - mu)^2) / N. For the moving
      # average calculation, the variance is corrected by the factor N/(N - 1)
      # (Bessel's correction). The variance tensor read from FusedBatchNorm has
      # Bessel's correction applied, so we undo it here.
n = math_ops.cast(
array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),
dtypes.float32)
variance_tensor = bn_op.outputs[2] * (n - 1) / n
else:
mean_tensor = match_result.get_tensor(mean_pattern)
variance_tensor = match_result.get_tensor(variance_pattern)
return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor)
for match_result in conv_matcher.match_graph(graph):
layer_op = match_result.get_op(conv_pattern)
layer_tensor = match_result.get_tensor(conv_pattern)
bn_op = match_result.get_op(conv_batch_norm_pattern)
# In the case of convolution the output_tensor is the output of bn_op.
output_tensor = bn_op.outputs[0]
(input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)
yield _FusedBatchNormMatch(
layer_op=layer_op,
bn_op=bn_op,
output_tensor=output_tensor,
input_tensor=input_tensor,
weight_tensor=weight_tensor,
gamma_tensor=gamma_tensor,
beta_tensor=beta_tensor,
mean_tensor=mean_tensor,
variance_tensor=variance_tensor)
for match_result in matmul_matcher.match_graph(graph):
layer_op = match_result.get_op(matmul_pattern)
layer_tensor = match_result.get_tensor(matmul_pattern)
bn_op = match_result.get_op(matmul_batch_norm_pattern)
# In the MatMul case, the output of batch norm is reshaped back into a
# 2D tensor, so the output_tensor is the output of the Reshape op.
output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)
output_tensor = output_reshape_op.outputs[0]
(input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)
yield _FusedBatchNormMatch(
layer_op=layer_op,
bn_op=bn_op,
output_tensor=output_tensor,
input_tensor=input_tensor,
weight_tensor=weight_tensor,
gamma_tensor=gamma_tensor,
beta_tensor=beta_tensor,
mean_tensor=mean_tensor,
variance_tensor=variance_tensor) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Otherwise, TF creates a unique scope whose name starts with\n # `scope`.\n with graph.as_default(), graph.name_scope(scope + sep), ops.device(\n match.bn_op.device):\n with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):\n # new weights = old weights * gamma / sqrt(variance + epsilon)\n # new biases = -mean * gamma / sqrt(variance + epsilon) + beta\n multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(\n match.variance_tensor + match.bn_op.get_attr('epsilon'))\n bias_tensor = math_ops.subtract(\n match.beta_tensor,\n match.mean_tensor * multiplier_tensor,\n name='bias')\n\n # The shape of depthwise weights is different, so we need to reshape the\n # multiplier_tensor to ensure that the scaled_weight_tensor has the\n # expected shape.\n if match.layer_op.type == 'DepthwiseConv2dNative':\n new_shape = [\n match.weight_tensor.get_shape().as_list()[2],\n match.weight_tensor.get_shape().as_list()[3]\n ]\n multiplier_tensor = array_ops.reshape(\n multiplier_tensor, new_shape, name='scale_reshape')\n\n # TODO(suharshs): This naming of the following ops needs to carefully\n # follow the naming expected by quantize.py. Generalize the quantize code\n # to not require these delicate naming conventions.\n scaled_weight_tensor = math_ops.multiply(\n match.weight_tensor, multiplier_tensor, name='mul_fold')\n\n new_layer_tensor = _CloneWithNewOperands(\n match.layer_op, match.input_tensor, scaled_weight_tensor)\n\n bias_add_tensor = math_ops.add(\n new_layer_tensor, bias_tensor, name='add_fold')\n\n nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,\n match.output_tensor)\n if nodes_modified_count != 1:\n raise ValueError(\n 'Unexpected inputs to op: %s' % match.output_tensor.name)",
"def _find_all_batch_norms_to_fold(connected_graph: ConnectedGraph) -> Tuple[\n List[Tuple[LayerType, BatchNormType]], List[Tuple[BatchNormType, LayerType]]]:\n conv_bn_pairs, bn_conv_pairs, bn_to_fold = _find_foldable_bn_pair_and_bn_picked_for_folding(connected_graph)\n return conv_bn_pairs, bn_conv_pairs, bn_to_fold",
"def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)",
"def FindBatchNormLayers(network):\n batch_norm_keys = []\n for layer in network.layer:\n if layer.type =='BatchNorm':\n batch_norm_keys.append(layer.name)\n \n return batch_norm_keys",
"def find_standalone_batchnorm_ops(connected_graph: ConnectedGraph)->set:\n _, _, bn_picked_for_folding = _find_foldable_bn_pair_and_bn_picked_for_folding(connected_graph)\n bn_ops = {op for op in connected_graph.get_all_ops().values() if op.type in BN_OP_TYPES}\n stand_alone_bn_ops = bn_ops - bn_picked_for_folding\n\n return stand_alone_bn_ops",
"def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)",
"def _GetCommonTensors(match_result, bn_op, bn_input_tensor):\n input_tensor = match_result.get_tensor(input_pattern)\n weight_tensor = match_result.get_tensor(weight_pattern)\n gamma_tensor = match_result.get_tensor(gamma_pattern)\n beta_tensor = match_result.get_tensor(beta_pattern)\n # FusedBatchNorm in training is different from that in inference. It takes\n # empty 'mean' and empty 'variance', and produces the mean and the variance\n # of the batch. Therefore, when is_training is true, mean_tensor and\n # variance_tensor point to 1st and 2nd (0-based) output of bn_op,\n # respectively; when is_training is false, they point to bn_op's inputs.\n is_training = bn_op.get_attr('is_training')\n if is_training:\n # FusedBatchNormGrad doesn't compute gradients of the batch_mean and\n # batch_variance outputs, so we need to substitute our own custom\n # gradient.\n # TODO(suharshs, raghuramank): Find a way to avoid needing this hack.\n # pylint: disable=protected-access\n bn_op._set_attr(\n '_gradient_op_type',\n attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))\n # pylint: enable=protected-access\n mean_tensor = bn_op.outputs[1]\n # The batch variance used during forward and backward prop is biased,\n # i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average\n # calculation, the variance is corrected by the term N/N-1 (Bessel's\n # correction). The variance tensor read from FuseBatchNorm has bessel's\n # correction applied, so we undo it here.\n n = math_ops.cast(\n array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),\n dtypes.float32)\n variance_tensor = bn_op.outputs[2] * (n - 1) / n\n else:\n mean_tensor = match_result.get_tensor(mean_pattern)\n variance_tensor = match_result.get_tensor(variance_pattern)\n return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor)",
"def __init__(self):\n self._conv_to_gamma = collections.defaultdict(set)\n for op in tf.get_default_graph().get_operations():\n if op.type != 'FusedBatchNorm':\n continue\n\n convs = _dfs(op)\n for conv in convs:\n if conv.type == 'Conv2D':\n self._conv_to_gamma[conv].add(op.inputs[1]) # Input #1 is gamma.\n\n for op in tf.get_default_graph().get_operations():\n if op.type == 'Conv2D' and op not in self._conv_to_gamma:\n self._conv_to_gamma[op] = None",
"def find_all_batch_norms_to_fold(model, input_shapes, dummy_input: Union[torch.Tensor, Tuple] = None):\n device = utils.get_device(model)\n if dummy_input is not None:\n connected_graph = ConnectedGraph(model, dummy_input)\n else:\n device = utils.get_device(model)\n inp_tensor_list = utils.create_rand_tensors_given_shapes(input_shapes, device)\n connected_graph = ConnectedGraph(model, inp_tensor_list)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(connected_graph)\n return conv_bn_pairs + bn_conv_pairs",
"def test_keras_model_functional_with_non_fused_batchnorms_get_op_product_graph(self):\n tf.compat.v1.reset_default_graph()\n\n _ = keras_model_functional_with_non_fused_batchnorms_for_tf2()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'],\n ['keras_model_functional_with_non_fused_batchnorms/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n _ = conn_graph.get_all_ops()['batch_normalization']\n _ = conn_graph.get_all_ops()['scope_1/batch_normalization_1']\n _ = conn_graph.get_all_ops()['scope_1/batch_normalization_2']\n self.assertEqual(0, conn_graph.branch_count)\n self.assertEqual(14, len(conn_graph.get_all_ops()))\n\n # 13 products from inter module connections\n # 22 products from parameters\n self.assertEqual(35, len(conn_graph.get_all_products()))",
"def fold_all_batch_norms_to_scale(\n sim: QuantizationSimModel,\n) -> List[Tuple[QcQuantizeWrapper, QcQuantizeWrapper]]:\n # pylint: disable=protected-access\n assert sim.model is not None\n assert sim.connected_graph is not None\n\n model = sim.model\n connected_graph = sim.connected_graph\n\n quant_wrappers = {\n quant_wrapper._module_to_wrap: quant_wrapper\n for _, quant_wrapper in sim.quant_wrappers()\n }\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(connected_graph)\n conv_bn_pairs = [\n (quant_wrappers[conv], quant_wrappers[bn]) for conv, bn in conv_bn_pairs\n ]\n bn_conv_pairs = [\n (quant_wrappers[bn], quant_wrappers[conv]) for bn, conv in bn_conv_pairs\n ]\n\n _fold_given_batch_norms(model, conv_bn_pairs, bn_conv_pairs)\n\n return conv_bn_pairs + [(conv, bn) for bn, conv in bn_conv_pairs]",
"def _special_handle_batchnorm(cls, op, X, W):\n # for singa, x, scale, bias is input\n # and mean and var is attribute\n # so we add the mean and var to W\n tensor_list = []\n append_inputs = {\"mean\": op.running_mean, \"var\": op.running_var}\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n append_input = tensor.to_numpy(tensor.from_raw_tensor(append_input))\n tensor_list.append(numpy_helper.from_array(append_input, node_name))\n return tensor_list",
"def keras_model_functional_with_non_fused_batchnorms():\n is_training = tf.compat.v1.placeholder_with_default(tf.constant(True), shape=(), name='is_training')\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65, fused=False)(x, training=True)\n with tf.compat.v1.variable_scope(\"scope_1\"):\n x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25, fused=False)(x, training=is_training)\n x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.5, epsilon=.35, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax,\n name=\"keras_model_functional_with_non_fused_batchnorms\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model",
"def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):\n if axes != [0,2,3]:\n raise Exception('unsupported')\n batch_mean, batch_var = tf.nn.moments(inputs, axes, keep_dims=True)\n shape = batch_mean.get_shape().as_list() # shape is [1,n,1,1]\n offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))\n scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))\n offset = tf.nn.embedding_lookup(offset_m, labels)\n # offset = tf.Print(offset,['offset',offset])\n scale = tf.nn.embedding_lookup(scale_m, labels)\n # scale = tf.Print(scale,['scale',scale])\n\n moving_mean = lib.param(name + '.moving_mean', np.zeros(batch_mean.get_shape(), dtype='float32'), trainable=False)\n moving_variance = lib.param(name + '.moving_variance', np.ones(batch_var.get_shape(), dtype='float32'),trainable=False)\n\n def _batch_norm_training():\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)\n\n def _batch_norm_inference():\n # Version which blends in the current item's statistics\n mean = moving_mean[None, :, None, None]\n var = moving_variance[None, :, None, None]\n '''\n batch_size = tf.cast(tf.shape(inputs)[0], 'float32')\n mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)\n mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]\n var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]\n '''\n return tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None],\n 1e-5), mean, var\n\n if is_training is None:\n outputs = _batch_norm_training()\n else:\n if is_training:\n outputs = _batch_norm_training()\n else:\n outputs = _batch_norm_inference()\n\n if update_moving_stats:\n no_updates = lambda: outputs\n\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)\n\n if is_training:\n outputs = _force_updates()\n else:\n outputs = no_updates()\n\n return outputs",
"def test_find_conv_bn_pairs_functional_nested(self):\n inputs = tf.keras.Input((26, 26, 3))\n conv2d_1 = tf.keras.layers.Conv2D(filters=3, kernel_size=3, strides=1)(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True)(inputs)\n conv2d_2 = tf.keras.layers.Conv2D(filters=3, kernel_size=3, strides=1)(bn)\n outputs = tf.keras.layers.add([conv2d_1, conv2d_2])\n Block1 = tf.keras.Model(inputs=inputs, outputs=outputs)\n\n inputs2 = tf.keras.Input((28, 28, 64))\n bn1 = tf.keras.layers.BatchNormalization(fused=True)(inputs2)\n relu = tf.keras.layers.ReLU()(bn1)\n conv2d_0 = tf.keras.layers.Conv2D(3, 3)(relu)\n block1 = Block1(conv2d_0)\n outputs = tf.keras.layers.ReLU()(block1)\n model = tf.keras.Model(inputs=inputs2, outputs=outputs)\n\n node_layer_map = common.create_node_to_layer_map(model)\n layer_out_node_map = common.create_layer_to_out_node_map(model)\n conv_linear_with_bn_dict = _find_possible_convs_linears_bn(node_layer_map, layer_out_node_map)\n\n assert 10 == len(node_layer_map)\n assert 9 == len(layer_out_node_map)\n assert 1 == len(conv_linear_with_bn_dict)",
"def DerefBatchNormLayers(network, batch_norm_names, layers_dict, suffix='_fold', \n lr_mult=1.0, decay_mult=1.0):\n for bn_layer_name in batch_norm_names:\n index = layers_dict[bn_layer_name]\n bn_layer = network.layer[index]\n \n if (len(bn_layer.bottom) != 1) or (len(bn_layer.top) != 1):\n raise AssertionError('Expected bn layer to have one top and bottom')\n \n prev_layer_idx = index - 1\n next_layer_idx = index + 1\n prev_layer, next_layer = network.layer[prev_layer_idx], network.layer[next_layer_idx]\n \n if not (prev_layer.top == bn_layer.bottom and bn_layer.top == next_layer.bottom):\n raise AssertionError(\"Could not find previous and next nodes for\"\n \"batch norm layer\")\n \n if next_layer.type != 'Scale':\n print bn_layer_name, next_layer.type, next_layer.name\n raise AssertionError('Expected Scale layer to follow batch norm layer')\n \n if not (len(prev_layer.top) == 1 and len(next_layer.bottom) == 1):\n raise AssertionError(\"Expected previous and next blobs to have\" \n \"only one input and output\")\n \n next_layer.bottom[0] = prev_layer.top[0]\n next_layer.name = next_layer.name + suffix\n\n if lr_mult != 1.0 or decay_mult != 1.0:\n while len(next_layer.param) < 2:\n next_layer.param.add()\n for i in range(len(next_layer.param)):\n next_layer.param[i].lr_mult = lr_mult\n next_layer.param[i].decay_mult = decay_mult",
"def inference(images):\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv1)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv2)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n _activation_summary(local3)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n _activation_summary(local4)\n\n # linear layer(WX + b),\n # We don't apply softmax here because\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1/192.0, wd=0.0)\n biases = _variable_on_cpu('biases', [NUM_CLASSES],\n tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)\n _activation_summary(softmax_linear)\n\n return softmax_linear",
"def _get_grads_as_flattened(\n self,\n model: FSDP,\n uses_tp: bool,\n param_name_to_numel: Dict[str, int],\n param_name_to_sharding_info: Dict[str, Tuple[torch.Size, int]],\n tp_pg: Optional[dist.ProcessGroup],\n fsdp_pg: Optional[dist.ProcessGroup],\n sharded_param_names: Optional[List[str]],\n ) -> torch.Tensor:\n local_grads_as_flattened = (\n torch.cat([torch.flatten(param.grad) for param in model.parameters()])\n .contiguous()\n .cuda(self.rank)\n )\n all_grads_as_flattened = torch.cat(\n [torch.empty_like(local_grads_as_flattened) for _ in range(fsdp_pg.size())]\n ).contiguous()\n dist._all_gather_base(\n all_grads_as_flattened, local_grads_as_flattened, group=fsdp_pg\n )\n if not uses_tp:\n return all_grads_as_flattened\n splits = tuple(param_name_to_numel.values())\n all_grads_per_param = list(all_grads_as_flattened.split(splits))\n for param_idx, param_name in enumerate(\n param_name_to_numel.keys()\n ): # assumes fixed order\n if param_name in sharded_param_names:\n local_tensor_size = list(param_name_to_sharding_info[param_name][0])\n sharding_dim = param_name_to_sharding_info[param_name][1]\n local_tensor_size[sharding_dim] //= tp_pg.size()\n local_tensor = all_grads_per_param[param_idx].view(*local_tensor_size)\n local_tensors = [\n torch.empty_like(local_tensor) for _ in range(tp_pg.size())\n ]\n dist.all_gather(local_tensors, local_tensor, group=tp_pg)\n all_grads_per_param[param_idx] = torch.cat(\n local_tensors, dim=sharding_dim\n ).reshape(-1)\n return torch.cat(all_grads_per_param).contiguous()",
"def keras_model_functional_with_non_fused_batchnorms_for_tf2():\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65, fused=False)(x, training=True)\n with tf.compat.v1.variable_scope(\"scope_1\"):\n x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.5, epsilon=.35, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax,\n name=\"keras_model_functional_with_non_fused_batchnorms\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model",
"def run_train_iter(self, session, batch, summary_writer):\n # Match up our input data with the placeholders\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout\n\n # if not use raw graph tokens\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n\n # output_feed contains the things we want to fetch.\n output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm, self.dev_loss]\n\n # Run the model\n [_, summaries, loss, global_step, param_norm, gradient_norm, dev_loss] = session.run(output_feed, input_feed)\n\n # All summaries in the graph are added to Tensorboard\n summary_writer.add_summary(summaries, global_step)\n\n return loss, global_step, param_norm, gradient_norm, dev_loss",
"def try_all_gpus():\n ctx_list = []\n try:\n for i in range(16):\n ctx = mx.gpu(i)\n _ = nd.array([0], ctx=ctx)\n ctx_list.append(ctx)\n except:\n pass\n if not ctx_list:\n ctx_list = [mx.cpu()]\n return ctx_list",
"def get_kernel_norms(self):\n return self.adjacency",
"def inference(images):\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=None)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _create_variable('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=None)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _create_variable('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [images.get_shape().as_list()[0], -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _create_variable('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _create_variable('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n\n # linear layer(WX + b),\n # We don't apply softmax here because\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1 / 192.0, wd=None)\n biases = _create_variable('biases', [NUM_CLASSES],\n tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)\n\n return softmax_linear",
"def get_kernel_norms(self):\n corresponding_simu = self._corresponding_simu()\n get_norm = np.vectorize(lambda kernel: kernel.get_norm())\n return get_norm(corresponding_simu.kernels)",
"def convert_standalone_batchnorms(model: torch.nn.Module,\n dummy_input: Union[torch.Tensor, Tuple],\n folded_bn: set) -> List[Tuple[Any, BatchNorm2d]]:\n\n module_list = utils.get_ordered_list_of_modules(model, dummy_input)\n bn_converted = []\n for name, module in module_list:\n if isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d)) and module not in folded_bn:\n convert_batchnorm_parameters(model, module)\n _logger.debug(\"%s weights got converted\", name)\n bn_converted.append((name, module))\n return bn_converted",
"def test_bn_fold_find_layers_model_with_multi_input(self):\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n bn_op = tf.keras.layers.BatchNormalization(fused=True)(x)\n relu = tf.nn.relu(bn_op)\n model = tf.keras.Model(inputs=[input1, input2], outputs=relu)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n assert 1 == len(conv_bn_pairs) + len(bn_conv_pairs)",
"def get_weight_norms(self, sess, matrix_norm_fxn = lambda x: np.linalg.norm(x, ord = 1)):\n model_norms = []\n weights_list = self.get_weights_np(sess)\n for weights in weights_list:\n norm = matrix_norm_fxn(weights)\n model_norms.append(norm)\n return model_norms",
"def fuse_model(self):\n\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()",
"def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n\tout, cache = None, None\n\n\tN, C, H, W = x.shape\n\ty = x.transpose(0,2,3,1).reshape((N*H*W,C))\n\tout, cache = batchnorm_forward(y, gamma, beta, bn_param)\n\tout = out.reshape((N,H,W,C)).transpose(0,3,1,2)\n\t###########################################################################\n\t# END OF YOUR CODE #\n\t###########################################################################\n\n\treturn out, cache",
"def get_all_layers(self, img): # noqa\n s1_out, c1_out, s2_out, c2_out = self.run_all_layers(img)\n return (\n [s1.cpu().detach().numpy() for s1 in s1_out],\n [c1.cpu().detach().numpy() for c1 in c1_out],\n [[s2_.cpu().detach().numpy() for s2_ in s2] for s2 in s2_out],\n [c2.cpu().detach().numpy() for c2 in c2_out],\n )"
] | [
"0.68619853",
"0.6494927",
"0.60702705",
"0.5982493",
"0.5894786",
"0.5847638",
"0.57264715",
"0.5440104",
"0.5178902",
"0.5135283",
"0.5134985",
"0.5104784",
"0.5068771",
"0.5065753",
"0.5037282",
"0.5032491",
"0.50149405",
"0.50096035",
"0.5000106",
"0.4991522",
"0.49882165",
"0.49863228",
"0.49755704",
"0.49200854",
"0.491101",
"0.49089321",
"0.489331",
"0.48928055",
"0.487677",
"0.48762015"
] | 0.7717176 | 0 |
Gets tensors needed for FusedBatchNormMatch from match_result. | def _GetCommonTensors(match_result, bn_op, bn_input_tensor):
input_tensor = match_result.get_tensor(input_pattern)
weight_tensor = match_result.get_tensor(weight_pattern)
gamma_tensor = match_result.get_tensor(gamma_pattern)
beta_tensor = match_result.get_tensor(beta_pattern)
# FusedBatchNorm in training is different from that in inference. It takes
# empty 'mean' and empty 'variance', and produces the mean and the variance
# of the batch. Therefore, when is_training is true, mean_tensor and
# variance_tensor point to 1st and 2nd (0-based) output of bn_op,
# respectively; when is_training is false, they point to bn_op's inputs.
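    # (In inference, the frozen moving mean and variance are fed to the op as
    # inputs, which is why the else-branch below reads them back from the
    # matched input patterns rather than from bn_op's outputs.)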
is_training = bn_op.get_attr('is_training')
if is_training:
# FusedBatchNormGrad doesn't compute gradients of the batch_mean and
# batch_variance outputs, so we need to substitute our own custom
# gradient.
# TODO(suharshs, raghuramank): Find a way to avoid needing this hack.
# pylint: disable=protected-access
bn_op._set_attr(
'_gradient_op_type',
attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))
# pylint: enable=protected-access
mean_tensor = bn_op.outputs[1]
      # The batch variance used during forward and backward prop is biased,
      # i.e. it is calculated as: V = sum_k (x(k) - mu)^2 / N. For the moving
      # average calculation, the variance is corrected by the factor N/(N-1)
      # (Bessel's correction). The variance tensor read from FusedBatchNorm has
      # Bessel's correction applied, so we undo it here.
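      # Concretely, n below is the number of elements averaged per channel
      # (batch size times spatial size), and the rescaling applied is
      # biased_variance = unbiased_variance * (n - 1) / n.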
n = math_ops.cast(
array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),
dtypes.float32)
variance_tensor = bn_op.outputs[2] * (n - 1) / n
else:
mean_tensor = match_result.get_tensor(mean_pattern)
variance_tensor = match_result.get_tensor(variance_pattern)
return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,
variance_tensor) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _FindFusedBatchNorms(graph):\n input_pattern = graph_matcher.OpTypePattern('*')\n weight_pattern = graph_matcher.OpTypePattern('*')\n gamma_pattern = graph_matcher.OpTypePattern('*')\n beta_pattern = graph_matcher.OpTypePattern('*')\n mean_pattern = graph_matcher.OpTypePattern('*')\n variance_pattern = graph_matcher.OpTypePattern('*')\n\n conv_pattern = graph_matcher.OpTypePattern(\n 'Conv2D|DepthwiseConv2dNative', inputs=[input_pattern, weight_pattern])\n # MatMul has a Reshape between it and FusedBatchNorm.\n matmul_pattern = graph_matcher.OpTypePattern(\n 'MatMul', inputs=[input_pattern, weight_pattern])\n matmul_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape', inputs=[matmul_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n conv_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n matmul_reshape_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape',\n inputs=[matmul_batch_norm_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_matcher = graph_matcher.GraphMatcher(conv_batch_norm_pattern)\n matmul_matcher = graph_matcher.GraphMatcher(matmul_bn_output_reshape_pattern)\n\n def _GetCommonTensors(match_result, bn_op, bn_input_tensor):\n \"\"\"Gets tensors needed for FusedBatchNormMatch from match_result.\"\"\"\n input_tensor = match_result.get_tensor(input_pattern)\n weight_tensor = match_result.get_tensor(weight_pattern)\n gamma_tensor = match_result.get_tensor(gamma_pattern)\n beta_tensor = match_result.get_tensor(beta_pattern)\n # FusedBatchNorm in training is different from that in inference. It takes\n # empty 'mean' and empty 'variance', and produces the mean and the variance\n # of the batch. Therefore, when is_training is true, mean_tensor and\n # variance_tensor point to 1st and 2nd (0-based) output of bn_op,\n # respectively; when is_training is false, they point to bn_op's inputs.\n is_training = bn_op.get_attr('is_training')\n if is_training:\n # FusedBatchNormGrad doesn't compute gradients of the batch_mean and\n # batch_variance outputs, so we need to substitute our own custom\n # gradient.\n # TODO(suharshs, raghuramank): Find a way to avoid needing this hack.\n # pylint: disable=protected-access\n bn_op._set_attr(\n '_gradient_op_type',\n attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))\n # pylint: enable=protected-access\n mean_tensor = bn_op.outputs[1]\n # The batch variance used during forward and backward prop is biased,\n # i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average\n # calculation, the variance is corrected by the term N/N-1 (Bessel's\n # correction). 
The variance tensor read from FuseBatchNorm has bessel's\n # correction applied, so we undo it here.\n n = math_ops.cast(\n array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),\n dtypes.float32)\n variance_tensor = bn_op.outputs[2] * (n - 1) / n\n else:\n mean_tensor = match_result.get_tensor(mean_pattern)\n variance_tensor = match_result.get_tensor(variance_pattern)\n return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor)\n\n for match_result in conv_matcher.match_graph(graph):\n layer_op = match_result.get_op(conv_pattern)\n layer_tensor = match_result.get_tensor(conv_pattern)\n bn_op = match_result.get_op(conv_batch_norm_pattern)\n # In the case of convolution the output_tensor is the output of bn_op.\n output_tensor = bn_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)\n\n for match_result in matmul_matcher.match_graph(graph):\n layer_op = match_result.get_op(matmul_pattern)\n layer_tensor = match_result.get_tensor(matmul_pattern)\n bn_op = match_result.get_op(matmul_batch_norm_pattern)\n # In the MatMul case, the output of batch norm is reshaped back into a\n # 2D tensor, so the output_tensor is the output of the Reshape op.\n output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)\n output_tensor = output_reshape_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)",
"def _get_batch(self, i: int, matches: List[Dict]) -> List[Dict]:\n return matches[i:(i + self.batch_size)]",
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Otherwise, TF creates a unique scope whose name starts with\n # `scope`.\n with graph.as_default(), graph.name_scope(scope + sep), ops.device(\n match.bn_op.device):\n with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):\n # new weights = old weights * gamma / sqrt(variance + epsilon)\n # new biases = -mean * gamma / sqrt(variance + epsilon) + beta\n multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(\n match.variance_tensor + match.bn_op.get_attr('epsilon'))\n bias_tensor = math_ops.subtract(\n match.beta_tensor,\n match.mean_tensor * multiplier_tensor,\n name='bias')\n\n # The shape of depthwise weights is different, so we need to reshape the\n # multiplier_tensor to ensure that the scaled_weight_tensor has the\n # expected shape.\n if match.layer_op.type == 'DepthwiseConv2dNative':\n new_shape = [\n match.weight_tensor.get_shape().as_list()[2],\n match.weight_tensor.get_shape().as_list()[3]\n ]\n multiplier_tensor = array_ops.reshape(\n multiplier_tensor, new_shape, name='scale_reshape')\n\n # TODO(suharshs): This naming of the following ops needs to carefully\n # follow the naming expected by quantize.py. Generalize the quantize code\n # to not require these delicate naming conventions.\n scaled_weight_tensor = math_ops.multiply(\n match.weight_tensor, multiplier_tensor, name='mul_fold')\n\n new_layer_tensor = _CloneWithNewOperands(\n match.layer_op, match.input_tensor, scaled_weight_tensor)\n\n bias_add_tensor = math_ops.add(\n new_layer_tensor, bias_tensor, name='add_fold')\n\n nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,\n match.output_tensor)\n if nodes_modified_count != 1:\n raise ValueError(\n 'Unexpected inputs to op: %s' % match.output_tensor.name)",
"def match_results(self):\n return np.array(list(self._match_result_dict.values()))",
"def match(\n self, key_embeds: Tensor, ref_embeds: Tensor,\n key_sampling_results: List[SamplingResult],\n ref_sampling_results: List[SamplingResult]\n ) -> Tuple[List[Tensor], List[Tensor]]:\n\n num_key_rois = [res.pos_bboxes.size(0) for res in key_sampling_results]\n key_embeds = torch.split(key_embeds, num_key_rois)\n num_ref_rois = [res.bboxes.size(0) for res in ref_sampling_results]\n ref_embeds = torch.split(ref_embeds, num_ref_rois)\n\n dists, cos_dists = [], []\n for key_embed, ref_embed in zip(key_embeds, ref_embeds):\n dist = embed_similarity(\n key_embed,\n ref_embed,\n method='dot_product',\n temperature=self.softmax_temp)\n dists.append(dist)\n if self.loss_track_aux is not None:\n cos_dist = embed_similarity(\n key_embed, ref_embed, method='cosine')\n cos_dists.append(cos_dist)\n else:\n cos_dists.append(None)\n return dists, cos_dists",
"def get_targets(\n self, gt_match_indices: List[Tensor],\n key_sampling_results: List[SamplingResult],\n ref_sampling_results: List[SamplingResult]) -> Tuple[List, List]:\n\n track_targets = []\n track_weights = []\n for _gt_match_indices, key_res, ref_res in zip(gt_match_indices,\n key_sampling_results,\n ref_sampling_results):\n targets = _gt_match_indices.new_zeros(\n (key_res.pos_bboxes.size(0), ref_res.bboxes.size(0)),\n dtype=torch.int)\n _match_indices = _gt_match_indices[key_res.pos_assigned_gt_inds]\n pos2pos = (_match_indices.view(\n -1, 1) == ref_res.pos_assigned_gt_inds.view(1, -1)).int()\n targets[:, :pos2pos.size(1)] = pos2pos\n weights = (targets.sum(dim=1) > 0).float()\n track_targets.append(targets)\n track_weights.append(weights)\n return track_targets, track_weights",
"def get_match_stats(detected: np.ndarray, matched: np.ndarray) -> tuple:\n n = len(detected) # batch_size\n tp = np.zeros(n)\n fp = np.zeros(n)\n t = np.zeros(n)\n p = np.zeros(n)\n for ii in range(n):\n tp[ii] = np.sum(matched[ii])\n fp[ii] = len(matched[ii]) - np.sum(matched[ii])\n t[ii] = len(detected[ii])\n p[ii] = len(matched[ii])\n return tp, fp, t, p",
"def test_fused_batch_norm_uneven_batch(self, distribution):\n self.skipTest(\"TODO(b/234354008): Requires fetching data from network.\")\n (train_images, train_labels), _ = fashion_mnist.load_data()\n # add channel dimension to make 2D data into 3D, since some ops of the\n # model require it.\n train_images = train_images[..., None]\n train_images = train_images / np.float32(255)\n\n # Padding images because ResNet requires a minimal shape of (32, 32)\n padded_train_images = np.concatenate(\n [\n np.zeros((len(train_images), 2, 28, 1)),\n train_images,\n np.zeros((len(train_images), 2, 28, 1)),\n ],\n axis=1,\n )\n padded_train_images = np.concatenate(\n [\n np.zeros((len(train_images), 32, 2, 1)),\n padded_train_images,\n np.zeros((len(train_images), 32, 2, 1)),\n ],\n axis=2,\n )\n\n buffer_size = len(train_images)\n global_batch_size = distribution.num_replicas_in_sync\n num_samples = global_batch_size - 1\n\n epochs = 2\n\n # Keep only the first images, so that the last GPU receives an empty\n # batch\n padded_train_images = padded_train_images[:num_samples]\n train_labels = train_labels[:num_samples]\n\n train_dataset = (\n tf.data.Dataset.from_tensor_slices(\n (padded_train_images, train_labels)\n )\n .shuffle(buffer_size)\n .batch(global_batch_size)\n )\n train_dist_dataset = distribution.experimental_distribute_dataset(\n train_dataset\n )\n\n def create_model():\n inputs = keras.Input((32, 32, 1))\n preprocessed = keras.layers.Conv2D(3, (1, 1))(\n inputs\n ) # ResNet requires 3 channels\n features = resnet_v2.ResNet50V2(\n include_top=False,\n input_tensor=preprocessed,\n pooling=\"avg\",\n weights=None,\n ).output\n return keras.Model(inputs, features)\n\n with distribution.scope():\n # Set reduction to `none` so we can do the reduction afterwards and\n # divide by global batch size.\n loss_object = keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction=losses_impl.Reduction.NONE\n )\n\n def compute_resnet_loss(labels, predictions):\n per_example_loss = loss_object(labels, predictions)\n return tf.nn.compute_average_loss(\n per_example_loss, global_batch_size=global_batch_size\n )\n\n model = create_model()\n\n optimizer = optimizers.adam_legacy.Adam()\n\n def train_step(inputs):\n images, labels = inputs\n\n with tf.GradientTape() as tape:\n predictions = model(images, training=True)\n loss = compute_resnet_loss(labels, predictions)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss\n\n @tf.function\n def distributed_train_step(dataset_inputs):\n per_replica_losses = distribution.run(\n train_step, args=(dataset_inputs,)\n )\n return distribution.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None\n )\n\n for epoch in range(epochs):\n # Train loop\n total_loss = 0.0\n num_batches = 0\n for x in train_dist_dataset:\n total_loss += distributed_train_step(x)\n num_batches += 1\n train_loss = total_loss / num_batches\n\n print(f\"Epoch {epoch+1}, Loss: {train_loss}\")",
"def build(self):\n tf_predictions = self.BatchNormClassifier(self.tf_inputs)\n return tf_predictions",
"def gather_all_tensors(result: Tensor, group: Optional[Any]=None) ->List[Tensor]:\n if group is None:\n group = torch.distributed.group.WORLD\n result = result.contiguous()\n world_size = torch.distributed.get_world_size(group)\n torch.distributed.barrier(group=group)\n if result.ndim == 0:\n return _simple_gather_all_tensors(result, group, world_size)\n local_size = torch.tensor(result.shape, device=result.device)\n local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)]\n torch.distributed.all_gather(local_sizes, local_size, group=group)\n max_size = torch.stack(local_sizes).max(dim=0).values\n all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)\n if all_sizes_equal:\n return _simple_gather_all_tensors(result, group, world_size)\n pad_dims = []\n pad_by = (max_size - local_size).detach().cpu()\n for val in reversed(pad_by):\n pad_dims.append(0)\n pad_dims.append(val.item())\n result_padded = F.pad(result, pad_dims)\n gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)]\n torch.distributed.all_gather(gathered_result, result_padded, group)\n for idx, item_size in enumerate(local_sizes):\n slice_param = [slice(dim_size) for dim_size in item_size]\n gathered_result[idx] = gathered_result[idx][slice_param]\n return gathered_result",
"def get_list(self):\n for match in self.matches:\n img_pixel = [int(self.featureA[match.queryIdx].pt[0]), int(self.featureA[match.queryIdx].pt[1])]\n depth = aligned_depth_frame.get_distance(img_pixel[0], img_pixel[1])\n point_a = rs.rs2_deproject_pixel_to_point(self.intrin, img_pixel, depth)\n point_a = [point_a[0], point_a[2], 1]\n img_pixel = [int(self.featureB[match.trainIdx].pt[0]), int(self.featureB[match.trainIdx].pt[1])]\n depth = aligned_depth_frame.get_distance(img_pixel[0], img_pixel[1])\n point_b = rs.rs2_deproject_pixel_to_point(self.intrin, img_pixel, depth)\n point_b = [point_b[0], point_b[2], 1]\n self.listA.append(point_a)\n self.listB.append(point_b)",
"def _match_back(self):\n if self.algo == 'MLSTM':\n match_layer = MatchLSTMLayer(self.hidden_size)\n elif self.algo == 'BIDAF':\n match_layer = AttentionFlowMatchLayer(self.hidden_size)\n else:\n raise NotImplementedError('The algorithm {} is not implemented.'.format(self.algo))\n self.match_p_encodes, _ = match_layer.match(self.sep_p_encodes, self.sep_q_encodes,\n self.p_length, self.q_length)\n if self.use_dropout:\n self.match_p_encodes = tf.nn.dropout(self.match_p_encodes, self.dropout_keep_prob)",
"def _match_keypoints(self, query_image: Image) -> typing.Tuple[typing.List[int], typing.List[typing.Any]]:\n query_image.get_keypoints_and_descriptors()\n\n # matches = self.matcher.match(query_image.descriptor)\n matches = self.matcher.knnMatch(query_image.descriptor, k=2)\n good = []\n for m_n in matches:\n if len(m_n) == 1:\n good.append(m_n[0])\n continue\n elif len(m_n) != 2:\n continue\n (m, n) = m_n\n if m.distance < 0.7 * n.distance:\n good.append(m)\n\n images_scores = [0] * len(self._candidate_images)\n images_matches = [None] * len(self._candidate_images)\n for image_index, image in enumerate(self._candidate_images):\n matches_scores = []\n matches = []\n for i, match in enumerate(good):\n if match.imgIdx != image_index:\n continue\n matches.append(match)\n matches_scores.append((256 - match.distance) / 256)\n\n match_cnt = len(matches_scores)\n if match_cnt <= 0:\n continue\n\n images_scores[image_index] = (\n 0.5 + ((math.tanh(match_cnt / 3 - 1)) / 2)) * (sum(matches_scores) / match_cnt)\n\n images_matches[image_index] = matches\n\n return images_scores, images_matches",
"def result(self) -> Dict[str, tf.Tensor]:\n return super().result()",
"def result(self) -> Dict[str, tf.Tensor]:\n return super().result()",
"def variables(self):\n return np.array(list(self._match_result_dict.keys()))",
"def target_tensors(self):\n return None",
"def _match(self):\n if self.algo == 'MLSTM':\n match_layer = MatchLSTMLayer(self.hidden_size)\n elif self.algo == 'BIDAF':\n match_layer = AttentionFlowMatchLayer(self.hidden_size)\n else:\n raise NotImplementedError('The algorithm {} is not implemented.'.format(self.algo))\n self.match_p_encodes, _ = match_layer.match(self.sep_p_encodes, self.sep_q_encodes,\n self.p_length, self.q_length)\n if self.use_dropout:\n self.match_p_encodes = tf.nn.dropout(self.match_p_encodes, self.dropout_keep_prob)",
"def matching_accuracy(pmat_pred, pmat_gt, ns):\n device = pmat_pred.device\n batch_num = pmat_pred.shape[0]\n\n pmat_gt = pmat_gt.to(device)\n\n assert torch.all((pmat_pred == 0) + (pmat_pred == 1)), 'pmat_pred can noly contain 0/1 elements.'\n assert torch.all((pmat_gt == 0) + (pmat_gt == 1)), 'pmat_gt should noly contain 0/1 elements.'\n assert torch.all(torch.sum(pmat_gt, dim=-1) <= 1) and torch.all(torch.sum(pmat_gt, dim=-2) <= 1)\n assert torch.all(torch.sum(pmat_pred, dim=-1) <= 1) and torch.all(torch.sum(pmat_pred, dim=-2) <= 1)\n\n #indices_pred = torch.argmax(pmat_pred, dim=-1)\n #indices_gt = torch.argmax(pmat_gt, dim=-1)\n\n #matched = (indices_gt == indices_pred).type(pmat_pred.dtype)\n match_num_list = []\n gt_num_list = []\n pred_num_list = []\n acc_gt = []\n acc_pred = []\n for b in range(batch_num): #acc_gt,acc_pred\n #match_num += torch.sum(matched[b, :ns[b]])\n #total_num += ns[b].item()\n match_num = torch.sum(pmat_pred[b, :] * pmat_gt[b, :]) + 1e-8 #:ns[b]\n gt_num = torch.sum(pmat_gt[b, :]) + 1e-8\n pred_num = torch.sum(pmat_pred[b,:]) + 1e-8\n match_num_list.append(match_num.cpu().numpy())\n gt_num_list.append(gt_num.cpu().numpy())\n pred_num_list.append(pred_num.cpu().numpy())\n acc_gt.append((match_num/gt_num).cpu().numpy())\n acc_pred.append((match_num/pred_num).cpu().numpy())\n\n return {'acc_gt': np.array(acc_gt),\n 'acc_pred': np.array(acc_pred),\n 'match_num': np.array(match_num_list),\n 'gt_num': np.array(gt_num_list),\n 'pred_num': np.array(pred_num_list)}",
"def get_candidates(\n self,\n word: Dict[str, torch.LongTensor]\n ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n\n # shape: [batch_size, 1]\n original_target_ids = word['target'].squeeze()\n\n # shape: [batch_size, num_variants]\n candidate_ids = self.mapping_ids[original_target_ids]\n\n # shape: [batch_size, num_variants]\n mask = self.mapping_mask[original_target_ids]\n\n # shape: [batch_size, num_variants]\n mask = mask & (candidate_ids != original_target_ids.unsqueeze(-1)).long()\n\n # shape: [batch_size, num_variants, emb_size]\n candidate_vectors = self.target_w2v(candidate_ids)\n\n return (\n candidate_ids,\n candidate_vectors,\n mask\n )",
"def batch_norm_pattern():\n pattern = is_op(\"nn.batch_norm\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant()\n )\n pattern = is_tuple_get_item(pattern)\n return pattern",
"def tensors(self):\n return [x[0] for x in self.__normalizeData__(self.__tensors__)]",
"def hybrid_forward(self, F, rois, gt_masks, matches, cls_targets):\n\n # cannot know M (num_gt) to have accurate batch id B * M, must split batch dim\n def _split(x, axis, num_outputs, squeeze_axis):\n x = F.split(x, axis=axis, num_outputs=num_outputs, squeeze_axis=squeeze_axis)\n if isinstance(x, list):\n return x\n elif self._num_images > 1:\n return list(x)\n else:\n return [x]\n\n with autograd.pause():\n # gt_masks (B, M, H, W) -> (B, M, 1, H, W) -> B * (M, 1, H, W)\n gt_masks = gt_masks.reshape((0, -4, -1, 1, 0, 0))\n gt_masks = _split(gt_masks, axis=0, num_outputs=self._num_images, squeeze_axis=True)\n # rois (B, N, 4) -> B * (N, 4)\n rois = _split(rois, axis=0, num_outputs=self._num_images, squeeze_axis=True)\n # remove possible -1 match\n matches = F.relu(matches)\n # matches (B, N) -> B * (N,)\n matches = _split(matches, axis=0, num_outputs=self._num_images, squeeze_axis=True)\n # cls_targets (B, N) -> B * (N,)\n cls_targets = _split(cls_targets, axis=0, num_outputs=self._num_images,\n squeeze_axis=True)\n\n # (1, C)\n cids = F.arange(1, self._num_classes + 1)\n cids = cids.reshape((1, -1))\n\n mask_targets = []\n mask_masks = []\n for roi, gt_mask, match, cls_target in zip(rois, gt_masks, matches, cls_targets):\n # batch id = match\n padded_rois = F.concat(match.reshape((-1, 1)), roi, dim=-1)\n # pooled_mask (N, 1, MS, MS)\n pooled_mask = F.contrib.ROIAlign(gt_mask, padded_rois,\n self._mask_size, 1.0, sample_ratio=2)\n\n # (N,) -> (1, C) -> (N, C, 1, 1)\n cls_target = F.expand_dims(cls_target, 1)\n same_cids = F.broadcast_equal(cls_target, cids)\n same_cids = same_cids.reshape((-2, 1, 1))\n\n # (N, MS, MS) -> (N, C, 1, 1) -> (N, C, MS, MS)\n mask_mask = F.broadcast_like(same_cids, pooled_mask,\n lhs_axes=(2, 3), rhs_axes=(2, 3))\n\n # (N, 1, MS, MS) -> (N, C, MS, MS)\n mask_target = F.broadcast_axis(pooled_mask, size=self._num_classes, axis=1)\n\n mask_targets.append(mask_target)\n mask_masks.append(mask_mask)\n\n # B * (N, C, MS, MS) -> (B, N, C, MS, MS)\n mask_targets = F.stack(*mask_targets, axis=0)\n mask_masks = F.stack(*mask_masks, axis=0)\n return mask_targets, mask_masks",
"def fastrcnn_predictions(boxes, scores):\n assert boxes.shape[1] == scores.shape[1]\n boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :] # #catxnx4\n scores = tf.transpose(scores[:, 1:], [1, 0]) # #catxn\n\n max_coord = tf.reduce_max(boxes)\n filtered_ids = tf.where(scores > cfg.TEST.RESULT_SCORE_THRESH) # Fx2\n filtered_boxes = tf.gather_nd(boxes, filtered_ids) # Fx4\n filtered_scores = tf.gather_nd(scores, filtered_ids) # F,\n cls_per_box = tf.slice(filtered_ids, [0, 0], [-1, 1])\n offsets = tf.cast(cls_per_box, tf.float32) * (max_coord + 1) # F,1\n nms_boxes = filtered_boxes + offsets\n selection = tf.image.non_max_suppression(\n nms_boxes,\n filtered_scores,\n cfg.TEST.RESULTS_PER_IM,\n cfg.TEST.FRCNN_NMS_THRESH)\n final_scores = tf.gather(filtered_scores, selection, name='scores')\n final_labels = tf.add(tf.gather(cls_per_box[:, 0], selection), 1, name='labels')\n final_boxes = tf.gather(filtered_boxes, selection, name='boxes')\n return final_boxes, final_scores, final_labels",
"def batch_find_pixel_correspondences(img_a_depth, img_a_pose, img_b_depth, img_b_pose, \n uv_a=None, num_attempts=20, device='CPU', img_a_mask=None, K=None):\n assert (img_a_depth.shape == img_b_depth.shape)\n image_width = img_a_depth.shape[1]\n image_height = img_b_depth.shape[0]\n\n global dtype_float\n global dtype_long\n if device == 'CPU':\n dtype_float = torch.FloatTensor\n dtype_long = torch.LongTensor\n if device =='GPU':\n dtype_float = torch.cuda.FloatTensor\n dtype_long = torch.cuda.LongTensor\n\n if uv_a is None:\n uv_a = pytorch_rand_select_pixel(width=image_width,height=image_height, num_samples=num_attempts)\n else:\n uv_a = (torch.LongTensor([uv_a[0]]).type(dtype_long), torch.LongTensor([uv_a[1]]).type(dtype_long))\n num_attempts = 1\n\n if img_a_mask is None:\n uv_a_vec = (torch.ones(num_attempts).type(dtype_long)*uv_a[0],torch.ones(num_attempts).type(dtype_long)*uv_a[1])\n uv_a_vec_flattened = uv_a_vec[1]*image_width+uv_a_vec[0]\n else:\n img_a_mask = torch.from_numpy(img_a_mask).type(dtype_float) \n \n # Option A: This next line samples from img mask\n uv_a_vec = random_sample_from_masked_image_torch(img_a_mask, num_samples=num_attempts)\n if uv_a_vec[0] is None:\n return (None, None)\n \n # Option B: These 4 lines grab ALL from img mask\n # mask_a = img_a_mask.squeeze(0)\n # mask_a = mask_a/torch.max(mask_a)\n # nonzero = (torch.nonzero(mask_a)).type(dtype_long)\n # uv_a_vec = (nonzero[:,1], nonzero[:,0])\n\n # Always use this line \n uv_a_vec_flattened = uv_a_vec[1]*image_width+uv_a_vec[0]\n\n\n if K is None:\n K = get_default_K_matrix()\n\n K_inv = inv(K)\n body_to_rdf = get_body_to_rdf()\n rdf_to_body = inv(body_to_rdf)\n\n img_a_depth_torch = torch.from_numpy(img_a_depth).type(dtype_float)\n img_a_depth_torch = torch.squeeze(img_a_depth_torch, 0)\n img_a_depth_torch = img_a_depth_torch.view(-1,1)\n\n \n depth_vec = torch.index_select(img_a_depth_torch, 0, uv_a_vec_flattened)*1.0/DEPTH_IM_SCALE\n depth_vec = depth_vec.squeeze(1)\n \n # Prune based on\n # Case 1: depth is zero (for this data, this means no-return)\n nonzero_indices = torch.nonzero(depth_vec)\n if nonzero_indices.dim() == 0:\n return (None, None)\n nonzero_indices = nonzero_indices.squeeze(1)\n depth_vec = torch.index_select(depth_vec, 0, nonzero_indices)\n\n # prune u_vec and v_vec, then multiply by already pruned depth_vec\n u_a_pruned = torch.index_select(uv_a_vec[0], 0, nonzero_indices)\n u_vec = u_a_pruned.type(dtype_float)*depth_vec\n\n v_a_pruned = torch.index_select(uv_a_vec[1], 0, nonzero_indices)\n v_vec = v_a_pruned.type(dtype_float)*depth_vec\n\n z_vec = depth_vec\n\n full_vec = torch.stack((u_vec, v_vec, z_vec))\n\n K_inv_torch = torch.from_numpy(K_inv).type(dtype_float)\n point_camera_frame_rdf_vec = K_inv_torch.mm(full_vec)\n\n point_world_frame_rdf_vec = apply_transform_torch(point_camera_frame_rdf_vec, torch.from_numpy(img_a_pose).type(dtype_float))\n point_camera_2_frame_rdf_vec = apply_transform_torch(point_world_frame_rdf_vec, torch.from_numpy(invert_transform(img_b_pose)).type(dtype_float))\n\n K_torch = torch.from_numpy(K).type(dtype_float)\n vec2_vec = K_torch.mm(point_camera_2_frame_rdf_vec)\n\n u2_vec = vec2_vec[0]/vec2_vec[2]\n v2_vec = vec2_vec[1]/vec2_vec[2]\n\n maybe_z2_vec = point_camera_2_frame_rdf_vec[2]\n\n z2_vec = vec2_vec[2]\n\n # Prune based on\n # Case 2: the pixels projected into image b are outside FOV\n # u2_vec bounds should be: 0, image_width\n # v2_vec bounds should be: 0, image_height\n\n ## do u2-based pruning\n u2_vec_lower_bound = 0.0\n epsilon = 1e-3\n 
u2_vec_upper_bound = image_width*1.0 - epsilon # careful, needs to be epsilon less!!\n lower_bound_vec = torch.ones_like(u2_vec) * u2_vec_lower_bound\n upper_bound_vec = torch.ones_like(u2_vec) * u2_vec_upper_bound\n zeros_vec = torch.zeros_like(u2_vec)\n\n u2_vec = where(u2_vec < lower_bound_vec, zeros_vec, u2_vec)\n u2_vec = where(u2_vec > upper_bound_vec, zeros_vec, u2_vec)\n in_bound_indices = torch.nonzero(u2_vec)\n if in_bound_indices.dim() == 0:\n return (None, None)\n in_bound_indices = in_bound_indices.squeeze(1)\n\n # apply pruning\n u2_vec = torch.index_select(u2_vec, 0, in_bound_indices)\n v2_vec = torch.index_select(v2_vec, 0, in_bound_indices)\n z2_vec = torch.index_select(z2_vec, 0, in_bound_indices)\n u_a_pruned = torch.index_select(u_a_pruned, 0, in_bound_indices) # also prune from first list\n v_a_pruned = torch.index_select(v_a_pruned, 0, in_bound_indices) # also prune from first list\n\n ## do v2-based pruning\n v2_vec_lower_bound = 0.0\n v2_vec_upper_bound = image_height*1.0 - epsilon\n lower_bound_vec = torch.ones_like(v2_vec) * v2_vec_lower_bound\n upper_bound_vec = torch.ones_like(v2_vec) * v2_vec_upper_bound\n zeros_vec = torch.zeros_like(v2_vec) \n\n v2_vec = where(v2_vec < lower_bound_vec, zeros_vec, v2_vec)\n v2_vec = where(v2_vec > upper_bound_vec, zeros_vec, v2_vec)\n in_bound_indices = torch.nonzero(v2_vec)\n if in_bound_indices.dim() == 0:\n return (None, None)\n in_bound_indices = in_bound_indices.squeeze(1)\n\n # apply pruning\n u2_vec = torch.index_select(u2_vec, 0, in_bound_indices)\n v2_vec = torch.index_select(v2_vec, 0, in_bound_indices)\n z2_vec = torch.index_select(z2_vec, 0, in_bound_indices)\n u_a_pruned = torch.index_select(u_a_pruned, 0, in_bound_indices) # also prune from first list\n v_a_pruned = torch.index_select(v_a_pruned, 0, in_bound_indices) # also prune from first list\n\n # Prune based on\n # Case 3: the pixels in image b are occluded, OR there is no depth return in image b so we aren't sure\n\n img_b_depth_torch = torch.from_numpy(img_b_depth).type(dtype_float)\n img_b_depth_torch = torch.squeeze(img_b_depth_torch, 0)\n img_b_depth_torch = img_b_depth_torch.view(-1,1)\n\n uv_b_vec_flattened = (v2_vec.type(dtype_long)*image_width+u2_vec.type(dtype_long)) # simply round to int -- good enough \n # occlusion check for smooth surfaces\n\n depth2_vec = torch.index_select(img_b_depth_torch, 0, uv_b_vec_flattened)*1.0/1000\n depth2_vec = depth2_vec.squeeze(1)\n\n # occlusion margin, in meters\n occlusion_margin = 0.003\n z2_vec = z2_vec - occlusion_margin\n zeros_vec = torch.zeros_like(depth2_vec)\n\n depth2_vec = where(depth2_vec < zeros_vec, zeros_vec, depth2_vec) # to be careful, prune any negative depths\n depth2_vec = where(depth2_vec < z2_vec, zeros_vec, depth2_vec) # prune occlusions\n non_occluded_indices = torch.nonzero(depth2_vec)\n if non_occluded_indices.dim() == 0:\n return (None, None)\n non_occluded_indices = non_occluded_indices.squeeze(1)\n depth2_vec = torch.index_select(depth2_vec, 0, non_occluded_indices)\n\n # apply pruning\n u2_vec = torch.index_select(u2_vec, 0, non_occluded_indices)\n v2_vec = torch.index_select(v2_vec, 0, non_occluded_indices)\n u_a_pruned = torch.index_select(u_a_pruned, 0, non_occluded_indices) # also prune from first list\n v_a_pruned = torch.index_select(v_a_pruned, 0, non_occluded_indices) # also prune from first list\n\n uv_b_vec = (u2_vec, v2_vec)\n uv_a_vec = (u_a_pruned, v_a_pruned)\n return (uv_a_vec, uv_b_vec)",
"def _match_dimensions(self, audio, ir):\n # Add batch dimension.\n if ir.dim() == 1:\n ir = ir[None, :]\n # Match batch dimension.\n batch_size = audio.shape[0]\n return ir.expand(batch_size, -1)",
"def reconcile(self, batch_results, patch_centers, patch_sizes):\n final_results = {}\n if len(batch_results) == 0: # Empty batch\n return final_results\n\n # UResNet predictions\n if 'predictions' and 'scores' and 'softmax' in batch_results[0]:\n final_voxels = np.array([], dtype=np.int32).reshape(0, 3) # Shape N_voxels x dim\n final_scores = np.array([], dtype=np.float32).reshape(0, self.cfg.NUM_CLASSES) # Shape N_voxels x num_classes\n final_counts = np.array([], dtype=np.int32).reshape(0,) # Shape N_voxels x 1\n for i, result in enumerate(batch_results):\n # Extract voxel and voxel values\n # Shape N_voxels x dim\n v, values = extract_voxels(result['predictions'])\n # Extract corresponding softmax scores\n # Shape N_voxels x num_classes\n scores = result['softmax'][v[:, 0], v[:, 1], v[:, 2], :]\n # Restore original blob coordinates\n v = (v + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0).astype(np.int64)\n v = np.clip(v, 0, self.cfg.IMAGE_SIZE-1)\n # indices are indices of the *first* occurrences of the unique values\n # hence for doublons they are indices in final_voxels\n # We assume the only overlap that can occur is between\n # final_voxels and v, not inside these arrays themselves\n n = final_voxels.shape[0]\n final_voxels, indices, counts = np.unique(np.concatenate([final_voxels, v], axis=0), axis=0, return_index=True, return_counts=True)\n final_scores = np.concatenate([final_scores, scores], axis=0)[indices]\n lower_indices = indices[indices < n]\n upper_indices = indices[indices >= n]\n final_counts[lower_indices] += counts[lower_indices] - 1\n final_counts = np.concatenate([final_counts, np.ones((upper_indices.shape[0],))], axis=0)\n\n final_scores = final_scores / final_counts[:, np.newaxis] # Compute average\n final_predictions = np.argmax(final_scores, axis=1)\n final_results['predictions'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)\n final_results['predictions'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_predictions\n final_results['scores'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3)\n final_results['scores'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2]] = final_scores[np.arange(final_scores.shape[0]), final_predictions]\n final_results['softmax'] = np.zeros((self.cfg.IMAGE_SIZE,) * 3 + (self.cfg.NUM_CLASSES,))\n final_results['softmax'][final_voxels.T[0], final_voxels.T[1], final_voxels.T[2], :] = final_scores\n final_results['predictions'] = final_results['predictions'][np.newaxis, ...]\n\n # PPN\n if 'im_proposals' and 'im_scores' and 'im_labels' and 'rois' in batch_results[0]:\n # print(batch_results[0]['im_proposals'].shape, batch_results[0]['im_scores'].shape, batch_results[0]['im_labels'].shape, batch_results[0]['rois'].shape)\n final_im_proposals = np.array([], dtype=np.float32).reshape(0, 3)\n final_im_scores = np.array([], dtype=np.float32).reshape(0,)\n final_im_labels = np.array([], dtype=np.int32).reshape(0,)\n final_rois = np.array([], dtype=np.float32).reshape(0, 3)\n for i, result in enumerate(batch_results):\n im_proposals = result['im_proposals'] + np.flipud(patch_centers[i]) - patch_sizes[i] / 2.0\n im_proposals = np.clip(im_proposals, 0, self.cfg.IMAGE_SIZE-1)\n # print(final_im_proposals, im_proposals)\n final_im_proposals = np.concatenate([final_im_proposals, im_proposals], axis=0)\n final_im_scores = np.concatenate([final_im_scores, result['im_scores']], axis=0)\n final_im_labels = np.concatenate([final_im_labels, result['im_labels']], axis=0)\n rois = result['rois'] + (np.flipud(patch_centers[i]) - patch_sizes[i] / 
2.0) / (self.cfg.dim1 * self.cfg.dim2)\n rois = np.clip(rois, 0, self.cfg.IMAGE_SIZE-1)\n final_rois = np.concatenate([final_rois, rois], axis=0)\n final_results['im_proposals'] = np.array(final_im_proposals)\n final_results['im_scores'] = np.array(final_im_scores)\n final_results['im_labels'] = np.array(final_im_labels)\n final_results['rois'] = np.array(final_rois)\n\n # Try thresholding\n # index = np.where(final_results['im_scores'] > 1e-3)\n # final_results['im_proposals'] = final_results['im_proposals'][index, :]\n # final_results['im_scores'] = final_results['im_scores'][index]\n # final_results['im_labels'] = final_results['im_labels'][index]\n\n return final_results",
"def preprocess_pair(templar_buffer, search_buffer, templar_bbox, search_bbox, num_channels, is_training=True):\n\n '''\n *********************************** Templar image ****************************************\n * Get tight bbox, randomly shift +-8 pixels\n * Pad image to [2500, 2500] with mean RGB values\n * Crop to 256x256:\n * get tight bbox [w, h]\n * compute context margin p = (w+h)/4\n * extend bbox to [w+2p, h+2p], and get min(w+2p, h+2p)\n * extend bbox to [D, D] by adding the shorter side with max(w+2p, h+2p) - min(w+2p, h+2p)\n * crop [D, D] and rescale to [128, 128], get the rescale factor [s]\n * pad boundaries to [256,256] with mean RGB values\n \n \n *********************************** Search image ****************************************\n * Get tight bbox of the corresponding object in templar image\n * Randomly rescale in range(s*0.8, s*1.2), and update bbox position; [s] is computed during pre-process templar image\n * Pad image to [2500, 2500] with mean RGB values\n * Set bbox as the center and crop the image to [256, 256] so that search target is centered in the image\n '''\n\n # decode image buffers\n templar_img = tf.image.decode_jpeg(templar_buffer, channels=num_channels) # uint8\n search_img = tf.image.decode_jpeg(search_buffer, channels=num_channels) # uint8\n templar_bbox = tf.cast(templar_bbox, tf.int32)\n search_bbox = tf.cast(search_bbox, tf.int32)\n\n def return_zero_pad(x): return [0, tf.abs(x)]\n def return_iden_no_pad(x): return [x, 0]\n def return_maxW_pad(x, w_max): return [w_max - 1, x - (w_max - 1)]\n def return_maxH_pad(x, h_max): return [h_max - 1, x - (h_max - 1)]\n def flip_bbox(bbox, img_w):\n '''\n :param bbox: original bbox [xmin, ymin, xmax, ymax]\n :param img_w:\n :return: flipped bbox\n '''\n new_bbox = []\n new_bbox.append(img_w - bbox[2])\n new_bbox.append(bbox[1])\n new_bbox.append(img_w - bbox[0])\n new_bbox.append(bbox[3])\n\n return new_bbox\n\n ######################################## Process Templar #############################################\n # Get tight bbox, always keep the target at the center\n #templar_bbox = distort_bounding_box(input_bbox=templar_bbox, random_shift=8) # new box [xmin, ymin, xmax, ymax]\n # pad border in case distorted bbox out of boundary\n mean_rgb = tf.reduce_mean(tf.cast(templar_img, tf.int64)) # tf.uint8\n mean_rgb = tf.cast(mean_rgb, tf.uint8)\n #templar_img = templar_img - mean_rgb\n #pad_border, pad_border = 10, 10\n #templar_img = tf.pad(tensor=templar_img, paddings=[[pad_border, pad_border], [pad_border, pad_border],[0, 0]],\n # mode='CONSTANT', name=None, constant_values=0)\n #templar_img = templar_img + mean_rgb\n # update tight bbox position, the size stays the same, the 4 corners are updated\n #templar_bbox[0] = templar_bbox[0] + pad_border\n #templar_bbox[1] = templar_bbox[1] + pad_border\n #templar_bbox[2] = templar_bbox[2] + pad_border\n #templar_bbox[3] = templar_bbox[3] + pad_border\n bbox_h = templar_bbox[3] - templar_bbox[1]\n bbox_w = templar_bbox[2] - templar_bbox[0]\n # save the (distorted) tight bbox for display\n tight_bbox = []\n tight_bbox.append(templar_bbox[0])\n tight_bbox.append(templar_bbox[1])\n tight_bbox.append(templar_bbox[2])\n tight_bbox.append(templar_bbox[3])\n p = tf.cast((bbox_h + bbox_w) / 4, tf.int32) # get context margin and compute new bbox\n argmin_dim = tf.math.argmin([bbox_w, bbox_h], axis=0) # 0: shorter in width, 1: shorter in height\n extend_w_cond = tf.equal(argmin_dim, 0) # true if extend in width dim, otherwise extend in height dim\n extend_side_cond 
= tf.equal(tf.math.abs(bbox_w-bbox_h) % 2, 0) # if true, extend evenly on both side\n extend_val_left = tf.cond(extend_side_cond,\n lambda: tf.cast(tf.math.abs(bbox_w - bbox_h) / 2, tf.int32),\n lambda: tf.cast(tf.math.abs(bbox_w - bbox_h) / 2, tf.int32) + 1)\n extend_val_right = tf.cast(tf.math.abs(bbox_w-bbox_h) / 2, tf.int32)\n # get a rect bbox by extending the shorter side\n templar_bbox_new = tf.cond(extend_w_cond, lambda: extend_bbox_w(templar_bbox, extend_val_left, extend_val_right),\n lambda: extend_bbox_h(templar_bbox, extend_val_left, extend_val_right))\n ## add context margin\n templar_bbox_new = [templar_bbox_new[0]-p, templar_bbox_new[1]-p, templar_bbox_new[2]+p, templar_bbox_new[3]+p]\n tight_bbox[0] = tight_bbox[0] - templar_bbox_new[0] # [xmin, ymin, xmax, ymax]\n tight_bbox[1] = tight_bbox[1] - templar_bbox_new[1]\n tight_bbox[2] = tight_bbox[2] - templar_bbox_new[0]\n tight_bbox[3] = tight_bbox[3] - templar_bbox_new[1]\n # here the rectangular bbox might already out of boundary, must pad precise number of pixels on left/up\n img_height = tf.shape(templar_img)[0]\n img_width = tf.shape(templar_img)[1]\n [new_x_min, pad_w_begin] = tf.cond(templar_bbox_new[0] < 0, lambda :return_zero_pad(templar_bbox_new[0]), lambda :return_iden_no_pad(templar_bbox_new[0]))\n [new_x_max, pad_w_end] = tf.cond(templar_bbox_new[2] >= img_width, lambda :return_maxW_pad(templar_bbox_new[2], img_width), lambda :return_iden_no_pad(templar_bbox_new[2]))\n [new_y_min, pad_h_begin] = tf.cond(templar_bbox_new[1] < 0, lambda :return_zero_pad(templar_bbox_new[1]), lambda :return_iden_no_pad(templar_bbox_new[1]))\n [new_y_max, pad_h_end] = tf.cond(templar_bbox_new[3] >= img_height, lambda :return_maxH_pad(templar_bbox_new[3], img_height), lambda :return_iden_no_pad(templar_bbox_new[3]))\n # do paddings, only effective if out of boundary\n templar_img = templar_img - mean_rgb\n templar_img = tf.pad(tensor=templar_img,\n paddings=[[pad_h_begin, pad_h_end + 10], [pad_w_begin, pad_w_end + 10], [0, 0]],\n mode='CONSTANT', name=None, constant_values=0)\n templar_img = templar_img + mean_rgb\n # crop the image\n croped_templar = tf.image.crop_to_bounding_box(image=templar_img, offset_height=new_y_min,\n offset_width=new_x_min,\n target_height=templar_bbox_new[3]-templar_bbox_new[1],\n target_width=templar_bbox_new[2]-templar_bbox_new[0])\n with tf.control_dependencies([tf.debugging.assert_equal(templar_bbox_new[3] - templar_bbox_new[1],\n templar_bbox_new[2] - templar_bbox_new[0])]):\n # rescale to [127, 127], get the scale factor\n scale_s = 127.0 / tf.cast(templar_bbox_new[3] - templar_bbox_new[1], tf.float32)\n # rescale the tight bbox\n tight_temp_bbox = rescale_bbox(tight_bbox, scale_s)\n scale_s = tf.debugging.assert_all_finite(t=scale_s, msg='scale factor not a number!')\n croped_templar = tf.image.resize_bilinear(images=tf.expand_dims(croped_templar, axis=0), size=[127, 127])\n croped_templar = tf.squeeze(croped_templar, axis=0) # [h, w, 3]\n # check size\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(croped_templar)[0], 127),\n tf.debugging.assert_equal(tf.shape(croped_templar)[1], 127),\n tf.debugging.assert_equal(tf.shape(croped_templar)[2], 3)]):\n templar_final = tf.identity(croped_templar)\n\n ######################################## Process Search image #############################################\n # Get rgb mean\n mean_rgb = tf.reduce_mean(tf.cast(search_img, tf.int64)) # tf.uint8\n mean_rgb = tf.cast(mean_rgb, tf.float32)\n # Get random scale factor\n rescale_factor = 
scale_s * tf.random.uniform(shape=[], minval=0.8, maxval=1.2, dtype=tf.float32)\n rescale_factor = tf.debugging.assert_all_finite(t=rescale_factor, msg='rescale_factor factor not a number!')\n # Get rescaled bbox position, and the image\n search_bbox = rescale_bbox(search_bbox, rescale_factor)\n new_height = tf.cast(tf.cast(tf.shape(search_img)[0], tf.float32) * rescale_factor, tf.int32)\n new_width = tf.cast(tf.cast(tf.shape(search_img)[1], tf.float32) * rescale_factor, tf.int32)\n search_img = tf.image.resize_bilinear(images=tf.expand_dims(search_img, axis=0), size=[new_height, new_width])\n search_img = tf.squeeze(search_img, axis=0) # [h, w, 3]\n ### randomly shift bbox +-64 pixels, get the shift values and new bbox center\n search_bbox, h_shift, w_shift = distort_bounding_box(input_bbox=search_bbox, random_shift=32) # new box [xmin, ymin, xmax, ymax], h_shift, w_shift\n ### crop around the center of the bbox to [255, 255], if out of boundary, pad with mean rgb value\n img_width = tf.shape(search_img)[1]\n img_height = tf.shape(search_img)[0]\n x_center = tf.cast((search_bbox[2] - search_bbox[0]) / 2, tf.int32) + search_bbox[0]\n y_center = tf.cast((search_bbox[3] - search_bbox[1]) / 2, tf.int32) + search_bbox[1]\n x_min, x_max = x_center - 127, x_center + 127\n y_min, y_max = y_center - 127, y_center + 127\n [new_x_min, pad_w_begin] = tf.cond(x_min < 0, lambda :return_zero_pad(x_min), lambda :return_iden_no_pad(x_min))\n [new_x_max, pad_w_end] = tf.cond(x_max >= img_width, lambda :return_maxW_pad(x_max, img_width), lambda :return_iden_no_pad(x_max))\n [new_y_min, pad_h_begin] = tf.cond(y_min < 0, lambda :return_zero_pad(y_min), lambda :return_iden_no_pad(y_min))\n [new_y_max, pad_h_end] = tf.cond(y_max >= img_height, lambda :return_maxH_pad(y_max, img_height), lambda :return_iden_no_pad(y_max))\n # do paddings, only effective if out of boundary\n search_img = search_img - mean_rgb\n search_img = tf.pad(tensor=search_img, paddings=[[pad_h_begin, pad_h_end+10], [pad_w_begin, pad_w_end+10], [0, 0]],\n mode='CONSTANT', name=None, constant_values=0)\n search_img = search_img + mean_rgb\n # crop\n search_final = tf.image.crop_to_bounding_box(image=search_img, offset_height=new_y_min, offset_width=new_x_min,\n target_height=255, target_width=255)\n ## get tight bbox within the rescaled search img [xmin, ymin, xmax, ymax]\n bbox_h_half = tf.cast((search_bbox[3] - search_bbox[1]) / 2, tf.int32) # might be zero\n bbox_w_half = tf.cast((search_bbox[2] - search_bbox[0]) / 2, tf.int32) # might be zero\n tight_search_bbox = []\n tight_search_bbox.append(127 - bbox_w_half - w_shift) # xmin\n tight_search_bbox.append(127 - bbox_h_half - h_shift) # ymin\n tight_search_bbox.append(127 + bbox_w_half - w_shift) # xmax\n tight_search_bbox.append(127 + bbox_h_half - h_shift) # ymax\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(search_final)[0], 255),\n tf.debugging.assert_equal(tf.shape(search_final)[1], 255),\n tf.debugging.assert_equal(tf.shape(search_final)[2], 3)]):\n search_final = tf.identity(search_final)\n\n ######################################## Process Score Map GT #############################################\n # [17, 17, 1], [17, 17, 1]\n # consider 8 x (center - offset) <= 16 as positives, stride=8; also note that target in search image is already shifted\n t_center_x = 8 - tf.cast(w_shift / 8, tf.int32)\n t_center_y = 8 - tf.cast(h_shift / 8, tf.int32)\n score, score_weight = tf.py_func(func=build_gt_py, inp=[t_center_x, t_center_y], Tout=[tf.int32, tf.float32],\n 
stateful=True, name=None)\n \"\"\"\n score = tf.zeros([17, 17, 1], dtype=tf.int32)\n delta = tf.sparse.SparseTensor(indices=[[t_center_y, t_center_x, 0]], values=[1], dense_shape=[17,17,1])\n score = score + tf.sparse.to_dense(delta)\n score = tf.expand_dims(score, axis=0) # [1,17,17,1]\n dila_structure = np.array([[False, False, True, False, False],\n [False, True, True, True, False],\n [True, True, True, True, True],\n [False, True, True, True, False],\n [False, False, True, False, False]], dtype=bool)\n dila_structure = dila_structure.astype(np.int32)\n dila_structure = np.expand_dims(dila_structure, axis=-1) # [5,5,1]\n score = tf.nn.dilation2d(input=score, filter=dila_structure, strides=[1,1,1,1], rates=[1,1,1,1], padding='SAME')\n num_total = 17 * 17\n num_positive = tf.reduce_sum(score)\n num_negative = num_total - num_positive\n weight_positive = tf.cast(num_negative, tf.float32) / tf.cast(num_total, tf.float32)\n weight_negative = tf.cast(num_positive, tf.float32) / tf.cast(num_total, tf.float32)\n mat_positive = tf.cast(score, tf.float32) * weight_positive # float\n mat_negative = (1.0 - tf.cast(score, tf.float32)) * weight_negative # float\n score_weight = mat_positive + mat_negative\n score = tf.squeeze(score, 0)\n score_weight = tf.squeeze(score_weight, 0)\n \"\"\"\n # check size\n with tf.control_dependencies([tf.debugging.assert_equal(tf.shape(score)[0], 17),\n tf.debugging.assert_equal(tf.shape(score)[1], 17),\n tf.debugging.assert_equal(tf.shape(score)[2], 1),\n tf.debugging.assert_equal(tf.shape(score_weight)[0], 17),\n tf.debugging.assert_equal(tf.shape(score_weight)[1], 17),\n tf.debugging.assert_equal(tf.shape(score_weight)[2], 1)]):\n score = tf.identity(score)\n score_weight = tf.identity(score_weight)\n\n ################################### Randomly flip templar/search images ####################################\n flip_v = tf.random.uniform(shape=[]) # scalar\n flip_v = tf.greater_equal(flip_v, 0.5)\n templar_final = tf.cond(flip_v, lambda : tf.image.flip_left_right(image=templar_final), lambda :templar_final)\n search_final = tf.cond(flip_v, lambda: tf.image.flip_left_right(image=search_final), lambda: search_final)\n score = tf.cond(flip_v, lambda :tf.image.flip_left_right(image=score), lambda :score)\n score_weight = tf.cond(flip_v, lambda :tf.image.flip_left_right(image=score_weight), lambda :score_weight)\n tight_search_bbox = tf.cond(flip_v, lambda :flip_bbox(tight_search_bbox, 255), lambda :tight_search_bbox)\n\n templar_final = mean_image_subtraction(templar_final, _CHANNEL_MEANS, num_channels)\n search_final = mean_image_subtraction(search_final, _CHANNEL_MEANS, num_channels)\n\n return templar_final, search_final, score, score_weight, tight_temp_bbox, tight_search_bbox",
"def get_tensors(self, loaded_graph):\n return loaded_graph.get_tensor_by_name(\"input:0\"),\\\n loaded_graph.get_tensor_by_name(\"initial_state:0\"),\\\n loaded_graph.get_tensor_by_name(\"final_state:0\"),\\\n loaded_graph.get_tensor_by_name(\"probs:0\"),\\\n loaded_graph.get_tensor_by_name(\"keep_prob:0\")",
"def forward_pass(self):\n # Compute the support set's mean and var and use these as the moments for\n # batch norm on the query set.\n train_embeddings = self.embedding_fn(self.episode.train_images,\n self.is_training)\n self.train_embeddings = train_embeddings['embeddings']\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = train_embeddings['moments']\n test_embeddings = self.embedding_fn(\n self.episode.test_images,\n self.is_training,\n moments=support_set_moments,\n backprop_through_moments=self.backprop_through_moments)\n self.test_embeddings = test_embeddings['embeddings']"
] | [
"0.687296",
"0.53285825",
"0.52761436",
"0.526731",
"0.52641547",
"0.52601016",
"0.52112883",
"0.5158161",
"0.51153666",
"0.51105905",
"0.5093035",
"0.5083832",
"0.5050526",
"0.50354075",
"0.50354075",
"0.50307226",
"0.50050557",
"0.49959216",
"0.49791405",
"0.49321866",
"0.49192724",
"0.49007574",
"0.4850069",
"0.48332533",
"0.48262596",
"0.48250735",
"0.47938016",
"0.4786404",
"0.47338268",
"0.4728398"
] | 0.6739167 | 1 |
Finds unfused batch norm layers and folds them into preceding layers. | def _FoldUnfusedBatchNorms(graph):
input_to_ops_map = input_to_ops.InputToOps(graph)
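  # Map each tensor to the ops that consume it; _HasScaling uses this below to
  # inspect how each batch norm's internal ops are wired together.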
for bn in common.BatchNormGroups(graph):
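    # An unfused batch norm may or may not multiply by a learned gamma
    # (scale=True); the folded replacement is constructed differently for the
    # two cases.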
has_scaling = _HasScaling(graph, input_to_ops_map, bn)
# The mangling code intimately depends on BatchNorm node's internals.
original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)
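    # folded_op produces the same output as original_op, but with the batch
    # norm's scale and shift folded into the preceding layer's weights and
    # bias, so consumers of original_op can be rerouted to it below.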
activation = common.GetEndpointActivationOp(graph, bn)
if activation:
nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],
[original_op.outputs[0]],
can_modify=[activation])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % activation.name)
continue
# Treat consumer ops in bypass modules differently since they have Add
# operations instead of Relu* above.
add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)
add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')
nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],
[original_op.outputs[0]],
can_modify=[add_bypass])
if nodes_modified_count != 1:
raise ValueError('Unexpected inputs to op: %s' % add_bypass.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Otherwise, TF creates a unique scope whose name starts with\n # `scope`.\n with graph.as_default(), graph.name_scope(scope + sep), ops.device(\n match.bn_op.device):\n with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):\n # new weights = old weights * gamma / sqrt(variance + epsilon)\n # new biases = -mean * gamma / sqrt(variance + epsilon) + beta\n multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(\n match.variance_tensor + match.bn_op.get_attr('epsilon'))\n bias_tensor = math_ops.subtract(\n match.beta_tensor,\n match.mean_tensor * multiplier_tensor,\n name='bias')\n\n # The shape of depthwise weights is different, so we need to reshape the\n # multiplier_tensor to ensure that the scaled_weight_tensor has the\n # expected shape.\n if match.layer_op.type == 'DepthwiseConv2dNative':\n new_shape = [\n match.weight_tensor.get_shape().as_list()[2],\n match.weight_tensor.get_shape().as_list()[3]\n ]\n multiplier_tensor = array_ops.reshape(\n multiplier_tensor, new_shape, name='scale_reshape')\n\n # TODO(suharshs): This naming of the following ops needs to carefully\n # follow the naming expected by quantize.py. Generalize the quantize code\n # to not require these delicate naming conventions.\n scaled_weight_tensor = math_ops.multiply(\n match.weight_tensor, multiplier_tensor, name='mul_fold')\n\n new_layer_tensor = _CloneWithNewOperands(\n match.layer_op, match.input_tensor, scaled_weight_tensor)\n\n bias_add_tensor = math_ops.add(\n new_layer_tensor, bias_tensor, name='add_fold')\n\n nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,\n match.output_tensor)\n if nodes_modified_count != 1:\n raise ValueError(\n 'Unexpected inputs to op: %s' % match.output_tensor.name)",
"def FoldBatchNorms(graph):\n _FoldFusedBatchNorms(graph)\n _FoldUnfusedBatchNorms(graph)",
"def test_batch_norm_fold(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n conv = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True)(conv, training=False)\n relu = tf.nn.relu(bn)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n\n baseline_output = model(numpy_data)\n\n _, model = fold_all_batch_norms(model)\n output_after_fold = model(numpy_data)\n\n assert np.allclose(baseline_output, output_after_fold, atol=1.e-4)",
"def DerefBatchNormLayers(network, batch_norm_names, layers_dict, suffix='_fold', \n lr_mult=1.0, decay_mult=1.0):\n for bn_layer_name in batch_norm_names:\n index = layers_dict[bn_layer_name]\n bn_layer = network.layer[index]\n \n if (len(bn_layer.bottom) != 1) or (len(bn_layer.top) != 1):\n raise AssertionError('Expected bn layer to have one top and bottom')\n \n prev_layer_idx = index - 1\n next_layer_idx = index + 1\n prev_layer, next_layer = network.layer[prev_layer_idx], network.layer[next_layer_idx]\n \n if not (prev_layer.top == bn_layer.bottom and bn_layer.top == next_layer.bottom):\n raise AssertionError(\"Could not find previous and next nodes for\"\n \"batch norm layer\")\n \n if next_layer.type != 'Scale':\n print bn_layer_name, next_layer.type, next_layer.name\n raise AssertionError('Expected Scale layer to follow batch norm layer')\n \n if not (len(prev_layer.top) == 1 and len(next_layer.bottom) == 1):\n raise AssertionError(\"Expected previous and next blobs to have\" \n \"only one input and output\")\n \n next_layer.bottom[0] = prev_layer.top[0]\n next_layer.name = next_layer.name + suffix\n\n if lr_mult != 1.0 or decay_mult != 1.0:\n while len(next_layer.param) < 2:\n next_layer.param.add()\n for i in range(len(next_layer.param)):\n next_layer.param[i].lr_mult = lr_mult\n next_layer.param[i].decay_mult = decay_mult",
"def test_cnn_starts_with_batchnorm(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n assert str(type(model.layers[0])) \\\n == \"<class 'keras.layers.normalization.BatchNormalization'>\", \\\n 'Wrong layer type.'",
"def test_batch_norm_layers():\n layers = [[\"gru\", 20], [\"lstm\", 3], [\"linear\", 4], [\"linear\", 10]]\n rnn = RNN(layers_info=layers, hidden_activations=\"relu\", input_dim=5,\n output_activation=\"relu\", initialiser=\"xavier\", batch_norm=True)\n assert len(rnn.batch_norm_layers) == 3\n assert rnn.batch_norm_layers[0].num_features == 20\n assert rnn.batch_norm_layers[1].num_features == 3\n assert rnn.batch_norm_layers[2].num_features == 4",
"def RemoveBatchNormLayers(network, batch_norm_names):\n i = 0\n j = 0\n while i < len(network.layer) and j < len(batch_norm_names): \n if network.layer[i].name == batch_norm_names[j]:\n del network.layer[i]\n j += 1\n else:\n i += 1\n \n if j != len(batch_norm_names):\n print j, len(batch_norm_names)\n raise AssertionError('All batch norm layers were not removed')",
"def test_cnn_enough_batchnorm(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n batch_norm_layers = len(\n [l for l in model.layers if 'BatchNormalization' in str(l)])\n activation_layers = len(\n [l for l in model.layers if 'Activation' in str(l)])\n assert batch_norm_layers == activation_layers",
"def _find_all_batch_norms_to_fold(connected_graph: ConnectedGraph) -> Tuple[\n List[Tuple[LayerType, BatchNormType]], List[Tuple[BatchNormType, LayerType]]]:\n conv_bn_pairs, bn_conv_pairs, bn_to_fold = _find_foldable_bn_pair_and_bn_picked_for_folding(connected_graph)\n return conv_bn_pairs, bn_conv_pairs, bn_to_fold",
"def norm_layer( x, training, name):\n top = tf.layers.batch_normalization( x, \n axis=3, # channels last \n training=training,\n name=name )\n return top",
"def test_cnn_batchnorm_dim(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n batchnormlay = model.layers[2]\n assert batchnormlay.output_shape == (None, 20, 32)",
"def test_batch_norm_fold_with_random_data(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n conv = tf.keras.layers.Conv2D(32, (3, 3),\n kernel_initializer=tf.random_uniform_initializer(-1, 1),\n bias_initializer='random_uniform')(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True,\n beta_initializer='random_uniform',\n gamma_initializer='random_uniform',\n moving_mean_initializer='random_uniform',\n moving_variance_initializer='ones')(conv, training=False)\n relu = tf.nn.relu(bn)\n\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n baseline_output = model(numpy_data)\n\n _, model = fold_all_batch_norms(model)\n\n output_after_fold = model(numpy_data)\n\n assert not np.allclose(baseline_output, output_after_fold, atol=0)\n assert np.allclose(baseline_output, output_after_fold, atol=1e-4)",
"def keras_model_functional_with_non_fused_batchnorms():\n is_training = tf.compat.v1.placeholder_with_default(tf.constant(True), shape=(), name='is_training')\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65, fused=False)(x, training=True)\n with tf.compat.v1.variable_scope(\"scope_1\"):\n x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25, fused=False)(x, training=is_training)\n x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.5, epsilon=.35, fused=False)(x, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax,\n name=\"keras_model_functional_with_non_fused_batchnorms\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model",
"def test_bn_fold_auto_rules_bn_before_conv(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,), name=\"inputs\")\n bn_op = tf.keras.layers.BatchNormalization(fused=True)(inputs)\n conv_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op)\n relu = tf.nn.relu(conv_op)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n assert 1 == len(conv_bn_pairs) + len(bn_conv_pairs)",
"def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x",
"def test_cnn_enough_batchnorm(self):\n model_type = CNN((None, 20, 3), 2)\n model = model_type.create_model(**{\"filters\": [32, 32],\n \"fc_hidden_nodes\": 100})\n\n batch_norm_layers = len([layer for layer in model.layers if 'BatchNormalization' in str(layer)])\n activation_layers = len([layer for layer in model.layers if 'Activation' in str(layer)])\n assert batch_norm_layers == activation_layers",
"def residual_net_old(total_depth, data_layer_params, num_classes = 1000, acclayer = True):\n # figure out network structure\n net_defs = {\n 18:([2, 2, 2, 2], \"standard\"),\n 34:([3, 4, 6, 3], \"standard\"),\n 50:([3, 4, 6, 3], \"bottleneck\"),\n 101:([3, 4, 23, 3], \"bottleneck\"),\n 152:([3, 8, 36, 3], \"bottleneck\"),\n }\n assert total_depth in net_defs.keys(), \"net of depth:{} not defined\".format(total_depth)\n\n nunits_list, unit_type = net_defs[total_depth] # nunits_list a list of integers indicating the number of layers in each depth.\n nouts = [64, 128, 256, 512] # same for all nets\n\n # setup the first couple of layers\n n = caffe.NetSpec()\n n.data, n.label = L.Python(module = 'beijbom_caffe_data_layers', layer = 'ImageNetDataLayer',\n ntop = 2, param_str=str(data_layer_params))\n n.conv1, n.bn1, n.lrn1 = conv_bn(n.data, ks = 7, stride = 2, nout = 64, pad = 3)\n n.relu1 = L.ReLU(n.lrn1, in_place=True)\n n.pool1 = L.Pooling(n.relu1, stride = 2, kernel_size = 3)\n \n # make the convolutional body\n for nout, nunits in zip(nouts, nunits_list): # for each depth and nunits\n for unit in range(1, nunits + 1): # for each unit. Enumerate from 1.\n s = str(nout) + '_' + str(unit) + '_' # layer name prefix\n if unit_type == \"standard\":\n residual_standard_unit_old(n, nout, s, newdepth = unit is 1 and nout > 64)\n else:\n residual_bottleneck_unit_old(n, nout, s, newdepth = unit is 1)\n \n # add the end layers \n n.global_pool = L.Pooling(n.__dict__['tops'][n.__dict__['tops'].keys()[-1]], pooling_param = dict(pool = 1, global_pooling = True))\n n.score = L.InnerProduct(n.global_pool, num_output = num_classes,\n param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])\n n.loss = L.SoftmaxWithLoss(n.score, n.label)\n if acclayer:\n n.accuracy = L.Accuracy(n.score, n.label)\n\n return n",
"def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')",
"def test_deepconvlstm_starts_with_batchnorm(self):\n model = modelgen.generate_DeepConvLSTM_model(\n (None, 20, 3), 2, [32, 32], [32, 32])\n assert str(type(model.layers[0])) \\\n == \"<class 'keras.layers.normalization.BatchNormalization'>\", \\\n 'Wrong layer type.'",
"def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_",
"def _FindFusedBatchNorms(graph):\n input_pattern = graph_matcher.OpTypePattern('*')\n weight_pattern = graph_matcher.OpTypePattern('*')\n gamma_pattern = graph_matcher.OpTypePattern('*')\n beta_pattern = graph_matcher.OpTypePattern('*')\n mean_pattern = graph_matcher.OpTypePattern('*')\n variance_pattern = graph_matcher.OpTypePattern('*')\n\n conv_pattern = graph_matcher.OpTypePattern(\n 'Conv2D|DepthwiseConv2dNative', inputs=[input_pattern, weight_pattern])\n # MatMul has a Reshape between it and FusedBatchNorm.\n matmul_pattern = graph_matcher.OpTypePattern(\n 'MatMul', inputs=[input_pattern, weight_pattern])\n matmul_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape', inputs=[matmul_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n conv_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_batch_norm_pattern = graph_matcher.OpTypePattern(\n 'FusedBatchNorm',\n inputs=[\n matmul_reshape_pattern, gamma_pattern, beta_pattern, mean_pattern,\n variance_pattern\n ])\n matmul_bn_output_reshape_pattern = graph_matcher.OpTypePattern(\n 'Reshape',\n inputs=[matmul_batch_norm_pattern,\n graph_matcher.OpTypePattern('*')])\n\n conv_matcher = graph_matcher.GraphMatcher(conv_batch_norm_pattern)\n matmul_matcher = graph_matcher.GraphMatcher(matmul_bn_output_reshape_pattern)\n\n def _GetCommonTensors(match_result, bn_op, bn_input_tensor):\n \"\"\"Gets tensors needed for FusedBatchNormMatch from match_result.\"\"\"\n input_tensor = match_result.get_tensor(input_pattern)\n weight_tensor = match_result.get_tensor(weight_pattern)\n gamma_tensor = match_result.get_tensor(gamma_pattern)\n beta_tensor = match_result.get_tensor(beta_pattern)\n # FusedBatchNorm in training is different from that in inference. It takes\n # empty 'mean' and empty 'variance', and produces the mean and the variance\n # of the batch. Therefore, when is_training is true, mean_tensor and\n # variance_tensor point to 1st and 2nd (0-based) output of bn_op,\n # respectively; when is_training is false, they point to bn_op's inputs.\n is_training = bn_op.get_attr('is_training')\n if is_training:\n # FusedBatchNormGrad doesn't compute gradients of the batch_mean and\n # batch_variance outputs, so we need to substitute our own custom\n # gradient.\n # TODO(suharshs, raghuramank): Find a way to avoid needing this hack.\n # pylint: disable=protected-access\n bn_op._set_attr(\n '_gradient_op_type',\n attr_value_pb2.AttrValue(s=compat.as_bytes('FoldFusedBatchNormGrad')))\n # pylint: enable=protected-access\n mean_tensor = bn_op.outputs[1]\n # The batch variance used during forward and backward prop is biased,\n # i.e it is calculated as: V=sum(x(k)-mu)^2/N. For the moving average\n # calculation, the variance is corrected by the term N/N-1 (Bessel's\n # correction). 
The variance tensor read from FuseBatchNorm has bessel's\n # correction applied, so we undo it here.\n n = math_ops.cast(\n array_ops.size(bn_input_tensor) / array_ops.size(mean_tensor),\n dtypes.float32)\n variance_tensor = bn_op.outputs[2] * (n - 1) / n\n else:\n mean_tensor = match_result.get_tensor(mean_pattern)\n variance_tensor = match_result.get_tensor(variance_pattern)\n return (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor)\n\n for match_result in conv_matcher.match_graph(graph):\n layer_op = match_result.get_op(conv_pattern)\n layer_tensor = match_result.get_tensor(conv_pattern)\n bn_op = match_result.get_op(conv_batch_norm_pattern)\n # In the case of convolution the output_tensor is the output of bn_op.\n output_tensor = bn_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)\n\n for match_result in matmul_matcher.match_graph(graph):\n layer_op = match_result.get_op(matmul_pattern)\n layer_tensor = match_result.get_tensor(matmul_pattern)\n bn_op = match_result.get_op(matmul_batch_norm_pattern)\n # In the MatMul case, the output of batch norm is reshaped back into a\n # 2D tensor, so the output_tensor is the output of the Reshape op.\n output_reshape_op = match_result.get_op(matmul_bn_output_reshape_pattern)\n output_tensor = output_reshape_op.outputs[0]\n\n (input_tensor, weight_tensor, gamma_tensor, beta_tensor, mean_tensor,\n variance_tensor) = _GetCommonTensors(match_result, bn_op, layer_tensor)\n yield _FusedBatchNormMatch(\n layer_op=layer_op,\n bn_op=bn_op,\n output_tensor=output_tensor,\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n gamma_tensor=gamma_tensor,\n beta_tensor=beta_tensor,\n mean_tensor=mean_tensor,\n variance_tensor=variance_tensor)",
"def _fold_given_batch_norms(model,\n conv_bn_pairs: Iterable[Tuple[torch.nn.Module, torch.nn.Module]],\n bn_conv_pairs: Iterable[Tuple[torch.nn.Module, torch.nn.Module]]):\n # pylint: disable=protected-access\n for bn, conv in bn_conv_pairs:\n if isinstance(conv, QcQuantizeWrapper):\n raise RuntimeError(f\"Forward folding to scale is not possible. Got {conv}\")\n\n bn_modules = []\n\n def _fold(conv, bn, fold_backward):\n is_wrapped = isinstance(conv, QcQuantizeWrapper) or isinstance(bn, QcQuantizeWrapper)\n try:\n if is_wrapped:\n assert isinstance(conv, QcQuantizeWrapper) and isinstance(bn, QcQuantizeWrapper)\n _fold_to_scale(conv, bn)\n bn_modules.append(bn._module_to_wrap)\n else:\n _fold_to_weight(conv, bn, fold_backward=fold_backward)\n except _BatchNormFoldingNotSupported as e:\n bn_name = utils.get_layer_name(model, bn)\n conv_name = utils.get_layer_name(model, conv)\n _logger.warning(\n \"Failed to fold %s to %s. [Reason] %s\", bn_name, conv_name, str(e)\n )\n else:\n bn_modules.append(bn._module_to_wrap if is_wrapped else bn)\n\n\n with utils.in_eval_mode(model), torch.no_grad():\n for conv, bn in conv_bn_pairs:\n _fold(conv, bn, fold_backward=True)\n\n for bn, conv in bn_conv_pairs:\n _fold(conv, bn, fold_backward=False)\n\n _delete_bn_from_model(model, bn_modules)",
"def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp",
"def test_bn_fold_auto_rules_bn_after_conv(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,), name=\"inputs\")\n conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)\n relu = tf.nn.relu(bn_op)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n assert 1 == len(conv_bn_pairs) + len(bn_conv_pairs)",
"def _batch_norm(inputs, decay = 0.999, center = True, scale = False, epsilon = 0.001, \n\t\t\t\tmoving_vars = 'moving_vars', activation = None, is_training = None, \n\t\t\t\ttrainable = True, restore = True, scope = None, reuse = None):\n inputs_shape = inputs.get_shape()\n with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse = reuse):\n axis = list(range(len(inputs_shape) - 1))\n params_shape = inputs_shape[-1:]\n beta, gamma = None, None\n\n if center:\n beta = _variable_on_cpu('beta', params_shape, tf.zeros_initializer)\n if scale:\n gamma = _variable_on_cpu('gamma', params_shape, tf.ones_initializer)\n\n # moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]\n moving_mean = _variable_on_cpu('moving_mean', params_shape,tf.zeros_initializer, trainable = False)\n # tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, moving_mean)\n moving_variance = _variable_on_cpu('moving_variance', params_shape, tf.ones_initializer, trainable = False)\n # tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, moving_variance)\n \n def train_phase():\n mean, variance = tf.nn.moments(inputs, axis)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)\n update_moving_variance = moving_averages.assign_moving_average(moving_variance, \n variance, decay)\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(mean), tf.identity(variance)\n\n def test_phase():\n return moving_mean, moving_variance\t\n\n mean, variance = tf.cond(is_training, train_phase, test_phase)\n outputs = tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)\n outputs.set_shape(inputs.get_shape()) \n\n if activation:\n outputs = activation(outputs)\n\n return outputs",
"def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):\n if axes != [0,2,3]:\n raise Exception('unsupported')\n batch_mean, batch_var = tf.nn.moments(inputs, axes, keep_dims=True)\n shape = batch_mean.get_shape().as_list() # shape is [1,n,1,1]\n offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))\n scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))\n offset = tf.nn.embedding_lookup(offset_m, labels)\n # offset = tf.Print(offset,['offset',offset])\n scale = tf.nn.embedding_lookup(scale_m, labels)\n # scale = tf.Print(scale,['scale',scale])\n\n moving_mean = lib.param(name + '.moving_mean', np.zeros(batch_mean.get_shape(), dtype='float32'), trainable=False)\n moving_variance = lib.param(name + '.moving_variance', np.ones(batch_var.get_shape(), dtype='float32'),trainable=False)\n\n def _batch_norm_training():\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)\n\n def _batch_norm_inference():\n # Version which blends in the current item's statistics\n mean = moving_mean[None, :, None, None]\n var = moving_variance[None, :, None, None]\n '''\n batch_size = tf.cast(tf.shape(inputs)[0], 'float32')\n mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)\n mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]\n var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]\n '''\n return tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None],\n 1e-5), mean, var\n\n if is_training is None:\n outputs = _batch_norm_training()\n else:\n if is_training:\n outputs = _batch_norm_training()\n else:\n outputs = _batch_norm_inference()\n\n if update_moving_stats:\n no_updates = lambda: outputs\n\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)\n\n if is_training:\n outputs = _force_updates()\n else:\n outputs = no_updates()\n\n return outputs",
"def test_cnn_batchnorm_dim(self):\n model_type = CNN((None, 20, 3), 2)\n model = model_type.create_model(**{\"filters\": [32, 32],\n \"fc_hidden_nodes\": 100})\n\n batchnormlay = model.layers[2]\n assert batchnormlay.output_shape == (None, 20, 32)",
"def fold_all_batch_norms_to_weight(\n model: torch.nn.Module,\n input_shapes: Union[Tuple, List[Tuple]],\n dummy_input: Union[torch.Tensor, Tuple] = None\n) -> List[Tuple[LayerType, BatchNormType]]:\n if isinstance(model, torch.nn.DataParallel):\n return fold_all_batch_norms_to_weight(model.module, input_shapes, dummy_input)\n device = utils.get_device(model)\n if dummy_input is None:\n inp_tensor_list = utils.create_rand_tensors_given_shapes(input_shapes, device)\n else:\n inp_tensor_list = dummy_input\n connected_graph = ConnectedGraph(model, inp_tensor_list)\n\n conv_bn_pairs, bn_conv_pairs, bn_to_fold = _find_all_batch_norms_to_fold(connected_graph)\n\n _fold_given_batch_norms(model, conv_bn_pairs, bn_conv_pairs)\n\n # Convert the standalone BNs which are not folded\n bn_converted = convert_standalone_batchnorms(model, inp_tensor_list, bn_to_fold)\n _logger.info(\"%d BatchNorms' weights got converted\", len(bn_converted))\n return conv_bn_pairs + [(conv, bn) for bn, conv in bn_conv_pairs]",
"def forward(self, data_batch):\n\n x = data_batch[0]\n im_info = data_batch[1]\n gt_boxes = data_batch[2]\n num_boxes = data_batch[3]\n rel_mat = data_batch[4]\n\n if self.training:\n self.iter_counter += 1\n\n input_imgs = x.clone()\n\n sources = list()\n loc = list()\n conf = list()\n\n self.batch_size = x.size(0)\n\n # apply vgg up to conv4_3 relu\n if isinstance(self.base, nn.ModuleList):\n for k,v in enumerate(self.base):\n x = v(x)\n else:\n x = self.base(x)\n\n s = self.L2Norm(x)\n sources.append(s)\n base_feat = s\n\n # apply vgg up to fc7\n if isinstance(self.conv5, nn.ModuleList):\n for k,v in enumerate(self.conv5):\n x = v(x)\n else:\n x = self.conv5(x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n loc = loc.view(loc.size(0), -1, 4)\n conf = conf.view(conf.size(0), -1, self.num_classes)\n\n SSD_loss_cls = 0\n SSD_loss_bbox = 0\n if self.training:\n predictions = (\n loc,\n conf,\n self.priors.type_as(loc)\n )\n # targets = torch.cat([gt_boxes[:,:,:4] / self.size, gt_boxes[:,:,4:5]],dim=2)\n targets = gt_boxes\n SSD_loss_bbox, SSD_loss_cls = self.criterion(predictions, targets, num_boxes)\n\n conf = self.softmax(conf)\n\n # online data\n if self.training:\n if self.iter_counter > cfg.TRAIN.VMRN.ONLINEDATA_BEGIN_ITER:\n obj_rois, obj_num = self._obj_det(conf, loc, self.batch_size, im_info)\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n else:\n obj_rois = torch.FloatTensor([]).type_as(gt_boxes)\n obj_num = torch.LongTensor([]).type_as(num_boxes)\n obj_labels = None\n else:\n # when testing, this is object detection results\n # TODO: SUPPORT MULTI-IMAGE BATCH\n obj_rois, obj_num = self._obj_det(conf, loc, self.batch_size, im_info)\n if obj_rois.numel() > 0:\n obj_labels = obj_rois[:, 5]\n obj_rois = obj_rois[:, :5]\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n else:\n # there is no object detected\n obj_labels = torch.Tensor([]).type_as(gt_boxes).long()\n obj_rois = obj_rois.type_as(gt_boxes)\n obj_num = obj_num.type_as(num_boxes)\n\n if self.training:\n # offline data\n for i in range(self.batch_size):\n obj_rois = torch.cat([obj_rois,\n torch.cat([(i * torch.ones(num_boxes[i].item(), 1)).type_as(gt_boxes),\n (gt_boxes[i][:num_boxes[i]][:, 0:4])], 1)\n ])\n obj_num = torch.cat([obj_num, torch.Tensor([num_boxes[i]]).type_as(obj_num)])\n\n\n obj_rois = Variable(obj_rois)\n\n VMRN_rel_loss_cls = 0\n rel_cls_prob = torch.Tensor([]).type_as(obj_rois)\n if (obj_num > 1).sum().item() > 0:\n\n obj_pair_feat = self.VMRN_obj_pair_feat_extractor(input_imgs, obj_rois, self.batch_size, obj_num)\n # obj_pair_feat = obj_pair_feat.detach()\n rel_cls_score = self.VMRN_rel_cls_score(obj_pair_feat)\n\n rel_cls_prob = F.softmax(rel_cls_score)\n\n self.rel_batch_size = obj_pair_feat.size(0)\n\n if self.training:\n obj_pair_rel_label = self._generate_rel_labels(obj_rois, gt_boxes, obj_num, rel_mat)\n obj_pair_rel_label = obj_pair_rel_label.type_as(gt_boxes).long()\n\n rel_not_keep = (obj_pair_rel_label == 0)\n # no relationship is kept\n if (rel_not_keep == 0).sum().item() > 
0:\n rel_keep = torch.nonzero(rel_not_keep == 0).view(-1)\n\n rel_cls_score = rel_cls_score[rel_keep]\n\n obj_pair_rel_label = obj_pair_rel_label[rel_keep]\n obj_pair_rel_label -= 1\n VMRN_rel_loss_cls = F.cross_entropy(rel_cls_score, obj_pair_rel_label)\n else:\n if (not cfg.TEST.VMRN.ISEX) and cfg.TRAIN.VMRN.ISEX:\n rel_cls_prob = rel_cls_prob[::2, :]\n\n rel_result = None\n if not self.training:\n if obj_rois.numel() > 0:\n pred_boxes = obj_rois.data[:,1:5]\n pred_boxes[:, 0::2] /= im_info[0][3].item()\n pred_boxes[:, 1::2] /= im_info[0][2].item()\n rel_result = (pred_boxes, obj_labels, rel_cls_prob.data)\n else:\n rel_result = (obj_rois.data, obj_labels, rel_cls_prob.data)\n\n return loc, conf, rel_result, SSD_loss_bbox, SSD_loss_cls, VMRN_rel_loss_cls",
"def test_bn_fold_with_linear_layer(self):\n inputs = tf.keras.Input(shape=(1, 1, 4,))\n bn = tf.keras.layers.BatchNormalization(fused=True)(inputs, training=False)\n x = tf.keras.layers.Flatten()(bn)\n dense = tf.keras.layers.Dense(2, activation=tf.nn.relu, name=\"linear_layer\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=dense)\n\n # get baseline output\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n baseline_output = model(numpy_data)\n weight_before_fold = model.layers[3].kernel.numpy()\n\n _, model = fold_all_batch_norms(model)\n after_fold_output = model(numpy_data)\n weight_after_fold = model.layers[2].kernel.numpy()\n\n # check that weight got updated\n assert not np.allclose(weight_before_fold, weight_after_fold, atol=1e-4)\n\n # check outputs are close\n assert np.allclose(baseline_output, after_fold_output, atol=1e-3)"
] | [
"0.77741086",
"0.7156549",
"0.7024513",
"0.68224394",
"0.63824594",
"0.63595194",
"0.6346714",
"0.625679",
"0.6225848",
"0.6212077",
"0.620075",
"0.6187602",
"0.617762",
"0.6148358",
"0.6089603",
"0.60637486",
"0.6047825",
"0.6043735",
"0.60115445",
"0.60108745",
"0.60100204",
"0.60035557",
"0.59996784",
"0.5996033",
"0.5991673",
"0.5990615",
"0.5980897",
"0.596824",
"0.5943858",
"0.5935901"
] | 0.74272305 | 1 |
r"""Checks if batch norm has scaling enabled. | def _HasScaling(graph, input_to_ops_map, bn):
rsqrt_op = graph.get_operation_by_name(bn + '/BatchNorm/batchnorm/Rsqrt')
rsqrt_consumers = input_to_ops_map.ConsumerOperations(rsqrt_op)
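  # With scaling, the Rsqrt output feeds a single Mul (the gamma multiply); without
  # scaling it is consumed by more than one Mul directly, so exactly one Mul consumer
  # indicates that scaling is enabled.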
return sum(1 for op in rsqrt_consumers if op.type == 'Mul') == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_scale_enabled(self) -> bool:\r\n ...",
"def scaling_enabled(self):\n return False",
"def isSetScale(self):\n return _libsbml.Unit_isSetScale(self)",
"def param_scale_check(shape_x, shape_scale):\n\n length_x = len(shape_x)\n length_scale = len(shape_scale)\n\n if not(length_scale == 1 and shape_scale[0] == 1):\n if length_x != length_scale:\n raise RuntimeError(\n \"length_x and length_scale must be equal\")\n for i in range(length_scale):\n if shape_scale[i] != shape_x[i] and shape_scale[i] != 1:\n raise RuntimeError(\n \"shape_scale is not match to broadcast\")",
"def scale(self, _: Application) -> bool:\n return False",
"def is_valid_mbart(self) -> bool:\r\n if self.normalize_before and self.add_final_layer_norm and self.scale_embedding:\r\n return True\r\n if self.normalize_before or self.add_final_layer_norm or self.scale_embedding:\r\n logger.info(\"This configuration is a mixture of MBART and BART settings\")\r\n return False",
"def test_data_is_scaled():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n assert sum(atom.sgd.predict(X_bin)) > 0 # Always 0 if not scaled",
"def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])",
"def normalisable(self):\n\n return np.abs(np.nansum(self.data)) > 0",
"def check_scaled_shape(orig: torch.Tensor, scaled: torch.Tensor, scale_factor: float) -> bool:\n N, C, H, W = orig.shape\n Hc = int(scale_factor * H)\n Wc = int(scale_factor * W)\n\n return scaled.shape == (N, C, Hc, Wc)",
"def shouldAutoScale(self):\n if self.autoscale is not None:\n return self.autoscale\n # preserve backwards compatability for zenpacks\n for dp in self.graphPoints():\n if dp.meta_type == 'DataPointGraphPoint' and dp.shouldAutoScale():\n return True\n return False",
"def check_norm_state(modules, train_state):\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True",
"def check_norm_state(modules, train_state):\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True",
"def scale(self, app: Application) -> bool:\n pass",
"def use_blas(self):\r\n #the gemm version only support that case\r\n if self.out_mode == 'valid' and self.dx == 0 and self.dy == 0:\r\n #We use a faster version in those case.\r\n if (self.imshp != self.imshp_logical or\r\n self.kshp != self.kshp_logical or\r\n self.unroll_patch or\r\n self.unroll_batch > 0 or\r\n self.unroll_kern > 0):\r\n\r\n return False\r\n return True\r\n return False",
"def test_validate_scale_count(ctx):\n assert eos.validate_scale_count(None, ctx) is None\n assert eos.validate_scale_count(orm.Int(3), ctx) is None\n assert eos.validate_scale_count(orm.Int(2), ctx) == 'need at least 3 scaling factors.'",
"def check_norm_state(modules, train_state):\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True",
"def allowUpscaling(self):\n return self._allow_upscaling",
"def can_sample(self, batch_size):\n return batch_size + 1 <= self.num_in_buffer",
"def check_normality(self,alpha = 0.05):\n\n stat1, p = shapiro(self.x)\n \n if self.y is not None:\n stat2, p2 = shapiro(self.y)\n \n if p < alpha:\n if self.y is not None:\n if p2 < alpha:\n self._verbose('x and y do not look Gaussian (reject H0)')\n return False\n else:\n self._verbose('x does not look Gaussian, but y looks Gaussian (fail to reject H0)')\n return True\n else:\n self._verbose('Sample does not look Gaussian (reject H0)')\n return False\n\n else:\n if self.y is not None:\n if p2 < alpha:\n self._verbose('x looks Gaussian, but y does not look Gaussian (fail to reject H0)')\n return False\n else:\n self._verbose('x and y look Gaussian (fail to reject H0)')\n return True\n else:\n self._verbose('Sample looks Gaussian (fail to reject H0)')\n return True",
"def auto_scaling_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_scaling_enabled\")",
"def test_validate_scale_factors(ctx):\n assert eos.validate_scale_factors(None, ctx) is None\n assert eos.validate_scale_factors(orm.List(list=[0.98, 1, 1.02]), ctx) is None\n assert eos.validate_scale_factors(orm.List(list=[0, 1]), ctx) == 'need at least 3 scaling factors.'",
"def isSetExtentConversionFactor(self):\n return _libsbml.Submodel_isSetExtentConversionFactor(self)",
"def _check_is_fitted(self):\n # Do not check `b` as some classifiers do not set it\n check_is_fitted(self, 'w')\n super(CClassifierLinear, self)._check_is_fitted()",
"def per_site_scaling(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"per_site_scaling\")",
"def per_site_scaling(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"per_site_scaling\")",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def check(self):\n exception = False\n for scalerThread in [self.preemptableScaler, self.scaler]:\n if scalerThread is not None:\n try:\n scalerThread.join(timeout=0)\n except Exception as e:\n logger.exception(e)\n exception = True\n if exception:\n raise RuntimeError('The cluster scaler has exited due to an exception')",
"def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"autoscale_is_enabled\")",
"def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"autoscale_is_enabled\")"
] | [
"0.73828375",
"0.6997457",
"0.6757541",
"0.6416183",
"0.6400106",
"0.6291684",
"0.626143",
"0.6211953",
"0.6028917",
"0.6004665",
"0.59074646",
"0.5888298",
"0.5888298",
"0.5880715",
"0.582264",
"0.5821804",
"0.57957906",
"0.578953",
"0.5759011",
"0.57504267",
"0.574466",
"0.56916064",
"0.56787467",
"0.5650609",
"0.5618305",
"0.5618305",
"0.5585925",
"0.55850536",
"0.5577814",
"0.5577814"
] | 0.71985847 | 1 |
Clones a given op, replaces its name and some of its inputs. | def _CloneOp(op, new_name, new_inputs):
inputs = list(op.inputs)
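  # Each entry of new_inputs is an (index, tensor) pair; the tensor replaces the
  # op's original input at that index.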
for new_input in new_inputs:
inputs[new_input[0]] = new_input[1]
return _OP_CLONER.Clone(op, inputs, new_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def _CloneWithNewOperands(layer_op, input_tensor, weight_tensor):\n new_layer_name = layer_op.name.split('/')[-1] + '_Fold'\n if layer_op.type == 'Conv2D':\n return nn_ops.conv2d(\n input_tensor,\n weight_tensor,\n strides=layer_op.get_attr('strides'),\n padding=layer_op.get_attr('padding'),\n use_cudnn_on_gpu=layer_op.get_attr('use_cudnn_on_gpu'),\n data_format=layer_op.get_attr('data_format'),\n name=new_layer_name)\n elif layer_op.type == 'MatMul':\n return math_ops.matmul(\n input_tensor,\n weight_tensor,\n transpose_a=layer_op.get_attr('transpose_a'),\n transpose_b=layer_op.get_attr('transpose_b'),\n name=new_layer_name)\n elif layer_op.type == 'DepthwiseConv2dNative':\n return nn.depthwise_conv2d(\n input_tensor,\n weight_tensor,\n strides=layer_op.get_attr('strides'),\n padding=layer_op.get_attr('padding'),\n name=new_layer_name)\n else:\n raise ValueError('Cannot handle operation of type: %s' % layer_op.type)",
"def clone(self):\n if self.result_id is not None:\n new_id = self.module.new_id()\n else:\n new_id = None\n return Instruction(self.module, self.op_name, new_id, self.type_id,\n self.operands[:])",
"def clone(self):\n tmp = self.my_operator\n self.my_operator = None\n new = copy.copy(self)\n self.my_operator = tmp\n return new",
"def get_copied_op(org_instance, graph, scope=\"\"):\n\n #The name of the copied instance\n if scope != '':\n new_name = scope + '/' + org_instance.name\n else:\n new_name = org_instance.name\n\n return graph.as_graph_element(new_name, allow_tensor=True,\n allow_operation=True)",
"def clone(self, *args):\n return _SALOMERuntime.InputPyPort_clone(self, *args)",
"def copy_op_to_graph(org_instance, to_graph, variables,\n scope=\"\"):\n\n #The name of the new instance\n if scope != '':\n new_name = scope + '/' + org_instance.name\n else:\n new_name = org_instance.name\n\n #Extract names of variables\n copied_variables = dict((x.name, x) for x in variables)\n\n #If a variable by the new name already exists, return the\n #correspondng tensor that will act as an input\n if new_name in copied_variables:\n return to_graph.get_tensor_by_name(\n copied_variables[new_name].name)\n\n #If an instance of the same name exists, return appropriately\n try:\n already_present = to_graph.as_graph_element(new_name,\n allow_tensor=True,\n allow_operation=True)\n return already_present\n except:\n pass\n\n #Get the collections that the new instance needs to be added to.\n #The new collections will also be a part of the given scope.\n collections = []\n for name, collection in org_instance.graph._collections.items():\n if org_instance in collection:\n if scope == '':\n collections.append(name)\n else:\n collections.append(scope + '/' + name)\n\n #Take action based on the class of the instance\n\n if isinstance(org_instance, ops.Tensor):\n\n #If its a Tensor, it is one of the outputs of the underlying\n #op. Therefore, copy the op itself and return the appropriate\n #output.\n op = org_instance.op\n new_op = copy_op_to_graph(op, to_graph, variables, scope)\n output_index = op.outputs.index(org_instance)\n new_tensor = new_op.outputs[output_index]\n #Add to collections if any\n for collection in collections:\n to_graph.add_to_collection(collection, new_tensor)\n\n return new_tensor\n\n elif isinstance(org_instance, ops.Operation):\n\n op = org_instance\n\n #If it has an original_op parameter, copy it\n if op._original_op is not None:\n new_original_op = copy_op_to_graph(op._original_op, to_graph,\n variables, scope)\n else:\n new_original_op = None\n\n #If it has control inputs, call this function recursively on each.\n new_control_inputs = [copy_op_to_graph(x, to_graph, variables,\n scope)\n for x in op.control_inputs]\n\n #If it has inputs, call this function recursively on each.\n new_inputs = [copy_op_to_graph(x, to_graph, variables,\n scope)\n for x in op.inputs]\n\n #Make a new node_def based on that of the original.\n #An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it\n #stores String-based info such as name, device and type of the op.\n #Unique to every Operation instance.\n new_node_def = deepcopy(op._node_def)\n #Change the name\n new_node_def.name = new_name\n\n #Copy the other inputs needed for initialization\n output_types = op._output_types[:]\n input_types = op._input_types[:]\n\n #Make a copy of the op_def too.\n #Its unique to every _type_ of Operation.\n op_def = deepcopy(op._op_def)\n\n #Initialize a new Operation instance\n new_op = ops.Operation(new_node_def,\n to_graph,\n new_inputs,\n output_types,\n new_control_inputs,\n input_types,\n new_original_op,\n op_def)\n #Use Graph's hidden methods to add the op\n to_graph._add_op(new_op)\n to_graph._record_op_seen_by_control_dependencies(new_op)\n for device_function in reversed(to_graph._device_function_stack):\n new_op._set_device(device_function(new_op))\n\n return new_op\n\n else:\n raise TypeError(\"Could not copy instance: \" + str(org_instance))",
"def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned",
"def merge(self, op):\n self.__desc = listify(self.__desc, op.__desc)\n self.__name = listify(self.__name, op.__name)\n self.__label_pre = listify(self.__label_pre, op.__label_pre)\n self.__label_post = listify(self.__label_post, op.__label_post)",
"def clone(self, *args, **kwargs):\n new_self = copy.copy(self)\n kwargs = self.get_arguments(args, kwargs, onlykeys=True, onlyused=True)\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for key in kwargs:\n if _map_parameters is not None and key in _map_parameters:\n setattr(new_self, _map_parameters[key], kwargs[key])\n else:\n setattr(new_self, key, kwargs[key])\n return new_self",
"def sculptMeshCacheChangeCloneSource(*args, blendShape: Union[AnyStr, bool]=\"\", target:\n Union[AnyStr, bool]=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass",
"def clone(output, replace=None, strict=True, share_inputs=True, copy_inputs=DEPRECATED_ARG):\r\n if copy_inputs is not DEPRECATED_ARG:\r\n warnings.warn('In `clone()` function, the argument `copy_inputs` has been deprecated and renamed into `share_inputs`')\r\n assert share_inputs # since we used `copy_inputs` we should have default value for `share_inputs`\r\n share_inputs = copy_inputs\r\n\r\n inps, outs, other_stuff = rebuild_collect_shared(output,\r\n [],\r\n replace,\r\n [],\r\n strict,\r\n share_inputs)\r\n return outs",
"def clone(self, *args, **kwargs):\n return self.copy().reset(*args, **kwargs)",
"def clone_with_new_inputs(self, inputs, strict=True):\r\n assert isinstance(inputs, (list, tuple))\r\n remake_node = False\r\n new_inputs = inputs[:]\r\n for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):\r\n if not curr.type == new.type:\r\n if strict:\r\n # If compatible, casts new into curr.type\r\n new_inputs[i] = curr.type.filter_variable(new)\r\n else:\r\n remake_node = True\r\n if remake_node:\r\n new_node = self.op.make_node(*new_inputs)\r\n new_node.tag = copy(self.tag).__update__(new_node.tag)\r\n else:\r\n new_node = self.clone()\r\n new_node.inputs = new_inputs\r\n return new_node",
"def clone(self, name, **kwargs):\n obj = copy.deepcopy(self._object.get(name))\n obj.__dict__.update(kwargs)\n return obj",
"def test_clone_name(self, cosmo):\n # test changing name. clone treats 'name' differently (see next test)\n c = cosmo.clone(name=\"cloned cosmo\")\n assert c.name == \"cloned cosmo\" # changed\n # show name is the only thing changed\n c._name = cosmo.name # first change name back\n assert c == cosmo\n assert c.meta == cosmo.meta\n\n # now change a different parameter and see how 'name' changes\n c = cosmo.clone(meta={})\n assert c.name == cosmo.name + \" (modified)\"",
"def clone(self):",
"def rename(op_name):\n return type(op_name, (OpConverter,), {})",
"def _op_copy(self, op: str, other: t.Any) -> InspectableSet[_C]:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n retval = getattr(self.__members__, op)(other)\n if retval is not NotImplemented:\n return InspectableSet(retval)\n return NotImplemented",
"def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]",
"def clone(self, clone=None):\r\n # copy specs from supplied object\r\n if clone is not None: [setattr(self, v, getattr(clone, v)) for v in vars(clone)]",
"def clone(self, *args):\n return _SALOMERuntime.InputCorbaPort_clone(self, *args)",
"def clone(self):\n return self.__class__(self.name, *self)",
"def clone(self, theta = None):\n callable_obj = lambda params: self._callable(params)\n return pFunc_fromcallable(callable_obj = callable_obj)",
"def _prepare_for_operation(self, op_name, c):\n if isinstance(c, np.ndarray):\n if self.data.shape != c.shape:\n raise Exception('Cannot %s cuboid and ndarray. Shape of self'\n ' %s different from ndarray shape %s.'\n % (op_name, str(self.data.shape),\n str(c.shape)))\n\n c = xndarray.xndarray_like(self, data=c)\n elif np.isscalar(c):\n class Dummy:\n def __init__(self, val):\n self.data = val\n return Dummy(c)\n\n if set(self.axes_names) != set(c.axes_names):\n raise Exception('Cannot %s cuboids with different axes' % op_name)\n\n # TODO: check axes domains ...\n if self.axes_names != c.axes_names:\n c = c.reorient(self.axes_names)\n\n for i, a in enumerate(self.axes_names):\n if self.data.shape[i] != c.data.shape[i]:\n\n raise Exception('Cannot %s cuboids, shape mismatch.'\n ' self has shape: %s and operand has '\n ' shape: %s'\n % (op_name, self.descrip_shape(),\n c.descrip_shape()))\n\n return c",
"def create_graph_copy_op(self, src, target, tau):\n src_vars = tf.trainable_variables(src)\n target_vars = tf.trainable_variables(target)\n\n op_holder = []\n\n for s, t in zip(src_vars, target_vars):\n op_holder.append(t.assign((s.value() * tau) + ((1 - tau) * t.value())))\n return op_holder",
"def __clone_param__(cls, param_name: str, value: Any) -> Any: # pylint: disable=unused-argument\n return clone(value, safe=False)",
"def clone(self, **kwargs):\n new_inst = MetaTensor(self.as_tensor().clone(**kwargs))\n new_inst.__dict__ = deepcopy(self.__dict__)\n return new_inst",
"def convert_copy(node, **kwargs):\n return create_basic_op_node('Identity', node, kwargs)",
"def clone(self, *args):\n return _SALOMERuntime.InputPresetPort_clone(self, *args)"
] | [
"0.61716413",
"0.6113947",
"0.5992132",
"0.5831337",
"0.5507165",
"0.5421506",
"0.5408626",
"0.5406843",
"0.5405116",
"0.5387802",
"0.5377177",
"0.53763574",
"0.53390443",
"0.5338112",
"0.53215635",
"0.5299693",
"0.52697754",
"0.5264059",
"0.5250165",
"0.5242738",
"0.5225992",
"0.52144474",
"0.52062494",
"0.51986516",
"0.51967627",
"0.5191628",
"0.51558745",
"0.5153204",
"0.51331556",
"0.5127118"
] | 0.82067853 | 0 |
Makes sure that convolution inputs have compatible shapes. | def _AssertConvShapes(self, op_name, input_tensor, weights):
input_shape = input_tensor.get_shape()
weights_shape = weights.get_shape()
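  # Both shapes must be rank 4, and the input's channel dimension (index 3) must
  # match the weights' input-channel dimension (index 2).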
if (len(input_shape) != 4 or len(weights_shape) != 4 or
input_shape[3] != weights_shape[2]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, input_shape, weights_shape)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)",
"def testShapesSame(self, batch_size, in_length, in_channels, out_length,\n out_channels, kernel_shape, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n if use_output_shape:\n output_shape_arg = out_shape\n else:\n output_shape_arg = None\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=output_shape_arg,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, out_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [1, kernel_shape, out_channels, in_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def testKernelShape(self, out_channels, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3],\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=3,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3],\n name=\"conv1\",\n use_bias=use_bias)\n\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3, 3, 3],\n name=\"conv1\",\n use_bias=use_bias)",
"def test_convolution():\n # Default test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,1,4,4,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Default test\")\n # All dimensions 1\n inputs_shape = [1,1,1,1,1]\n filters_shape = [1,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Input and filter dimensions 1\")\n # Filter spans all dimensions\n # This will lead to a failure for theano 2d3d for some reason\n # (for now we ignore this and remove theano2d3d for this test\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,3,4,5,3]\n test_convolution_for_parameters(inputs_shape, filters_shape,\n \"Filter dimension = Input dimension\")\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,3]\n filters_shape = [3,2,2,2,3]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension < all Input dimension\")\n # 1,1,1,1,1 filter\n # Filter smaller for all dimensions\n inputs_shape = [3,3,4,5,1]\n filters_shape = [3,1,1,1,1]\n test_convolution_for_parameters(inputs_shape, filters_shape, \n \"Filter dimension 1 everywhere\")",
"def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv1D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv1D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3, 3\\], must be either a \"\n r\"positive integer or an iterable of positive integers of \"\n r\"size 1\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.SeparableConv1D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3, 3],\n use_bias=use_bias)",
"def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv1D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))",
"def testShapes(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_length = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n\n kernel_shape = random.randint(1, 10)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias)\n\n output1 = conv1(inputs)\n\n self.assertTrue(\n output1.get_shape().is_compatible_with(\n [batch_size, in_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))\n\n conv2 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.VALID,\n stride=1,\n name=\"conv2\",\n use_bias=use_bias)\n\n output2 = conv2(inputs)\n\n self.assertTrue(\n output2.get_shape().is_compatible_with(\n [batch_size, in_length - kernel_shape + 1, out_channels]))\n\n self.assertTrue(\n conv2.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv2.b.get_shape().is_compatible_with(\n [out_channels]))",
"def _valid_convolve(images: th.Tensor, kernels: th.Tensor) -> th.Tensor:\n ret = F.conv2d(images.view((images.shape[0], *images.shape[-3:])).transpose(1, 0),\n th.flip(kernels.view((kernels.shape[0], *kernels.shape[-3:])), dims=(-1, -2)),\n groups=kernels.shape[0]).transpose(1, 0)\n return ret",
"def testInputTypeError(self, batch_size, in_length, in_channels, out_channels,\n kernel_shape, padding, use_bias, out_shape,\n stride_shape, use_output_shape):\n conv1 = snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n for dtype in (tf.uint32, tf.float64):\n x = tf.constant(np.ones([batch_size, in_length,\n in_channels]), dtype=dtype)\n err = \"Input must have dtype tf.float.*\"\n with self.assertRaisesRegexp(TypeError, err):\n conv1(x)",
"def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):\n conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding=\"same\")\n output = conv_layer(conv1d_placeholder)\n output_width = output.axes.find_by_name(\"W\")[0].length\n assert output_width == np.ceil(width / float(stride)), (\"Same convolution output width != \"\n \"ceil(input_width / stride): {} != \"\n \"ceil({} / {})\").format(output_width,\n width,\n stride)",
"def conv2d_config(input_shape, output_shape, filter_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if len(input_shape) == 4:\n batch_size = input_shape[0]\n else:\n batch_size = None\n\n input_shape = np.array(input_shape[-3:])\n output_shape = np.array(tf.TensorShape(output_shape).as_list()[-3:])\n\n # Determine what kind of convolution to use\n if np.all(input_shape[-3:-1] >= output_shape[-3:-1]):\n conv_type = \"NORMAL\"\n elif np.all(input_shape[-3:-1] <= output_shape[-3:-1]):\n conv_type = 'FULL'\n # swap input and output shape\n input_shape, output_shape = output_shape, input_shape\n else:\n raise ValueError('Input shape dimensions must be both bigger than or both smaller than output shape dimensions')\n\n filter_shape = np.array(tf.TensorShape(filter_shape).as_list()[:2] + [input_shape[-1], output_shape[-1]])\n stride = np.ceil((input_shape[:2] - filter_shape[:2] + 1) / output_shape[:2]).astype(np.int)\n padding = output_shape[:2] * stride - input_shape[:2] + filter_shape[:2] - 1\n\n # Determine what type of padding can be used\n if np.all(np.ceil(input_shape[:2] / stride) == output_shape[:2]):\n padding_type = 'SAME'\n else:\n padding_type = 'VALID'\n\n # get padded input shape\n input_shape[:2] = input_shape[:2] + padding.astype(np.int)\n padded_shape = [batch_size] + input_shape.tolist()\n\n left_padding = np.ceil(padding / 2).astype(np.int)\n right_padding = np.floor(padding / 2).astype(np.int)\n\n padding = [[0, 0], [left_padding[0], right_padding[0]], [left_padding[1], right_padding[1]], [0, 0]]\n stride = [1, stride[0], stride[1], 1]\n\n return filter_shape.tolist(), stride, padding, padded_shape, conv_type, padding_type",
"def test_wrong_filters_shape_length():\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S])\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n with pytest.raises(ValueError) as exinfo:\n ng.convolution(conv_params, inputs, filters, {})\n assert str(exinfo.value) == 'convolution filter shape must be length 5, found {}'\\\n .format(len(ax_f))",
"def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv2D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))",
"def conv2d(input, filters, image_shape=None, filter_shape=None,\r\n border_mode='valid', subsample=(1, 1), **kargs):\r\n\r\n #accept Constant value for image_shape and filter_shape.\r\n if image_shape is not None:\r\n image_shape = list(image_shape)\r\n for i in xrange(len(image_shape)):\r\n if image_shape[i] is not None:\r\n try:\r\n image_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(image_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. We got\"\r\n \" %s for the image_shape parameter\" %\r\n image_shape[i])\r\n assert str(image_shape[i].dtype).startswith('int')\r\n image_shape[i] = int(image_shape[i])\r\n if filter_shape is not None:\r\n filter_shape = list(filter_shape)\r\n for i in xrange(len(filter_shape)):\r\n if filter_shape[i] is not None:\r\n try:\r\n filter_shape[i] = get_scalar_constant_value(\r\n as_tensor_variable(filter_shape[i]))\r\n except NotScalarConstantError, e:\r\n raise NotScalarConstantError(\r\n \"The convolution need that the shape\"\r\n \" information are constant values. We got\"\r\n \" %s for the filter_shape \"\r\n \"parameter\" % filter_shape[i])\r\n assert str(filter_shape[i].dtype).startswith('int')\r\n filter_shape[i] = int(filter_shape[i])\r\n\r\n if image_shape and filter_shape:\r\n try:\r\n assert image_shape[1] == filter_shape[1]\r\n except Exception:\r\n print 'image ', image_shape, ' filters ', filter_shape\r\n raise\r\n\r\n if filter_shape is not None:\r\n nkern = filter_shape[0]\r\n kshp = filter_shape[2:]\r\n else:\r\n nkern, kshp = None, None\r\n\r\n if image_shape is not None:\r\n bsize = image_shape[0]\r\n imshp = image_shape[1:]\r\n else:\r\n bsize, imshp = None, None\r\n\r\n op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],\r\n imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)\r\n\r\n return op(input, filters)",
"def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv2D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3, 4],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv2D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3], must be either a positive\"\n r\" integer or an iterable of positive integers of size 2\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.SeparableConv2D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3],\n use_bias=use_bias)",
"def testShapesNotKnown(self, use_bias):\n\n batch_size = 5\n in_length = 32\n in_channels = out_channels = 5\n kernel_shape = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv1D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_length, in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_length, out_channels))",
"def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_height, in_width, in_channels])\n\n conv1 = snt.Conv2D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_h, kernel_shape_w, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def convolution_internal(\n input, # pylint: disable=redefined-builtin\n filters,\n strides=None,\n padding=\"VALID\",\n data_format=None,\n dilations=None,\n name=None,\n call_from_convolution=True,\n num_spatial_dims=None):\n if (not isinstance(filters, variables_lib.Variable) and\n not tensor_util.is_tf_type(filters)):\n with ops.name_scope(\"convolution_internal\", None, [filters, input]):\n filters = ops.convert_to_tensor(filters, name='filters')\n if (not isinstance(input, tensor_lib.Tensor) and not tensor_util.is_tf_type(\n input)):\n with ops.name_scope(\"convolution_internal\", None, [filters, input]):\n input = ops.convert_to_tensor(input, name=\"input\")\n\n filters_rank = filters.shape.rank\n inputs_rank = input.shape.rank\n if num_spatial_dims is None:\n if filters_rank:\n num_spatial_dims = filters_rank - 2\n elif inputs_rank:\n num_spatial_dims = inputs_rank - 2\n else:\n raise ValueError(\n \"When `num_spatial_dims` is not set, one of `input.shape.rank` or \"\n \"`filters.shape.rank` must be known. \"\n f\"Received: input.shape={input.shape} of rank {inputs_rank} and \"\n f\"filters.shape={filters.shape} of rank {filters_rank}\")\n elif filters_rank and filters_rank - 2 != num_spatial_dims:\n raise ValueError(\n \"`filters.shape.rank - 2` should equal `num_spatial_dims`. Received: \"\n f\"filters.shape={filters.shape} of rank {filters_rank} and \"\n f\"num_spatial_dims={num_spatial_dims}\")\n\n if inputs_rank:\n num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.\n else:\n num_batch_dims = 1 # By default, assume single batch dimension.\n\n if num_spatial_dims not in {1, 2, 3}:\n raise ValueError(\n \"`num_spatial_dims` must be 1, 2, or 3. \"\n f\"Received: num_spatial_dims={num_spatial_dims}.\")\n\n if data_format is None or data_format in _CHANNELS_LAST_FORMATS:\n channel_index = num_batch_dims + num_spatial_dims\n else:\n channel_index = num_batch_dims\n\n if dilations is None:\n dilations = _get_sequence(dilations, num_spatial_dims, channel_index,\n \"dilations\")\n is_dilated_conv = False\n else:\n dilations = _get_sequence(dilations, num_spatial_dims, channel_index,\n \"dilations\")\n is_dilated_conv = any(i != 1 for i in dilations)\n\n strides = _get_sequence(strides, num_spatial_dims, channel_index, \"strides\")\n has_tpu_context = device_context.enclosing_tpu_context() is not None\n\n if name:\n default_name = None\n elif not has_tpu_context or call_from_convolution:\n default_name = \"convolution\"\n elif num_spatial_dims == 2: # Most common case.\n default_name = \"Conv2D\"\n elif num_spatial_dims == 3:\n default_name = \"Conv3D\"\n else:\n default_name = \"conv1d\"\n\n with ops.name_scope(name, default_name, [input, filters]) as name:\n # Fast path for TPU or if no dilation, as gradient only supported on TPU\n # for dilations.\n if not is_dilated_conv or has_tpu_context:\n if num_spatial_dims == 2: # Most common case.\n op = _conv2d_expanded_batch\n elif num_spatial_dims == 3:\n op = _conv3d_expanded_batch\n else:\n op = conv1d\n\n return op(\n input,\n filters,\n strides,\n padding=padding,\n data_format=data_format,\n dilations=dilations,\n name=name)\n else:\n if channel_index == 1:\n strides = strides[2:]\n dilations = dilations[2:]\n else:\n strides = strides[1:-1]\n dilations = dilations[1:-1]\n\n op = Convolution(\n tensor_shape.as_shape(input.shape),\n tensor_shape.as_shape(filters.shape),\n padding,\n strides=strides,\n dilation_rate=dilations,\n name=name,\n data_format=data_format,\n num_spatial_dims=num_spatial_dims)\n return 
op(input, filters)",
"def testKernelShape(self, use_bias):\n\n snt.Conv1D(output_channels=10, kernel_shape=[3], name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1D(output_channels=10, kernel_shape=3, name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel shape\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")",
"def testShapesWithUnknownInputShape(self, use_bias):\n\n batch_size = 5\n in_depth = in_height = in_width = 32\n in_channels = out_channels = 5\n kernel_shape_d = kernel_shape_h = kernel_shape_w = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv3D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_depth, in_height, in_width,\n in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_depth, in_height, in_width, out_channels))",
"def convolution_shape(input_shape, n_filters, filter_shape, stride, padding):\n img_height, img_width, _ = input_shape\n height = (img_height + 2 * padding[0] - filter_shape[0]) / float(stride) + 1\n width = (img_width + 2 * padding[1] - filter_shape[1]) / float(stride) + 1\n\n return int(height), int(width), n_filters",
"def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4. \"\n \"Shape: \" + str(static_shape))\n # Add support for left padding.\n if kwargs.get(\"padding\") == \"LEFT\":\n dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n name = \"{}_{}\".format(kwargs.get(\"name\", \"conv\"), name_suffix)\n original_name = kwargs.pop(\"name\", None)\n original_force2d = kwargs.pop(\"force2d\", None)\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")",
"def test_conv_consistency(self) -> None:\n x = Input(\n 'const1',\n [1, 3, 3, 3],\n Float32(),\n )\n w = Constant(\n 'weight',\n Float32(),\n np.zeros([1, 2, 2, 3])\n )\n input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}\n\n Conv(\n 'conv_under_test',\n [1, 3, 3, 3],\n Float32(),\n input_ops,\n pads=[1, 2, 1, 2],\n strides=[2, 2]\n )\n\n print(\"Consistency test for conv operator passed!\")",
"def testShapesNotKnown(self, use_bias):\n\n inputs = tf.placeholder(\n tf.float32, shape=[None, None, self.in_channels], name=\"inputs\")\n\n conv1 = snt.SeparableConv1D(\n output_channels=self.out_channels_dw,\n channel_multiplier=1,\n kernel_shape=self.kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n output = conv1(inputs)\n\n with self.test_session() as session:\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n output_eval = session.run(output, {inputs: np.zeros(self.input_shape)})\n self.assertEqual(output_eval.shape, tuple(self.output_shape))",
"def clConvolution(self, size, mask):",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_depth = random.randint(10, 288)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_d = random.randint(1, 11)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_depth, in_height, in_width, in_channels])\n\n conv1 = snt.Conv3D(\n output_channels=out_channels,\n kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias,\n name=\"conv1\")\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_depth, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_d, kernel_shape_h, kernel_shape_w, in_channels,\n out_channels]))\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):\n if isinstance(dimension_numbers, ConvDimensionNumbers):\n return dimension_numbers\n if len(lhs_shape) != len(rhs_shape):\n msg = 'convolution requires lhs and rhs ndim to be equal, got {} and {}.'\n raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))\n\n if dimension_numbers is None:\n iota = tuple(range(len(lhs_shape)))\n return ConvDimensionNumbers(iota, iota, iota)\n elif isinstance(dimension_numbers, (list, tuple)):\n if len(dimension_numbers) != 3:\n msg = 'convolution dimension_numbers list/tuple must be length 3, got {}.'\n raise TypeError(msg.format(len(dimension_numbers)))\n if not all(isinstance(elt, str) for elt in dimension_numbers):\n msg = 'convolution dimension_numbers elements must be strings, got {}.'\n raise TypeError(msg.format(tuple(map(type, dimension_numbers))))\n msg = ('convolution dimension_numbers[{}] must have len equal to the ndim '\n 'of lhs and rhs, got {} for lhs and rhs shapes {} and {}.')\n for i, elt in enumerate(dimension_numbers):\n if len(elt) != len(lhs_shape):\n raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))\n\n lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)\n return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)\n else:\n msg = 'convolution dimension_numbers must be tuple/list or None, got {}.'\n raise TypeError(msg.format(type(dimension_numbers)))",
"def num_conv_locations(input_shape, filter_shape, strides, padding):\n if len(input_shape) != 4 and len(input_shape) != 3:\n raise ValueError(\"input_shape must be length 4, corresponding to a Conv2D,\"\n \" or length 3, corresponding to a Conv1D.\")\n if len(input_shape) != len(filter_shape):\n raise ValueError(\"Inconsistent number of dimensions between input and \"\n \"filter for convolution\")\n\n if strides is None:\n if len(input_shape) == 4:\n strides = [1, 1, 1, 1]\n else:\n strides = [1, 1, 1]\n\n # Use negative integer division to implement 'rounding up'.\n # Formula for convolution shape taken from:\n # http://machinelearninguru.com/computer_vision/basics/convolution/convolution_layer.html\n if len(input_shape) == 3:\n if padding is not None and padding.lower() == \"valid\":\n out_width = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n else:\n out_width = -(-input_shape[1] // strides[1])\n\n return out_width\n else:\n if padding is not None and padding.lower() == \"valid\":\n out_height = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n out_width = -(-(input_shape[2] - filter_shape[1] + 1) // strides[2])\n else:\n out_height = -(-input_shape[1] // strides[1])\n out_width = -(-input_shape[2] // strides[2])\n\n return out_height * out_width",
"def testShapesNotKnown(self, use_bias):\n\n batch_size = 5\n in_height = in_width = 32\n in_channels = out_channels = 5\n kernel_shape_h = kernel_shape_w = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv2D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_height, in_width, in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_height, in_width, out_channels))"
] | [
"0.70430326",
"0.6984046",
"0.67752093",
"0.67076695",
"0.6631883",
"0.6590607",
"0.65526205",
"0.6539452",
"0.65392506",
"0.65136945",
"0.6513212",
"0.6502569",
"0.64620143",
"0.64320785",
"0.6431594",
"0.6411467",
"0.64058405",
"0.63952243",
"0.63517404",
"0.6339218",
"0.6326168",
"0.6304486",
"0.6286611",
"0.6268357",
"0.6259792",
"0.6230927",
"0.62001556",
"0.6199535",
"0.6196328",
"0.6180178"
] | 0.732145 | 0 |
Makes sure that FC layer inputs have compatible shapes. | def _AssertFCShapes(self, op_name, weights, input_tensor):
weights_shape = weights.get_shape()
input_shape = input_tensor.get_shape()
if (len(weights_shape) != 2 or len(input_shape) != 2 or
weights_shape[1] != input_shape[0]):
raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %
(op_name, weights_shape, input_shape)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _AssertConvShapes(self, op_name, input_tensor, weights):\n input_shape = input_tensor.get_shape()\n weights_shape = weights.get_shape()\n if (len(input_shape) != 4 or len(weights_shape) != 4 or\n input_shape[3] != weights_shape[2]):\n raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %\n (op_name, input_shape, weights_shape))",
"def is_compatible_with(self, inputs): # pylint:disable=useless-super-delegation\n if self.shape is None:\n return False\n if len(inputs) != len(self):\n raise ValueError('Expects ' +\n str(len(self)) + ' inputs, '\n 'but it received ' + str(len(inputs)) +\n ' input tensors. Inputs received: ' + str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, self)):\n if spec is None:\n continue\n\n if (spec.ndim is not None or\n spec.min_ndim is not None or\n spec.max_ndim is not None):\n if x.shape.ndims is None:\n raise ValueError('Input ' + ' is incompatible : '\n 'its rank is undefined, but the layer requires a '\n 'defined rank.')\n\n # Check ndim.\n if spec.ndim is not None:\n ndim = x.shape.ndims\n if ndim != spec.ndim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n 'expected ndim=' + str(spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(x.shape.as_list()))\n if spec.max_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n 'expected max_ndim=' + str(spec.max_ndim) +\n ', found ndim=' + str(ndim))\n if spec.min_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n ': expected min_ndim=' + str(spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. Full shape received: ' +\n str(x.shape.as_list()))\n # Check dtype.\n if spec.dtype is not None:\n if x.dtype != spec.dtype:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n 'expected dtype=' + str(spec.dtype) +\n ', found dtype=' + str(x.dtype))\n # Check specific shape axes.\n if spec.axes:\n shape = x.shape.as_list()\n if shape is not None:\n for axis, value in spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n raise ValueError(\n 'Input ' + str(input_index) + ' is'\n ' incompatible with the layer: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n # Check shape.\n if spec.shape is not None:\n shape = x.shape.as_list()\n if shape is not None:\n for spec_dim, dim in zip(spec.shape, shape):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible ' +\n ': expected shape=' + str(spec.shape) +\n ', found shape=' + str(shape))",
"def testInputTypeError(self, batch_size, in_length, in_channels, out_channels,\n kernel_shape, padding, use_bias, out_shape,\n stride_shape, use_output_shape):\n conv1 = snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n for dtype in (tf.uint32, tf.float64):\n x = tf.constant(np.ones([batch_size, in_length,\n in_channels]), dtype=dtype)\n err = \"Input must have dtype tf.float.*\"\n with self.assertRaisesRegexp(TypeError, err):\n conv1(x)",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")",
"def _is_all_input_shape_generalize(input_shape_tuple):\n for elem in input_shape_tuple:\n if not is_shape_unknown(elem.shape):\n return False\n return True",
"def testShapesSame(self, batch_size, in_length, in_channels, out_length,\n out_channels, kernel_shape, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n if use_output_shape:\n output_shape_arg = out_shape\n else:\n output_shape_arg = None\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=output_shape_arg,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, out_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [1, kernel_shape, out_channels, in_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def testShapesWithUnknownInputShape(self, use_bias):\n\n batch_size = 5\n in_depth = in_height = in_width = 32\n in_channels = out_channels = 5\n kernel_shape_d = kernel_shape_h = kernel_shape_w = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv3D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_depth, in_height, in_width,\n in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_depth, in_height, in_width, out_channels))",
"def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg",
"def validate_shape(self):\n if len(self._first_rgb.shape) != 5:\n raise ValueError(f\"Invalid shape: {self._first_rgb.shape}\")",
"def assert_input_compatibility(input_spec: TensorSpec, inputs):\n if not input_spec:\n return\n input_spec.shape.to('cpu')\n inputs.to('cpu')\n if len(inputs) != len(input_spec):\n raise ValueError('Tensor ' + ' expects ' +\n str(len(input_spec)) + ' inputs, '\n 'but it received ' + str(len(inputs)) +\n ' input tensors. Inputs received: ' + str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n if spec is None:\n continue\n\n if (spec.ndim is not None or\n spec.min_ndim is not None or\n spec.max_ndim is not None):\n if x.shape.ndims is None:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'its rank is undefined, but the layer requires a '\n 'defined rank.')\n\n # Check ndim.\n if spec.ndim is not None:\n ndim = x.shape.ndims\n if ndim != spec.ndim:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'expected ndim=' + str(spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(x.shape.as_list()))\n if spec.max_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'expected max_ndim=' + str(spec.max_ndim) +\n ', found ndim=' + str(ndim))\n if spec.min_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n ': expected min_ndim=' + str(spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. Full shape received: ' +\n str(x.shape.as_list()))\n # Check dtype.\n if spec.dtype is not None:\n if x.dtype != spec.dtype:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'expected dtype=' + str(spec.dtype) +\n ', found dtype=' + str(x.dtype))\n # Check specific shape axes.\n if spec.axes:\n shape = x.shape.as_list()\n if shape is not None:\n for axis, value in spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n raise ValueError(\n 'Input ' + str(input_index) + ' of tensor ' + ' is'\n ' incompatible with the layer: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n # Check shape.\n if spec.shape is not None:\n shape = x.shape.as_list()\n if shape is not None:\n for spec_dim, dim in zip(spec.shape, shape):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with tensor ' +\n ': expected shape=' + str(spec.shape) +\n ', found shape=' + str(shape))",
"def check_input_shapes(*args):\n\n # Collect the shapes of the inputs\n shapes = set()\n\n # DESIGN NOTES - currently allow:\n # - scalars,\n # - 0 dim ndarrays (also scalars but packaged differently)\n # - 1 dim ndarrays with only a single value\n\n for val in args:\n if isinstance(val, np.ndarray):\n # Note that 0-dim ndarrays (which are scalars) pass through as do\n # one dimensional arrays with a single value (also a scalar)\n if not(val.ndim == 0 or val.shape == (1,)):\n shapes.add(val.shape)\n # elif isinstance(val, Series):\n # # Note that 0-dim ndarrays (which are scalars) pass through\n # if val.ndim > 0:\n # shapes.add(val.shape)\n elif val is None or isinstance(val, (float, int, np.generic)):\n pass # No need to track scalars and optional values pass None\n else:\n raise ValueError(f'Unexpected input to check_input_shapes: {type(val)}')\n\n # shapes can be an empty set (all scalars) or contain one common shape\n # otherwise raise an error\n if len(shapes) > 1:\n raise ValueError('Inputs contain arrays of different shapes.')\n\n if len(shapes) == 1:\n return shapes.pop()\n\n return 1",
"def testShapes(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_length = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n\n kernel_shape = random.randint(1, 10)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias)\n\n output1 = conv1(inputs)\n\n self.assertTrue(\n output1.get_shape().is_compatible_with(\n [batch_size, in_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))\n\n conv2 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.VALID,\n stride=1,\n name=\"conv2\",\n use_bias=use_bias)\n\n output2 = conv2(inputs)\n\n self.assertTrue(\n output2.get_shape().is_compatible_with(\n [batch_size, in_length - kernel_shape + 1, out_channels]))\n\n self.assertTrue(\n conv2.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv2.b.get_shape().is_compatible_with(\n [out_channels]))",
"def testShapesNotKnown(self, use_bias):\n\n batch_size = 5\n in_length = 32\n in_channels = out_channels = 5\n kernel_shape = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv1D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_length, in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_length, out_channels))",
"def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv1D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))",
"def testShapesNotKnown(self, use_bias):\n\n inputs = tf.placeholder(\n tf.float32, shape=[None, None, self.in_channels], name=\"inputs\")\n\n conv1 = snt.SeparableConv1D(\n output_channels=self.out_channels_dw,\n channel_multiplier=1,\n kernel_shape=self.kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n output = conv1(inputs)\n\n with self.test_session() as session:\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n output_eval = session.run(output, {inputs: np.zeros(self.input_shape)})\n self.assertEqual(output_eval.shape, tuple(self.output_shape))",
"def _check_shape(placeholder_shape, data_shape):\n\n return True",
"def _AssertShapesMatch(op_name, in_tensor, out_tensor):\n in_shape = in_tensor.get_shape()\n out_shape = out_tensor.get_shape()\n\n if not in_shape.is_compatible_with(out_shape):\n raise ValueError('%s should not change tensor shape: input %s, '\n 'output %s' % (op_name, in_shape, out_shape))",
"def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv1D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv1D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3, 3\\], must be either a \"\n r\"positive integer or an iterable of positive integers of \"\n r\"size 1\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.SeparableConv1D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3, 3],\n use_bias=use_bias)",
"def test_shape_error(self):\n raise unittest.SkipTest(\"Failing after fixing Poly unsoundness #4878\")\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n self.CheckShapePolymorphism(\n lambda x, y: x + y,\n input_signature=[tf.TensorSpec([None]), tf.TensorSpec([4])],\n in_shapes=[\"(v,)\", \"(4,)\"],\n expected_output_signature=tf.TensorSpec([None]))\n\n four_ones = np.ones((4,))\n # We get the error even if we use correct actual arguments\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n jax2tf.convert(lambda x, y: x + y,\n in_shapes=[\"(v,)\", \"(4,)\"])(four_ones, four_ones)\n\n with self.assertRaisesRegex(TypeError,\n re.escape(\"dot_general requires contracting dimensions to have the same shape, got [4] and [v].\")):\n jax2tf.convert(lambda x: jnp.matmul(x, x),\n in_shapes=[\"(v, 4)\"])(np.ones((4, 4)))\n\n # TODO: this is an opportunity to improve the translation, should not error\n with self.assertRaisesRegex(TypeError,\n \"Only integers, .* tensors are valid indices, got 0\"):\n jax2tf.convert(lambda x: jnp.split(x, 2),\n in_shapes=[\"(2*v,)\"])(four_ones)",
"def test_input_shape_error(self):\n\n def net_func():\n input_value = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])\n paddle.bincount(input_value)\n\n with self.assertRaises(ValueError):\n self.run_network(net_func)",
"def input_shape(self):\n return [None, 32, 32, 1]",
"def input_shape(self):\n return [None, 32, 32, 1]",
"def input_shape(self):\n return [None, 32, 32, 1]",
"def _keras_update_shape(self, prep):\n\n # Run preprocessing on the training data\n X_transform = prep.fit_transform(self.X_train)\n\n # If the input shape has not been specified, it is simply the number of features in X_transform\n if 'input_shape' not in self.model.first_layer_kwargs:\n self.model.first_layer_kwargs['input_shape'] = tuple([X_transform.shape[1]])\n # Else update the input shape based on the number of features after preprocessing\n else:\n # Transform to a list to make the input_shape mutable\n self.model.first_layer_kwargs['input_shape'] = list(self.model.first_layer_kwargs['input_shape'])\n # Update the number of features based on X_transform\n if self.model.lags:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//(self.model.lags + (1 if self.model.current_sample_as_input else 0))\n else:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//np.prod(self.model.first_layer_kwargs['input_shape'][:-1])\n # Transform back to a tuple as required by Keras\n self.model.first_layer_kwargs['input_shape'] = tuple(self.model.first_layer_kwargs['input_shape'])\n \n # Ensure the Architecture has been updated\n self.model.architecture.iloc[0, 2]['input_shape'] = self.model.first_layer_kwargs['input_shape']\n \n # 2D, 3D and 4D data is valid. \n # e.g. The input_shape can be a tuple of (subsequences, timesteps, features), with subsequences and timesteps as optional.\n # A 4D shape may be valid for e.g. a ConvLSTM with (timesteps, rows, columns, features) \n if len(self.model.first_layer_kwargs['input_shape']) > 5:\n err = \"Unsupported input_shape: {}\".format(self.model.first_layer_kwargs['input_shape'])\n raise Exception(err)",
"def testKernelShape(self, out_channels, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3],\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=3,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3],\n name=\"conv1\",\n use_bias=use_bias)\n\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3, 3, 3],\n name=\"conv1\",\n use_bias=use_bias)",
"def testShapesNotKnown(self, use_bias):\n\n inputs = tf.placeholder(\n tf.float32, shape=[None, None, None, self.in_channels], name=\"inputs\")\n\n conv1 = snt.DepthwiseConv2D(\n channel_multiplier=self.channel_multiplier,\n kernel_shape=self.kernel_shape,\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n output_eval = output.eval({inputs: np.zeros(self.input_shape)})\n self.assertEqual(output_eval.shape, tuple(self.output_shape))",
"def input_type_shapes(self):\n return self._input_type_shapes",
"def testShapesSame(self, use_bias):\n\n out_channels = self.out_channels_dw\n input_shape = self.input_shape\n kernel_shape = self.kernel_shape\n output_shape = self.output_shape\n depthwise_filter_shape = self.depthwise_filter_shape\n pointwise_filter_shape = self.pointwise_filter_shape\n channel_multiplier = self.channel_multiplier\n\n inputs = tf.placeholder(tf.float32, shape=input_shape)\n\n conv1 = snt.SeparableConv2D(\n output_channels=out_channels,\n channel_multiplier=channel_multiplier,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(output.get_shape().is_compatible_with(output_shape))\n self.assertTrue(conv1.w_dw.get_shape().is_compatible_with(\n depthwise_filter_shape))\n self.assertTrue(conv1.w_pw.get_shape().is_compatible_with(\n pointwise_filter_shape))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels]))",
"def testKernelShape(self, use_bias):\n\n snt.Conv1D(output_channels=10, kernel_shape=[3], name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1D(output_channels=10, kernel_shape=3, name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel shape\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")"
] | [
"0.7000613",
"0.6832547",
"0.6529788",
"0.64534384",
"0.63770324",
"0.6370928",
"0.6363974",
"0.6346374",
"0.633438",
"0.63139635",
"0.6271003",
"0.6215813",
"0.6161302",
"0.6155026",
"0.6152465",
"0.614528",
"0.61381274",
"0.61302507",
"0.61197114",
"0.61083287",
"0.6102161",
"0.608538",
"0.608538",
"0.608538",
"0.6069723",
"0.6066751",
"0.60624254",
"0.6051453",
"0.60465",
"0.6045687"
] | 0.7046982 | 0 |
Makes sure that shapes of input and output tensors are compatible. | def _AssertShapesMatch(op_name, in_tensor, out_tensor):
in_shape = in_tensor.get_shape()
out_shape = out_tensor.get_shape()
if not in_shape.is_compatible_with(out_shape):
raise ValueError('%s should not change tensor shape: input %s, '
'output %s' % (op_name, in_shape, out_shape)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_compatible_with(self, inputs): # pylint:disable=useless-super-delegation\n if self.shape is None:\n return False\n if len(inputs) != len(self):\n raise ValueError('Expects ' +\n str(len(self)) + ' inputs, '\n 'but it received ' + str(len(inputs)) +\n ' input tensors. Inputs received: ' + str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, self)):\n if spec is None:\n continue\n\n if (spec.ndim is not None or\n spec.min_ndim is not None or\n spec.max_ndim is not None):\n if x.shape.ndims is None:\n raise ValueError('Input ' + ' is incompatible : '\n 'its rank is undefined, but the layer requires a '\n 'defined rank.')\n\n # Check ndim.\n if spec.ndim is not None:\n ndim = x.shape.ndims\n if ndim != spec.ndim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n 'expected ndim=' + str(spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(x.shape.as_list()))\n if spec.max_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n 'expected max_ndim=' + str(spec.max_ndim) +\n ', found ndim=' + str(ndim))\n if spec.min_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n ': expected min_ndim=' + str(spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. Full shape received: ' +\n str(x.shape.as_list()))\n # Check dtype.\n if spec.dtype is not None:\n if x.dtype != spec.dtype:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with the layer: '\n 'expected dtype=' + str(spec.dtype) +\n ', found dtype=' + str(x.dtype))\n # Check specific shape axes.\n if spec.axes:\n shape = x.shape.as_list()\n if shape is not None:\n for axis, value in spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n raise ValueError(\n 'Input ' + str(input_index) + ' is'\n ' incompatible with the layer: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n # Check shape.\n if spec.shape is not None:\n shape = x.shape.as_list()\n if shape is not None:\n for spec_dim, dim in zip(spec.shape, shape):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible ' +\n ': expected shape=' + str(spec.shape) +\n ', found shape=' + str(shape))",
"def assert_input_compatibility(input_spec: TensorSpec, inputs):\n if not input_spec:\n return\n input_spec.shape.to('cpu')\n inputs.to('cpu')\n if len(inputs) != len(input_spec):\n raise ValueError('Tensor ' + ' expects ' +\n str(len(input_spec)) + ' inputs, '\n 'but it received ' + str(len(inputs)) +\n ' input tensors. Inputs received: ' + str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n if spec is None:\n continue\n\n if (spec.ndim is not None or\n spec.min_ndim is not None or\n spec.max_ndim is not None):\n if x.shape.ndims is None:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'its rank is undefined, but the layer requires a '\n 'defined rank.')\n\n # Check ndim.\n if spec.ndim is not None:\n ndim = x.shape.ndims\n if ndim != spec.ndim:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'expected ndim=' + str(spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(x.shape.as_list()))\n if spec.max_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'expected max_ndim=' + str(spec.max_ndim) +\n ', found ndim=' + str(ndim))\n if spec.min_ndim is not None:\n ndim = x.shape.ndims\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n ': expected min_ndim=' + str(spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. Full shape received: ' +\n str(x.shape.as_list()))\n # Check dtype.\n if spec.dtype is not None:\n if x.dtype != spec.dtype:\n raise ValueError('Input ' + str(input_index) + ' of tensor ' + ' is incompatible with the layer: '\n 'expected dtype=' + str(spec.dtype) +\n ', found dtype=' + str(x.dtype))\n # Check specific shape axes.\n if spec.axes:\n shape = x.shape.as_list()\n if shape is not None:\n for axis, value in spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n raise ValueError(\n 'Input ' + str(input_index) + ' of tensor ' + ' is'\n ' incompatible with the layer: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n # Check shape.\n if spec.shape is not None:\n shape = x.shape.as_list()\n if shape is not None:\n for spec_dim, dim in zip(spec.shape, shape):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with tensor ' +\n ': expected shape=' + str(spec.shape) +\n ', found shape=' + str(shape))",
"def _AssertConvShapes(self, op_name, input_tensor, weights):\n input_shape = input_tensor.get_shape()\n weights_shape = weights.get_shape()\n if (len(input_shape) != 4 or len(weights_shape) != 4 or\n input_shape[3] != weights_shape[2]):\n raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %\n (op_name, input_shape, weights_shape))",
"def _check_shape_of_model_output(output: Tensor, input_ids: Tensor) ->None:\n bs, seq_len = input_ids.shape[:2]\n invalid_out_shape = len(output.shape) != 3 or output.shape[0] != bs or output.shape[1] != seq_len\n if invalid_out_shape:\n raise ValueError(f'The model output must be `Tensor` of a shape `[batch_size, seq_len, model_dim]` i.e. [{bs}, {seq_len}. , `model_dim`], but got {output.shape}.')",
"def _check_same_shape(preds: Tensor, target: Tensor) ->None:\n if preds.shape != target.shape:\n raise RuntimeError(f'Predictions and targets are expected to have the same shape, but got {preds.shape} and {target.shape}.')",
"def testShapesSame(self, batch_size, in_length, in_channels, out_length,\n out_channels, kernel_shape, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n if use_output_shape:\n output_shape_arg = out_shape\n else:\n output_shape_arg = None\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=output_shape_arg,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, out_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [1, kernel_shape, out_channels, in_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format(\n len(inputs), len(targets)))",
"def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err",
"def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())",
"def _AssertFCShapes(self, op_name, weights, input_tensor):\n weights_shape = weights.get_shape()\n input_shape = input_tensor.get_shape()\n if (len(weights_shape) != 2 or len(input_shape) != 2 or\n weights_shape[1] != input_shape[0]):\n raise ValueError('Incompatible shapes for op %s inputs: %s and %s' %\n (op_name, weights_shape, input_shape))",
"def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) ->None:\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(f'Expected both predictions and target to be either 1- or 2-dimensional tensors, but got {target.ndim} and {preds.ndim}.')\n if num_outputs == 1 and preds.ndim != 1 or num_outputs > 1 and num_outputs != preds.shape[1]:\n raise ValueError(f'Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs} and {preds.shape[1]}.')",
"def _check_and_resize_input_tensor(self, input_data_map):\r\n is_need_reshape = False\r\n input_shape_list = []\r\n\r\n for model_input in self.model_inputs:\r\n tensor_name = model_input.name.rstrip()\r\n input_data = input_data_map.get(tensor_name, None)\r\n if input_data is None:\r\n raise ValueError(f'{tensor_name} is not in model inputs')\r\n if model_input.shape != list(input_data.shape):\r\n self.logger.warning(f'model input shape: {model_input.shape} is not equal'\r\n f'with input data shape: {input_data.shape}, model input shape'\r\n f'would be reshaped')\r\n is_need_reshape = True\r\n input_shape_list.append(list(input_data.shape))\r\n\r\n if is_need_reshape:\r\n self.model_session.resize(self.model_inputs, input_shape_list)\r\n self.model_inputs = self.model_session.get_inputs()",
"def check_shape_equal(pred, labels):\n if pred.shape != labels.shape:\n raise ValueError('Prediction and labels shapes must be equal:'\n f'{pred.shape} vs {labels.shape}.')",
"def assert_spec_compatibility(input_spec: TensorSpec, other_spec: TensorSpec):\n if not input_spec:\n return False\n if isinstance(input_spec, (tuple, list)) and all([isinstance(item, numbers.Integral) for item in input_spec]):\n input_spec = TensorSpec(shape=to_tensor(input_spec))\n\n if isinstance(other_spec, (tuple, list)) and all([isinstance(item, numbers.Integral) for item in other_spec]):\n other_spec = TensorSpec(shape=to_tensor(other_spec))\n\n if (input_spec.ndim is not None or\n input_spec.min_ndim is not None or\n input_spec.max_ndim is not None):\n if other_spec.ndim is None:\n print('Other_spec ' + ' is incompatible with input_spec: '\n 'its rank is undefined, but input_spec requires a '\n 'defined rank.')\n return False\n\n # Check ndim.\n if input_spec.ndim is not None:\n ndim = other_spec.ndim\n if ndim != input_spec.ndim:\n print('Other_spec is incompatible with the input_spec: expected ndim=' + str(input_spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(other_spec._shape_tuple))\n return False\n if input_spec.max_ndim is not None:\n ndim = other_spec.ndim\n if ndim is not None and ndim > input_spec.max_ndim:\n print('Other_spec is incompatible with the input_spec: expected max_ndim=' + str(input_spec.max_ndim) +\n ', found ndim=' + str(ndim))\n return False\n if input_spec.min_ndim is not None:\n ndim = other_spec.ndim\n if ndim is not None and ndim < input_spec.min_ndim:\n print('Other_spec is incompatible with the input_spec: expected min_ndim=' + str(input_spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. Full shape received: ' +\n str(other_spec._shape_tuple))\n return False\n # Check dtype.\n if input_spec.dtype is not None:\n if other_spec.dtype != input_spec.dtype:\n print('Other_spec is incompatible with the input_spec: expected dtype=' + str(input_spec.dtype) +\n ', found dtype=' + str(other_spec.dtype))\n return False\n # Check specific shape axes.\n if input_spec.axes:\n shape = other_spec._shape_tuple\n if shape is not None:\n for axis, value in input_spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape[int(axis)] not in {value, None}:\n print(\n 'Other_spec is incompatible with input_spec: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + str(shape))\n return False\n # Check shape.\n if input_spec.shape is not None:\n shape = other_spec._shape_tuple\n is_compatible=TensorShape(input_spec.shape).is_compatible_with(TensorShape(other_spec._shape_tuple))\n if is_compatible:\n return is_compatible\n if shape is not None:\n for spec_dim, dim in zip(other_spec._shape_tuple, input_spec._shape_tuple):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n print('Other_spec is incompatible with input_spec: expected shape=' + str(input_spec._shape_tuple) +\n ', found shape=' + str(shape))\n return False\n return True",
"def testOutputShapeConsistency(self, use_bias):\n\n # When padding is SAME, then the actual number of padding pixels can be\n # computed as: pad = kernel_shape - strides + (-input_shape % strides)\n # = 5 - 1 + (- 32 % 1) = 4\n\n # The formula for the minimal size is:\n # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h\n # oH = 1 * ( 32 - 1) - 4 + 5 = 32\n\n # The formula for the maximum size (due to extra pixels) is:\n # oH_max = oH + strides[1] - 1\n # so, for strides = 1 and padding = SAME, input size == output size.\n inputs = tf.placeholder(tf.float32, shape=self.in_shape)\n\n conv1 = snt.Conv3DTranspose(name=\"conv3d_1\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((\n self.batch_size,) + self.out_shape + (self.out_channels,)))\n\n self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with(\n [self.out_channels]))",
"def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")",
"def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_height, in_width, in_channels])\n\n conv1 = snt.Conv2D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_h, kernel_shape_w, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def test_shape_error(self):\n raise unittest.SkipTest(\"Failing after fixing Poly unsoundness #4878\")\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n self.CheckShapePolymorphism(\n lambda x, y: x + y,\n input_signature=[tf.TensorSpec([None]), tf.TensorSpec([4])],\n in_shapes=[\"(v,)\", \"(4,)\"],\n expected_output_signature=tf.TensorSpec([None]))\n\n four_ones = np.ones((4,))\n # We get the error even if we use correct actual arguments\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n jax2tf.convert(lambda x, y: x + y,\n in_shapes=[\"(v,)\", \"(4,)\"])(four_ones, four_ones)\n\n with self.assertRaisesRegex(TypeError,\n re.escape(\"dot_general requires contracting dimensions to have the same shape, got [4] and [v].\")):\n jax2tf.convert(lambda x: jnp.matmul(x, x),\n in_shapes=[\"(v, 4)\"])(np.ones((4, 4)))\n\n # TODO: this is an opportunity to improve the translation, should not error\n with self.assertRaisesRegex(TypeError,\n \"Only integers, .* tensors are valid indices, got 0\"):\n jax2tf.convert(lambda x: jnp.split(x, 2),\n in_shapes=[\"(2*v,)\"])(four_ones)",
"def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_depth = random.randint(10, 288)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_d = random.randint(1, 11)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_depth, in_height, in_width, in_channels])\n\n conv1 = snt.Conv3D(\n output_channels=out_channels,\n kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias,\n name=\"conv1\")\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_depth, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_d, kernel_shape_h, kernel_shape_w, in_channels,\n out_channels]))\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))",
"def testInputTypeError(self, batch_size, in_length, in_channels, out_channels,\n kernel_shape, padding, use_bias, out_shape,\n stride_shape, use_output_shape):\n conv1 = snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n for dtype in (tf.uint32, tf.float64):\n x = tf.constant(np.ones([batch_size, in_length,\n in_channels]), dtype=dtype)\n err = \"Input must have dtype tf.float.*\"\n with self.assertRaisesRegexp(TypeError, err):\n conv1(x)",
"def testShapes(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_length = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n\n kernel_shape = random.randint(1, 10)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias)\n\n output1 = conv1(inputs)\n\n self.assertTrue(\n output1.get_shape().is_compatible_with(\n [batch_size, in_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))\n\n conv2 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.VALID,\n stride=1,\n name=\"conv2\",\n use_bias=use_bias)\n\n output2 = conv2(inputs)\n\n self.assertTrue(\n output2.get_shape().is_compatible_with(\n [batch_size, in_length - kernel_shape + 1, out_channels]))\n\n self.assertTrue(\n conv2.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv2.b.get_shape().is_compatible_with(\n [out_channels]))",
"def test_shapes_coupling_out(self):\n\n out_single = self.coupling_net_odd(self.x_single_odd, self.y_single)\n out_batch = self.coupling_net_odd(self.x_batch_odd, self.y_batch)\n\n self.assertEqual(out_single.shape[0], 1,\n 'Batch shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_single.shape[1], self.x_dim_odd//2,\n 'Input/Output shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_batch.shape[0], self.batch_size,\n 'Batch shape mismatch on a batch in CouplingNet')\n self.assertEqual(out_batch.shape[1], self.x_dim_odd // 2,\n 'Input/Output shape mismatch on a batch in CouplingNet')",
"def test_shapes_coupling_out(self):\n\n out_single = self.coupling_net_odd(self.x_single_odd, self.y_single)\n out_batch = self.coupling_net_odd(self.x_batch_odd, self.y_batch)\n\n self.assertEqual(out_single.shape[0], 1,\n 'Batch shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_single.shape[1], self.x_dim_odd//2,\n 'Input/Output shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_batch.shape[0], self.batch_size,\n 'Batch shape mismatch on a batch in CouplingNet')\n self.assertEqual(out_batch.shape[1], self.x_dim_odd // 2,\n 'Input/Output shape mismatch on a batch in CouplingNet')",
"def testOutputShapeConsistency(self, use_bias):\n\n # When padding is SAME, then the actual number of padding pixels can be\n # computed as: pad = kernel_shape - strides + (-input_shape % strides)\n # = 5 - 1 + (- 32 % 1) = 4\n\n # The formula for the minimal size is:\n # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h\n # oH = 1 * ( 32 - 1) - 4 + 5 = 32\n\n # The formula for the maximum size (due to extra pixels) is:\n # oH_max = oH + strides[1] - 1\n # so, for strides = 1 and padding = SAME, input size == output size.\n inputs = tf.placeholder(tf.float32, shape=self.in_shape)\n\n conv1 = snt.Conv2DTranspose(name=\"conv2d_1\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((\n self.batch_size,) + self.out_shape + (self.out_channels,)))\n\n self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2))\n if use_bias:\n self.assertTrue(conv1.b.get_shape().is_compatible_with(\n [self.out_channels]))",
"def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)",
"def check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,\n y_test_pred):\n\n # check input data shapes are consistent\n X_train, y_train = check_X_y(X_train, y_train)\n X_test, y_test = check_X_y(X_test, y_test)\n\n y_test_pred = column_or_1d(y_test_pred)\n y_train_pred = column_or_1d(y_train_pred)\n\n check_consistent_length(y_train, y_train_pred)\n check_consistent_length(y_test, y_test_pred)\n\n if X_train.shape[1] != X_test.shape[1]:\n raise ValueError(\"X_train {0} and X_test {1} have different number \"\n \"of features.\".format(X_train.shape, X_test.shape))\n\n return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred",
"def testOutputShapeInference(self, use_bias):\n inputs = tf.zeros(shape=[3, 5, 5, 5, 2], dtype=tf.float32)\n\n conv1 = snt.Conv3DTranspose(name=\"conv3d_1\",\n output_channels=10,\n output_shape=None,\n kernel_shape=5,\n padding=snt.SAME,\n stride=2,\n use_bias=use_bias)\n\n outputs = conv1(inputs)\n\n self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10, 10)))",
"def _check_input_args(scale, shape, dtype):\n if tf.as_dtype(dtype) not in (tf.int32, tf.int64):\n raise ValueError(\n f'Only tf.int32 and tf.int64 are supported. Found dtype `{dtype}`.')\n\n checks = [\n tf.compat.v1.assert_non_negative(scale),\n tf.compat.v1.assert_integer(scale)\n ]\n with tf.control_dependencies(checks):\n return tf.identity(scale), shape, dtype",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def _check_device(self, inputs):\n for i, input in enumerate(inputs):\n if input._device != self._device:\n raise RuntimeError(\n 'Mismatched device between function and '\n 'element {} of input tensors. ({} vs. {})'\n .format(i, self._device, input._device))"
] | [
"0.69564956",
"0.689856",
"0.6888036",
"0.6857275",
"0.67998415",
"0.67264485",
"0.662843",
"0.64965355",
"0.6468758",
"0.6395005",
"0.63594204",
"0.6335894",
"0.6281145",
"0.6273137",
"0.6260043",
"0.6223739",
"0.6200478",
"0.61994135",
"0.61986095",
"0.6178359",
"0.6171779",
"0.61672837",
"0.61672837",
"0.6165397",
"0.6158025",
"0.61469865",
"0.6142395",
"0.61335194",
"0.6127111",
"0.6119566"
] | 0.71506196 | 0 |
Sets the server_enabled of this FtsSftpSettings. | def server_enabled(self, server_enabled):
self._server_enabled = server_enabled | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_server(self, server):\n log.info(\"Enabling %s in netscaler\", server)\n return self.post(\"server?action=enable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def set_dhcpserver_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetDHCPServerEnabled', self.handle, bEnabled)",
"def set_natserver_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetNATServerEnabled', self.handle, bEnabled)",
"def remote_file_server(self, remote_file_server):\n\n self._remote_file_server = remote_file_server",
"def tcpssl_server_mode(self, tcpssl_server_mode):\n\n self._tcpssl_server_mode = tcpssl_server_mode",
"def setServer(self, server):\n libxml2mod.xmlURISetServer(self._o, server)",
"def server(self, server):\n\n self._server = server",
"async def set_enabled(self, enabled: bool) -> None:\n return await self.api.set_enabled(enabled)",
"def enabled(self, enabled):\n if (self.local_vars_configuration.client_side_validation and\n enabled is not None and not isinstance(enabled, bool)):\n raise ValueError(\"Parameter `enabled` must be a boolean\") # noqa: E501\n\n self._enabled = enabled",
"def server_auth(self, server_auth):\n allowed_values = [\"REQUIRED\", \"IGNORE\"] # noqa: E501\n if server_auth not in allowed_values:\n raise ValueError(\n \"Invalid value for `server_auth` ({0}), must be one of {1}\" # noqa: E501\n .format(server_auth, allowed_values)\n )\n\n self._server_auth = server_auth",
"def __init__(__self__, *,\n config_server: Optional[pulumi.Input['ConfigServerSettingsArgs']] = None,\n enabled_state: Optional[pulumi.Input[Union[str, 'ConfigServerEnabledState']]] = None,\n error: Optional[pulumi.Input['ErrorArgs']] = None):\n if config_server is not None:\n pulumi.set(__self__, \"config_server\", config_server)\n if enabled_state is not None:\n pulumi.set(__self__, \"enabled_state\", enabled_state)\n if error is not None:\n pulumi.set(__self__, \"error\", error)",
"def set_enabled(self, enabled=True):\n self._enabled = enabled",
"def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlShare_SetEnabled', self.handle, bEnabled)",
"def set_soak(self, server, to):\n to_exec = \"UPDATE server SET enable_soak = %s WHERE server_id = %s\"\n self.__cursor.execute(to_exec, (to, str(server.id),))\n self.__connection.commit()",
"def enabled(self, enabled: ConfigNodePropertyBoolean):\n\n self._enabled = enabled",
"def disable_server(self, server):\n log.info(\"Disabling %s in netscaler\", server)\n return self.post(\"server?action=disable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def enabled(self, enabled):\n \n self._enabled = enabled",
"def scp_enable(task):\n cmd = \"ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n c_print(f\"*** {task.host}: SCP has been enabled ***\")",
"def enabled(self, enabled):\n\n self._enabled = enabled",
"def enabled(self, enabled):\n\n self._enabled = enabled",
"def enabled(self, enabled: bool):\n\n self._enabled = enabled",
"def enabled(self, enabled: bool):\n\n self._enabled = enabled",
"def _openSFTPConnection(self):\n if not self.sftp_open:\n self.sftp = paramiko.SFTPClient.from_transport(self.transport)\n self.sftp_open = True",
"def toggle_server(self):\n name = request.params.get('name', g.DEFAULT_SERVER)\n log.debug('toggle_server(%s)' % name)\n servers = model.Session.query(model.Server)\n server = servers.filter(model.Server.name == name).one()\n server.server_on = not server.server_on\n model.Session.update(server)\n model.Session.commit()\n redirect_to('/admin/dashboard')",
"def EnableMappingServer(self):\n return self._get_attribute('enableMappingServer')",
"def setEnabled(self, enable: bool) -> None:\n self.enabled = ...",
"def set_cfg(self, server, cfg):\n\t\tcfg = self.valid_cfg(cfg)\n\t\tserver = valid_server(server)\n\t\tself._reset_server_settings(server)\n\t\treturn self._update_server_cfg(server, cfg)",
"def set_management_ssh(enabled=True, deploy=False):\n\n if enabled is True:\n value = \"no\"\n elif enabled is False:\n value = \"yes\"\n else:\n raise CommandExecutionError(\n \"Invalid option provided for service enabled option.\"\n )\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/service\",\n \"element\": \"<disable-ssh>{}</disable-ssh>\".format(value),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'",
"def set_host_enabled(self, host, enabled):\n if enabled:\n return 'enabled'\n return 'disabled'"
] | [
"0.64041066",
"0.62101185",
"0.6115301",
"0.57635754",
"0.5666829",
"0.560498",
"0.5532448",
"0.55312526",
"0.55182683",
"0.55100137",
"0.54706293",
"0.5436376",
"0.53900456",
"0.5380313",
"0.5367764",
"0.53609276",
"0.5244654",
"0.52353865",
"0.5225218",
"0.5225218",
"0.5204937",
"0.5204937",
"0.5187824",
"0.51824176",
"0.5162303",
"0.5161454",
"0.510343",
"0.51008666",
"0.5099061",
"0.5099061"
] | 0.7935785 | 0 |
Sets the authentication_method of this FtsSftpSettings. | def authentication_method(self, authentication_method):
self._authentication_method = authentication_method | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authentication_methods(self, authentication_methods):\n\n self._authentication_methods = authentication_methods",
"def auth_method(self):\n return self.settings[\"authMethod\"]",
"def auth_method(self):\n return self[\"authMethod\"]",
"def auth_method(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_method\")",
"def auth_method(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_method\")",
"def auth_method(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"auth_method\")",
"def set_method(self, method):\n self.method = method",
"def kafka_authentication_method(self) -> Optional[str]:\n return pulumi.get(self, \"kafka_authentication_method\")",
"def setMethod(self, method):\n self.__set('method', method)",
"def setMethod(self, method):\n\t\tself.method = method",
"def _set_authenticator(self):\n pass",
"def set_method(\n self, method: MethodStr | QtWebEngineCore.QWebEngineHttpRequest.Method\n ):\n self.setMethod(METHODS.get_enum_value(method))",
"def setAuthenticationOptions(self, authOptions):\n internals.blpapi_SessionOptions_setAuthenticationOptions(\n self.__handle,\n authOptions)",
"def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_endpoint_auth_method\")",
"def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_endpoint_auth_method\")",
"def set_auth_type(self, auth_type):\n pass",
"def authentication(self, authentication):\n self._authentication = authentication",
"def setAuthenticationCredentials(self, username, password):\n self.PDFreactorConfiguration.in1[\"authenticationUsername\"] = username\n self.PDFreactorConfiguration.in1[\"authenticationPassword\"] = password",
"def login(self, username=None, password=None):\r\n self.ftp.login()",
"def for_authenticate_only(self):\n self.token['type'] = 'auth'\n\n return self",
"def auth_method(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_method\")",
"def set_credentials(self, authenticator):\n pass",
"def auth_protocol(self, auth_protocol):\n\n self._auth_protocol = auth_protocol",
"def token_endpoint_auth_method(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"token_endpoint_auth_method\")",
"def get_auth(self):\n return {'method': yeti_config.core.auth}",
"def authentication_mode(self) -> Optional[pulumi.Input['UserAuthenticationModeArgs']]:\n return pulumi.get(self, \"authentication_mode\")",
"def authentication_mode(self) -> Optional[pulumi.Input['UserAuthenticationModeArgs']]:\n return pulumi.get(self, \"authentication_mode\")",
"def default_protocol(self):\n return \"sftp://\"",
"def file_transfer_method_id(self, file_transfer_method_id):\n\n self._file_transfer_method_id = file_transfer_method_id",
"def method(self, method):\n if method is None:\n raise ValueError(\"Invalid value for `method`, must not be `None`\")\n\n self._method = method"
] | [
"0.62380636",
"0.61203897",
"0.5855358",
"0.55694807",
"0.5538085",
"0.5522016",
"0.5440951",
"0.53794426",
"0.53782594",
"0.5377386",
"0.5356047",
"0.5349534",
"0.52542967",
"0.5180398",
"0.5180398",
"0.51782846",
"0.5156288",
"0.50967616",
"0.5048462",
"0.501886",
"0.50109535",
"0.50089884",
"0.5004058",
"0.4978331",
"0.4917409",
"0.49169376",
"0.49169376",
"0.4882829",
"0.48248333",
"0.47892573"
] | 0.7283392 | 0 |
Sets the keystore_file_path of this FtsSftpSettings. | def keystore_file_path(self, keystore_file_path):
self._keystore_file_path = keystore_file_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keystore_file_password(self, keystore_file_password):\n\n self._keystore_file_password = keystore_file_password",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response",
"def org_apache_felix_https_keystore(self, org_apache_felix_https_keystore: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore = org_apache_felix_https_keystore",
"def org_apache_felix_https_keystore_key_password(self, org_apache_felix_https_keystore_key_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore_key_password = org_apache_felix_https_keystore_key_password",
"def setStoreFile(self, storeFile):\n self._checkArgs({'storeFile': bool})\n self.storeFile = storeFile",
"def org_apache_felix_https_keystore_password(self, org_apache_felix_https_keystore_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore_password = org_apache_felix_https_keystore_password",
"def settingsFilePath(self, value):\n self._settingsFilePath = value\n self.readSettingsFile()",
"def setCertfile(self, certfile):\r\n if not os.access(certfile, os.R_OK):\r\n raise IOError('No such certfile found: %s' % (certfile))\r\n self.certfile = certfile",
"def set_config_file_path(self, config_file_path):\n try:\n if type(config_file_path) is not str:\n raise Exception(\"Class Config_Manager- \" +\n \"set_config_file_path(..): \" +\n \"config_file_path not of type str\")\n\n self.config_file_path = config_file_path\n except Exception as error:\n utilities.show_exception_info(error)\n raise error",
"def _set_snakefile(self):\n from snakemake import SNAKEFILE_CHOICES\n\n for snakefile in SNAKEFILE_CHOICES:\n if os.path.exists(os.path.join(self.workdir, snakefile)):\n self.snakefile = snakefile\n break",
"def org_apache_felix_https_truststore_password(self, org_apache_felix_https_truststore_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_truststore_password = org_apache_felix_https_truststore_password",
"async def setconfigfile(self, ctx, *, config_file):\n self.settings.setConfigFile(config_file)\n await ctx.send(inline('Done'))",
"def org_apache_felix_https_keystore(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_https_keystore",
"def setWriteFilePath(self, file_path):\n self.file_path = file_path",
"def org_apache_felix_https_keystore_key_password(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_https_keystore_key_password",
"def setSessionPath(self, sessionPath):\n self.__sessionPath = sessionPath\n self.__sessionDownloadPath = os.path.join(self.__sessionPath, \"downloads\")",
"def log_file_path(self, log_file_path):\n self._log_file_path = log_file_path\n return self",
"def setNfsRoot(self):\n\t\tself.nfsroot = self.settings.getKeyValue('nfs.root')\n\t\treturn None",
"def set_recovery_conf(self, recovery_conf):\n try:\n fp = open(recovery_conf, 'r')\n fp.close()\n self.recovery_conf = recovery_conf\n except Exception as e:\n print(e)\n sys.exit(e.errno)",
"def file_from_sf(self, file_from_sf):\n\n self._file_from_sf = file_from_sf",
"def set_jwt_file(self, filename):\n self.jwtfile = filename",
"def setSignPDF(self, keystoreURL, keyAlias, keystorePassword, keystoreType, signingMode):\n self.PDFreactorConfiguration.in1[\"signPdfKeystoreURL\"] = keystoreURL\n self.PDFreactorConfiguration.in1[\"signPdfKeyAlias\"] = keyAlias\n self.PDFreactorConfiguration.in1[\"signPdfKeystorePassword\"] = keystorePassword\n self.PDFreactorConfiguration.in1[\"signPdfKeystoreType\"] = keystoreType\n self.PDFreactorConfiguration.in1[\"signPdfSigningMode\"] = signingMode",
"def writeShREEKConfig(self, filename):\n self._ShREEKConfig.save(filename)\n return",
"def org_apache_felix_https_truststore(self, org_apache_felix_https_truststore: ConfigNodePropertyString):\n\n self._org_apache_felix_https_truststore = org_apache_felix_https_truststore",
"def org_apache_felix_https_keystore_password(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_https_keystore_password",
"def setConfigFileName(self, configFileName):\n self._configFileName = configFileName\n if self._configFileName:\n self._configFileName = os.path.abspath(configFileName)",
"def remote_file_server(self, remote_file_server):\n\n self._remote_file_server = remote_file_server",
"def SetCredentials(self,\n api_key,\n ):\n self._api_key = api_key",
"def set_ssl(self):\n for params in self.config.get_ssl_params():\n self.connection.transport.set_ssl(**params)",
"def set_log_file(self, file_path):\n res = self._dll.JLINKARM_SetLogFile(file_path.encode())\n if res:\n raise errors.JLinkException(res)"
] | [
"0.7145288",
"0.6014983",
"0.58418983",
"0.55596626",
"0.5433482",
"0.5313241",
"0.51829666",
"0.5103493",
"0.5063631",
"0.49352625",
"0.49106106",
"0.48667493",
"0.48239157",
"0.48141515",
"0.4736292",
"0.46992692",
"0.4678572",
"0.46774423",
"0.4635615",
"0.46148446",
"0.46089324",
"0.45850602",
"0.454544",
"0.45305765",
"0.45283824",
"0.44919273",
"0.44900054",
"0.44756043",
"0.44605196",
"0.4437696"
] | 0.7703561 | 0 |
Sets the keystore_file_password of this FtsSftpSettings. | def keystore_file_password(self, keystore_file_password):
self._keystore_file_password = keystore_file_password | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def org_apache_felix_https_keystore_key_password(self, org_apache_felix_https_keystore_key_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore_key_password = org_apache_felix_https_keystore_key_password",
"def org_apache_felix_https_keystore_password(self, org_apache_felix_https_keystore_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore_password = org_apache_felix_https_keystore_password",
"def keystore_file_path(self, keystore_file_path):\n\n self._keystore_file_path = keystore_file_path",
"def org_apache_felix_https_truststore_password(self, org_apache_felix_https_truststore_password: ConfigNodePropertyString):\n\n self._org_apache_felix_https_truststore_password = org_apache_felix_https_truststore_password",
"def org_apache_felix_https_keystore_key_password(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_https_keystore_key_password",
"def org_apache_felix_https_keystore_password(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_https_keystore_password",
"def setpassword(self, pwd):\n pass",
"def set_password(self, password):\n self.cloudserver.change_password(password)",
"def settings_app_password(self, settings_app_password):\n\n self._settings_app_password = settings_app_password",
"def set_password(self, password):\n self.PASSWORD = password",
"def set_password(self, password):\n self.password = password",
"def org_apache_felix_https_keystore(self, org_apache_felix_https_keystore: ConfigNodePropertyString):\n\n self._org_apache_felix_https_keystore = org_apache_felix_https_keystore",
"def set_password(self, password):\n self.PASS = password",
"def set_password(self, password):\n self.authentication.password = password",
"def org_apache_felix_https_truststore_password(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_https_truststore_password",
"def set_password(self, password):\n self.password = self.hash_password(password)",
"def set_password(self, password):\n self.password = generate_password_hash(password, method='pbkdf2:sha256')",
"def set_password(self, password):\n from kalon.auth import encrypt_password\n self.document.password = encrypt_password(password)",
"def set_password(self, password):\n self.password_hash = generate_password_hash(f\"{password}{self.user_salt}\")",
"def set_password(self, value):\n # Salt need to be generated before set password\n m = hashlib.sha256()\n m.update('-'.join([\n str(datetime.now()),\n config.get('security.password_salt')\n ]))\n self.salt = m.hexdigest()\n self.password_pending = False\n self.password = self.__encrypt(value)",
"def set_password(self, password):\n self.password = md5crypt(password, gen_salt())",
"def set_Password(self, value):\n super(DownloadDocumentInputSet, self)._set_input('Password', value)",
"def setUserPassword(self,value):\n self.PDFreactorConfiguration.in1[\"userPassword\"] = value",
"def set_password(self, password):\n self.password = generate_password_hash(password)",
"def password(self, password):\n\n self._password = password",
"def password(self, password):\n\n self._password = password",
"def password(self, password):\n\n self._password = password",
"def password(self, password):\n\n self._password = password",
"def set_session_password(self, pwd):\n\n if (self.__rootpwd == None):\n self.__rootpwd = hashlib.md5(pwd).hexdigest()\n else:\n self.__change_password(pwd)",
"def password(self, password: str):\n\n self._password = password"
] | [
"0.697365",
"0.6841448",
"0.6515118",
"0.6424852",
"0.60550404",
"0.5957332",
"0.5679264",
"0.56787336",
"0.56772876",
"0.56676793",
"0.5628823",
"0.55949026",
"0.55878115",
"0.5577493",
"0.5540421",
"0.551728",
"0.5511115",
"0.54981464",
"0.54964054",
"0.5444806",
"0.54447436",
"0.54000777",
"0.53954804",
"0.5367501",
"0.535635",
"0.535635",
"0.535635",
"0.535635",
"0.5351832",
"0.5337329"
] | 0.8196835 | 0 |
Sets the ciphers of this FtsSftpSettings. | def ciphers(self, ciphers):
self._ciphers = ciphers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")",
"def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")",
"def ciphers(self):\n return self._ciphers",
"def set_ssl(self):\n for params in self.config.get_ssl_params():\n self.connection.transport.set_ssl(**params)",
"def test_set_cipher_list(self, context, cipher_string):\n context.set_cipher_list(cipher_string)\n conn = Connection(context, None)\n\n assert \"AES128-SHA\" in conn.get_cipher_list()",
"def ciphers_obj(self):\n if self.esp_enc_alg == \"ENCR_AES_GCM_16_IIV\":\n ## BEGIN code to update\n \n return [ AES.new(self.esp_enc_key,AES.MODE_GCM, nonce=self.nonce)]\n ## END code to update\n raise UnsupportedEncAlgError(sa.esp_enc_alg, \"unsupported\")",
"def _set_tls_capabilities(self, caps):\n if self.settings.get(\"ssl-mode\") == SSLMode.DISABLED:\n return\n\n if self.stream.is_socket():\n if self.settings.get(\"ssl-mode\"):\n _LOGGER.warning(\"SSL not required when using Unix socket.\")\n return\n\n if \"tls\" not in caps:\n self.close_connection()\n raise OperationalError(\"SSL not enabled at server\")\n\n is_ol7 = False\n if platform.system() == \"Linux\":\n distname, version, _ = linux_distribution()\n try:\n is_ol7 = \"Oracle Linux\" in distname and version.split(\".\")[0] == \"7\"\n except IndexError:\n is_ol7 = False\n\n if sys.version_info < (2, 7, 9) and not is_ol7:\n self.close_connection()\n raise RuntimeError(\n \"The support for SSL is not available for this Python version\"\n )\n\n self.protocol.set_capabilities(tls=True)\n self.stream.set_ssl(\n self.settings.get(\"tls-versions\", None),\n self.settings.get(\"ssl-mode\", SSLMode.REQUIRED),\n self.settings.get(\"ssl-ca\"),\n self.settings.get(\"ssl-crl\"),\n self.settings.get(\"ssl-cert\"),\n self.settings.get(\"ssl-key\"),\n self.settings.get(\"tls-ciphersuites\"),\n )\n if \"attributes\" in self.settings:\n conn_attrs = self.settings[\"attributes\"]\n self.protocol.set_capabilities(session_connect_attrs=conn_attrs)",
"def tcpssl_server_mode(self, tcpssl_server_mode):\n\n self._tcpssl_server_mode = tcpssl_server_mode",
"def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))",
"def setCryptographicHandlers( self , tkip = None , aes = None ):\n\t\tself.handleTKIP = tkip\n\t\tself.handleAES \t= aes",
"def org_apache_felix_https_jetty_ciphersuites_included(self, org_apache_felix_https_jetty_ciphersuites_included: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_ciphersuites_included = org_apache_felix_https_jetty_ciphersuites_included",
"def org_apache_felix_https_jetty_ciphersuites_included(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_included",
"def setTlsOptions(self, tlsOptions):\n internals.blpapi_SessionOptions_setTlsOptions(\n self.__handle,\n get_handle(tlsOptions))",
"def get_all_ciphers(method):\n ssl_method = getattr(SSL, method.replace('.', '_') + '_METHOD')\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n context = SSL.Context(ssl_method)\n context.set_cipher_list(\"ALL:COMPLEMENTOFALL\")\n sock = SSL.Connection(context, sock)\n ciphers = sock.get_cipher_list()\n except SSL.Error:\n ciphers = []\n finally:\n sock.close()\n\n return ciphers",
"def _copy_cipher_settings(self, other):\n other.cipherNames = self.cipherNames\n other.macNames = self.macNames\n other.keyExchangeNames = self.keyExchangeNames\n other.cipherImplementations = self.cipherImplementations\n other.minVersion = self.minVersion\n other.maxVersion = self.maxVersion\n other.versions = self.versions",
"def setHTTPSMode(self, httpsMode):\n self.PDFreactorConfiguration.in1[\"httpsMode\"] = httpsMode",
"def __init__(self):\n super(BasicAuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1\n self._ciphers = ':'.join((\n 'AES128-SHA',\n 'DES-CBC3-SHA',\n 'AES256-SHA',\n 'DHE-DSS-DES-CBC3-SHA',\n 'DHE-RSA-DES-CBC3-SHA',\n 'DH-DSS-AES128-SHA',\n 'DH-RSA-AES128-SHA',\n 'DHE-DSS-AES128-SHA',\n 'DHE-RSA-AES128-SHA',\n 'DH-RSA-AES256-SHA',\n 'DHE-DSS-AES256-SHA',\n 'DHE-RSA-AES256-SHA',\n ))",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self, org_apache_felix_https_jetty_ciphersuites_excluded: ConfigNodePropertyArray):\n\n self._org_apache_felix_https_jetty_ciphersuites_excluded = org_apache_felix_https_jetty_ciphersuites_excluded",
"def set_cipher(self, key_name, hint):\n message_key_types.set_cipher(self.shared_key, self.nonce, key_name, hint)",
"def ssl_cipher(self) -> str:\n return pulumi.get(self, \"ssl_cipher\")",
"def _configure_ipsec_secrets(self, ipsec_confs):\n secrets_tpl = '../config/tpl/ipsec/ipsec.secrets'\n secret_confs = []\n\n for name, conf in ipsec_confs.items():\n secret_conf = {\n 'right_public_ip': conf['right_public_ip'],\n 'psk': env.get('ipsec_psk_%s' % name),\n }\n secret_confs.append(secret_conf)\n\n # Configure the /etc/ipsec.d/<name>.conf file with passwords\n with hide(*fab_output_hides):\n return upload_template_changed(\n secrets_tpl,\n '/etc/ipsec.secrets',\n context={'confs': secret_confs},\n use_sudo=True,\n mode=0600,\n use_jinja=True\n )",
"def setoptions(cls, session):\n newlist = list(clslist)\n list(map(lambda x: cls.setclsoptions(x, session), newlist))",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def load_twisted_conch_ssh_transport(finder, module):\n finder.IncludePackage(\"Crypto.Cipher\")",
"def __Cipher(self, selector):\n assert selector in self.OP_TYPES, 'Invalid selector :%s' % selector\n if selector == self.OP_ACTIVE and (len(self.ciphers.keys()) > 1 or\n not len(self.ciphers.keys())):\n assert 0, 'If both encryption and decryption used then selector must \\\n be OP_ENCRYPT or OP_DECRYPT and at least 1 must be active'\n\n cipher = None\n if selector == self.OP_ACTIVE:\n # should only be one cipher active\n cipher = self.ciphers.values()[0]\n else:\n cipher = self.ciphers.get(selector)\n # have we been created a cipher for this selector yet?\n if not cipher:\n # no, so set it up as requested\n\n # convert between AES and EVP modes\n # NOTE: AES auto-selects based on key size using the same mode, but\n # EVP requires different mode strings for each key size (in bits)\n mode = 'aes_%s_cbc' % (self.key_size * 8)\n cipher = EVP.Cipher(alg=mode,\n key=self.key_bytes,\n iv=self.IV,\n op=selector,\n padding=0)\n self.ciphers[selector] = cipher\n return cipher",
"def _set_mode(self, langs):\n if self.__mode == \"configparser\":\n config = configparser.ConfigParser()\n config.read(self.__lang_file)\n config[\"servers\"] = {}\n for lang in langs:\n config[lang] = {}\n with open(self.__lang_file, 'w') as configfile:\n config.write(configfile)\n elif self.__mode == \"json\":\n with open(self.__lang_file, 'w') as f:\n f.write(\"{}\")\n with open(self.__lang_file, 'r') as f:\n data = json.load(f)\n data[\"servers\"] = {}\n for lang in langs:\n data[lang] = {}\n with open(self.__lang_file, 'w') as f:\n json.dump(data, f, indent=2)",
"def set_options(self, kvlist):\r\n if not kvlist:\r\n return\r\n msg = \" \".join([\"%s=\\\"%s\\\"\"%(k,quote(v)) for k,v in kvlist])\r\n return self.sendAndRecv(\"SETCONF %s\\r\\n\"%msg)",
"def org_apache_felix_https_jetty_ciphersuites_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_excluded",
"def passive_clusters(self, passive_clusters):\n\n self._passive_clusters = passive_clusters",
"def default_protocol(self):\n return \"sftp://\""
] | [
"0.63286173",
"0.63286173",
"0.6211777",
"0.55823",
"0.53944427",
"0.52489173",
"0.5057643",
"0.49852008",
"0.4932068",
"0.4884324",
"0.48747385",
"0.48445147",
"0.48318604",
"0.48281583",
"0.48003778",
"0.47863695",
"0.47562948",
"0.47462425",
"0.47110054",
"0.46554583",
"0.46149644",
"0.46104816",
"0.45258972",
"0.45090297",
"0.4486668",
"0.447588",
"0.44754538",
"0.444228",
"0.4398951",
"0.43851012"
] | 0.7703951 | 0 |
Sets the known_users_file_path of this FtsSftpSettings. | def known_users_file_path(self, known_users_file_path):
self._known_users_file_path = known_users_file_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __parse_user_keyfiles(self):\n\n user_sshdir = os.path.expanduser('~/.ssh')\n if not os.path.isdir(user_sshdir):\n return\n\n paths = []\n for filename in os.listdir(user_sshdir):\n if filename in SSH_CONFIG_FILES or os.path.splitext(filename)[1] != '.pub':\n continue\n path = os.path.join(user_sshdir, filename)\n if os.path.isfile(path):\n paths.append(path)\n for path in paths:\n try:\n sshkey = SSHKeyFile(self, path)\n except SSHKeyError as e:\n self.log.debug(e)\n continue\n\n self[sshkey.path] = sshkey",
"def initUserFilesIfNeeded() -> None:\n\n # Create directories if they don't exist\n for userFileDir in UserFileDirs.list():\n os.makedirs(userFileDir, exist_ok=True)\n\n # Init preferences file\n from frcpredict.ui import Preferences\n Preferences.initFile()",
"def setusers(self, users=None):\n if users:\n self.users = users\n return\n import jsb.lib.users as u\n if not u.users: u.users_boot()\n self.users = u.users",
"def set_user_home(self, path):\n os.environ['HOME'] = path",
"def set_user_home(self, path):\n os.environ['HOME'] = path",
"def settingsFilePath(self, value):\n self._settingsFilePath = value\n self.readSettingsFile()",
"def __set_full_path_of_file(self, value):\n self.full_path_of_file = value",
"def files(self):\n log.info(\"starting file iteration\")\n ssh = paramiko.SSHClient()\n\n if self.load_system_host_keys:\n log.debug('loading system host keys')\n ssh.load_system_host_keys()\n if self.host_key_auto_add:\n log.debug('setting host key policy to auto add')\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n sshconf = paramiko.SSHConfig()\n # paramiko uses 'USER' environment var to parsing %u, %r\n # when nrpe daemon run the check, that var is not set and results in\n # 'None' user, set it before parsing config file.\n local_user = pwd.getpwuid(os.getuid()).pw_name\n os.environ['USER'] = os.environ.get('USER', local_user)\n with open('/etc/ssh/ssh_config') as f:\n sshconf.parse(f)\n\n # paramiko wrongly parses %u/%r@%h as it use same value for %u and %r\n # replace %r with the configured username\n self.kwargs['key_filename'] = [\n path for path in sshconf.lookup(self.hostname)['identityfile']\n ]\n\n log.info(\"connecting to %s\", self.hostname)\n log.debug(\"kwargs: %s\", str(self.kwargs))\n for key_file in self.kwargs['key_filename'][:]:\n try:\n ssh.connect(**self.kwargs)\n break\n except IOError as e:\n log.info(\"Key %s does not exist, trying another\", key_file)\n try:\n self.kwargs['key_filename'].pop(0)\n except IndexError:\n raise Exception('No more ssh private key to try.'\n 'Make sure good ssh key exist.')\n log.debug(\"opening sftp\")\n ftp = ssh.open_sftp()\n log.debug(\"chdir %s\", self.pwd)\n try:\n ftp.chdir(self.pwd)\n except IOError, e:\n log.error(\"Error going to directory %s: %s\", self.pwd, e)\n return\n\n # optimization. To avoid running fstat for every backup file, I filter\n # out to only test the newest backup for each facility\n files = {}\n log.debug(\"running ls\")\n for fattr in ftp.listdir_attr():\n # a longname looks like:\n # -rw-r--r-- 1 radvd quagga 5586928 Jun 22 06:35\n # postgresql-roundcube-2016-06-22-06_34_47.sql.xz\n if fattr.longname.startswith('d'): # is a directory\n log.debug(\"Skipping directory %s\", fattr.longname)\n continue\n filename = fattr.longname.split()[-1]\n log.debug('processing %s', filename)\n\n f = self.make_file(filename, None)\n if not f:\n log.debug('skipping')\n continue\n key, value = f.items()[0]\n # we may want to run fstat on this filename later on\n f[key]['filename'] = filename\n # keeps only the newest file for each facility\n if (key not in files) or (value['date'] > files[key]['date']):\n log.debug('first or newer.')\n files.update(f)\n else:\n log.debug('was old')\n\n # now fetch fstat for each file, and yield them\n for k, f in files.items():\n log.debug('getting fstat for %s', f['filename'])\n filestat = ftp.stat(f['filename'])\n f['size'] = filestat.st_size\n yield {k: f}",
"def setPreferencesAtStartup(self):\n\t\tif os.path.isfile(self.userPrefsFileName):\n\t\t\tprefs = open(self.userPrefsFileName, 'r')\n\t\t\tprefsLine = prefs.readline()\n\t\t\tprefs.close()\n\t\t\t\n\t\t\tfor i in range(0,len(prefsLine)):\n\t\t\t\tc = prefsLine[i]\n\t\t\t\tif c is not \"/\":\n\t\t\t\t\tself.setPreference(c)\n\t\t\t\telse:\n\t\t\t\t\tself.setPreference(prefsLine[i:])\n\t\t\t\t\tbreak",
"def overridden_users_home_directories(self, overridden_users_home_directories):\n\n self._overridden_users_home_directories = overridden_users_home_directories",
"def set_ssh_keys(self, ssh_keys):\n self.ssh_keys = {}\n self.ssh_keys_private = {}\n for user_name in ssh_keys:\n key = ssh_keys[user_name]\n if key.startswith('file:'):\n public_key_file = key.split('file:')[1]\n with open(public_key_file) as fd:\n key = fd.read()\n # try to open private key\n private_key_file = public_key_file.split('.pub')[0]\n try:\n with open(private_key_file) as fd:\n self.ssh_keys_private[user_name] = private_key_file\n except FileNotFoundError:\n pass\n\n self.ssh_keys[user_name] = key.strip()\n if user_name == 'root':\n # check if the private key is available:\n # (1) check ssh-agent\n # (2) check for private key file\n command = \"echo {} | ssh-keygen -l -f - | awk '{{ print $2 }}'\"\n finger = check_output(command.format(self.ssh_keys[user_name]),\n shell=True, encoding='ascii')\n try:\n command = 'ssh-add -l | grep -q {}'\n check_call(command.format(finger), shell=True)\n return\n except CalledProcessError:\n if user_name not in self.ssh_keys_private:\n fatal('Could not find matching ssh key for root -',\n 'neither in ssh-agent nor on disk.')",
"def remote_file_server(self, remote_file_server):\n\n self._remote_file_server = remote_file_server",
"def find_user_file(self, option_name, filename_list):\n if option_name is not None:\n filePath = self._options.get(option_name, None)\n else:\n filePath = None\n \n # Look for default filenames if a path wasn't provided.\n if filePath is None:\n for filename in filename_list:\n thisPath = os.path.join(self.project_dir, filename)\n if os.path.isfile(thisPath):\n filePath = thisPath\n break\n # Use the path passed in options, which may be absolute, relative to the\n # home directory, or relative to the project directory.\n else:\n filePath = os.path.expanduser(filePath)\n if not os.path.isabs(filePath):\n filePath = os.path.join(self.project_dir, filePath)\n \n return filePath",
"def set_user_config(self, data):\n config = self.read_config_obj(self.account_file)\n for key, value in data.items():\n config.set(self.user, str(key), value)\n\n self.write_config(self.account_file, config)",
"def users(self, users):\n\n self._users = users",
"def users(self, users):\n\n self._users = users",
"def users(self, users):\n\n self._users = users",
"def checkAndInitUsers(self):\n # config\n users = {}\n\n # iterate through all usernames\n for rUser in pwd.getpwall():\n # check userid\n if rUser.pw_uid is not None and rUser.pw_uid != \"\" and not (\"/nologin\" in rUser.pw_shell or \"/false\" in rUser.pw_shell):\n # save our user, if it mactches\n if verifyNormalUserID(rUser.pw_uid):\n # get processed usernames\n userFName = getNormalizedUserNames(pUser=rUser)[1]\n # save ()\n users[rUser.pw_name] = [rUser.pw_uid, userFName]\n\n # get user config\n timekprConfigManager = timekprConfig()\n # load user config\n timekprConfigManager.loadMainConfiguration()\n\n # go through our users\n for rUser in users:\n # get path of file\n file = os.path.join(timekprConfigManager.getTimekprConfigDir(), cons.TK_USER_CONFIG_FILE % (rUser))\n\n # check if we have config for them\n if not os.path.isfile(file):\n log.log(cons.TK_LOG_LEVEL_INFO, \"setting up user \\\"%s\\\" with id %i\" % (rUser, users[rUser][0]))\n # user config\n timekprUserConfig(timekprConfigManager.getTimekprConfigDir(), rUser).initUserConfiguration()\n # user control\n timekprUserControl(timekprConfigManager.getTimekprWorkDir(), rUser).initUserControl()\n\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"finishing setting up users\")\n\n # user list\n return users",
"def chown_file ( self, fspath ):\n return",
"def setPaths(self):\n self.local_path = g.os_path_join(g.app.loadDir,\"..\",\"plugins\",\"trees\")\n # self.remote_path = r\"cvs.sourceforge.net/viewcvs.py/leo/leo/plugins/trees\"\n self.remote_path = r'leo.tigris.org/source/browse/leo/plugins/trees'",
"def __gitEditUserConfig(self):\n self.vcs.gitEditUserConfig()",
"def set_paths(self, paths):\n self.paths = paths",
"def set_share_user_home_dir(self, bShareUserHomeDir):\n\t\tcall_sdk_function('PrlVmCfg_SetShareUserHomeDir', self.handle, bShareUserHomeDir)",
"def getFSUserDir(self):\n\n return self.config.get(\"FileMan\",\"homedir\") + self.getRole()[\"roleName\"]",
"def set_up_pyfakefs(test_self, allow_root_user=True):\n real_cwd = os.path.realpath(os.getcwd())\n config_dir = os.path.realpath(environment.get_config_directory())\n test_self.setUpPyfakefs(allow_root_user=allow_root_user)\n test_self.fs.add_real_directory(config_dir, lazy_read=False)\n os.chdir(real_cwd)",
"def logged_in_users(self, logged_in_users):\n\n self._logged_in_users = logged_in_users",
"def create_user_configuration(self):\n\n # Ask before touching things that we do not have to!\n if self.test.user_conf_dir_exists():\n if self.test.user_configuration_seems_complete():\n reply = question(_(\"\"\"User configuration already exists.\nDo you want to rewrite it with a new one?\"\"\"), False)\n if not reply:\n report(_(\"Keeping configuration intact and continuing with settings.\"))\n return\n else:\n self.remove_user_configuration()\n else:\n reply = question(_(\"\"\"User configuration already exists, but it seems to be incomplete.\nDo you want to keep it?\"\"\"), False)\n if not reply:\n self.remove_user_configuration()\n else:\n report(_(\"Keeping configuration intact and aborting.\"))\n return\n # Copy the original intact configuration files\n # creating a conf/ subdirectory\n config_root = self.test.user_conf_dir()\n shutil.copytree(buildconfig.SPD_CONF_ORIG_PATH, config_root)\n # Ensure the files are writeable when copying from immutable directory.\n umask = os.umask(0)\n os.umask(umask)\n os.chmod(self.test.user_conf_dir(), 0o755 & ~umask)\n for root, dirs, files in os.walk(self.test.user_conf_dir()):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o755 & ~umask)\n for f in files:\n os.chmod(os.path.join(root, f), 0o644 & ~umask)\n\n report(_(\"User configuration created in %s\" % self.test.user_conf_dir()))",
"def users_filename(self):\n pass",
"def chown_dir ( self, fspath ):\n return",
"def import_ssh_keys(self, user, ssh_keys):\n ssh_key_dir = self.get_ssh_dir(user)\n if user != 'root':\n filetest.create_dir(ssh_key_dir)\n\n for ssh_file in (ssh_keys.ssh_key_file, ssh_keys.ssh_pub_file):\n shutil.copy(ssh_file, ssh_key_dir)"
] | [
"0.57418454",
"0.5557294",
"0.54986745",
"0.5214731",
"0.5214731",
"0.5180744",
"0.5055465",
"0.5035089",
"0.50259876",
"0.4974094",
"0.496511",
"0.49633723",
"0.4950638",
"0.49499902",
"0.48848796",
"0.48848796",
"0.48848796",
"0.4883349",
"0.48802492",
"0.4863774",
"0.4828682",
"0.48161942",
"0.48142204",
"0.48000604",
"0.47990066",
"0.47901568",
"0.47899166",
"0.4789847",
"0.47535414",
"0.47497293"
] | 0.8051893 | 0 |
Sets the overridden_users_home_directories of this FtsSftpSettings. | def overridden_users_home_directories(self, overridden_users_home_directories):
self._overridden_users_home_directories = overridden_users_home_directories | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_share_user_home_dir(self, bShareUserHomeDir):\n\t\tcall_sdk_function('PrlVmCfg_SetShareUserHomeDir', self.handle, bShareUserHomeDir)",
"def set_user_home(self, path):\n os.environ['HOME'] = path",
"def set_user_home(self, path):\n os.environ['HOME'] = path",
"def homeDirectory(self, ignored_value):\n\t\tself.__homeDirectory = self._resolve_home_directory()",
"def reset_chroot(self):\n try:\n if self.HAS_CHROOT:\n task = reset_ldap_users.post()\n MonQTask.wait_for_tasks(query={\n '_id': task._id, 'state': {'$in': ['ready', 'busy']}\n }, timeout=120000)\n except Exception, e:\n print \"Exception reseting chroot home folders.\"\n raise",
"def user_home(self, user_home_path: str):\n c = self.clone()\n c._user_home_path = path.normpath(user_home_path)\n return c",
"def user_home_path(self):\n return path.join(env.user_home, self._user_home_path)",
"def __validate_home_dir(self, home, login, system, force):\n\n\t\tif system:\n\t\t\tif home:\n\t\t\t\tif os.path.exists(home) and not force:\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified directory '\n\t\t\t\t\t\t'{0} for system user {1} already exists. If you '\n\t\t\t\t\t\t'really want to use it, please use the --force '\n\t\t\t\t\t\t'argument.').format(stylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME,login)))\n\n\t\t\t\tif not home.startswith(\n\t\t\t\t\tsettings.defaults.home_base_path) \\\n\t\t\t\t\tand not home.startswith('/var') \\\n\t\t\t\t\tor home.startswith(LMC.configuration.groups.base_path) \\\n\t\t\t\t\tor home.find('/tmp') != -1:\n\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified home '\n\t\t\t\t\t\t'directory {0} for system user {1} is outside {2} '\n\t\t\t\t\t\t'and /var, or inside {3} or a temporary '\n\t\t\t\t\t\t'directory (/var/tmp, /tmp). This is unsupported, '\n\t\t\t\t\t\t'Aborting.').format(\n\t\t\t\t\t\tstylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME,login),\n\t\t\t\t\t\tsettings.defaults.home_base_path,\n\t\t\t\t\t\tLMC.configuration.groups.base_path))\n\n\t\t\t\tif home in (user.homeDirectory for user in self):\n\t\t\t\t\traise exceptions.BadArgumentError(_(u'Specified home '\n\t\t\t\t\t\t'directory {0} for system user {1} is already owned '\n\t\t\t\t\t\t'by another user. Please choose another one.').format(\n\t\t\t\t\t\tstylize(ST_PATH, home),\n\t\t\t\t\t\tstylize(ST_NAME, login)))\n\n\t\t\t\treturn home\n\t\telse: # not system\n\t\t\tif home:\n\t\t\t\tlogging.warning(_(u'Specifying an alternative home directory '\n\t\t\t\t\t'is not allowed for standard users. Using standard home '\n\t\t\t\t\t'path {0} instead.').format(\n\t\t\t\t\t\tstylize(ST_PATH, '%s/%s' % (\n\t\t\t\t\t\t\tLMC.configuration.users.base_path, login))))\n\n\t\treturn \"%s/%s\" % (LMC.configuration.users.base_path, login)",
"def ensure_home_directory(fs, username):\n home_dir = '/user/%s' % username\n fs.do_as_user(username, fs.create_home_dir, home_dir)",
"def home_folder(self, home_folder):\n\n self._home_folder = home_folder",
"def is_share_user_home_dir(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsShareUserHomeDir', self.handle))",
"def setusers(self, users=None):\n if users:\n self.users = users\n return\n import jsb.lib.users as u\n if not u.users: u.users_boot()\n self.users = u.users",
"def initUserFilesIfNeeded() -> None:\n\n # Create directories if they don't exist\n for userFileDir in UserFileDirs.list():\n os.makedirs(userFileDir, exist_ok=True)\n\n # Init preferences file\n from frcpredict.ui import Preferences\n Preferences.initFile()",
"def get_user_homedir():\n return os.path.expanduser(\"~\")",
"def add_user_home_dir(self, username: str) -> None:\n cmd = self.create_user_home_dir_cmd + [username]\n self.log.info(\"Creating '{}' user home directory using command '{}'\".format(\n username, ' '.join(cmd)))\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n out, err = proc.communicate()\n if proc.returncode:\n raise RuntimeError(\"Failed to create '{}' user home directory: {}\".format(\n username, err))",
"def set_user_defined_shared_folders_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetUserDefinedSharedFoldersEnabled', self.handle, bEnabled)",
"def set_home(func):\n def inner(*args, **kwargs):\n init_home = os.environ['HOME']\n os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir\n try:\n retval = func(*args, **kwargs)\n finally:\n os.environ['HOME'] = init_home\n return retval\n return inner",
"def homedir():\n return os.path.expanduser('~')",
"def set_basedir(self, host, path):",
"def get_user_home(self):\n return os.environ['HOME']",
"def get_user_home(self):\n return os.environ['HOME']",
"def setPreferencesAtStartup(self):\n\t\tif os.path.isfile(self.userPrefsFileName):\n\t\t\tprefs = open(self.userPrefsFileName, 'r')\n\t\t\tprefsLine = prefs.readline()\n\t\t\tprefs.close()\n\t\t\t\n\t\t\tfor i in range(0,len(prefsLine)):\n\t\t\t\tc = prefsLine[i]\n\t\t\t\tif c is not \"/\":\n\t\t\t\t\tself.setPreference(c)\n\t\t\t\telse:\n\t\t\t\t\tself.setPreference(prefsLine[i:])\n\t\t\t\t\tbreak",
"def spark_home(self, sparkHome):\n self.sparkProperties[SparkProperties.SPARK_MESOS_EXECUTOR_HOME] = sparkHome\n return self",
"def set_folders(self, folders):\n\n self.folders = folders",
"def create_home_directories():\n # Directories to create\n directories = (\n translate_home_path(path)\n for path in repo_home.rglob(\"*\")\n if path.is_dir() and not path.is_symlink()\n )\n\n for directory in directories:\n if directory.exists():\n # Don't touch it\n continue\n else:\n # Create it\n directory.mkdir(mode=HOME_DIRECTORY_MODE, parents=True)",
"def setUnimacroUserDirectory(self, v):\n key = 'UnimacroUserDirectory'\n\n oldDir = self.getUnimacroUserDirectory()\n # v = os.path.normpath(os.path.expanduser(v))\n uuDir = self.isValidPath(v, wantDirectory=1)\n if uuDir:\n oldDir = self.isValidPath(oldDir, wantDirectory=1)\n if oldDir == uuDir:\n print(f'The UnimacroUserDirectory was already set to \"{uuDir}\", and Unimacro is enabled')\n return\n if oldDir:\n print(f'\\n-----------\\nChanging your UnimacroUserDirectory\\nConsider copying inifile subdirectories (enx_inifiles or nld_inifiles)\\n' \\\n 'from old: \"{oldDir}\" to the\\n' \\\n 'new UnimacroUserDirectory \"{uuDir}\"\\n--------\\n')\n self.userregnl.set(key, v)\n \n self.UnimacroUserDirectory = uuDir\n \n # clear this one, in order to refresh next time it is called:\n self.UnimacroGrammarsDirectory = None\n \n self.userregnl.delete('Old'+key)\n print(f'Enable Unimacro, and set UnimacroUserDirectory to {uuDir}')\n return\n mess = f'natlinkconfigfunctions, could not Enable Unimacro, and set the UnimacroUserDirectory to \"{v}\"'\n return mess",
"def getFSUserDir(self):\n\n return self.config.get(\"FileMan\",\"homedir\") + self.getRole()[\"roleName\"]",
"def get_home_dir(self, username):\n return self.user_table[username]['home']",
"def homeDirectory(self):\n\t\treturn self.__homeDirectory",
"def create_user_configuration(self):\n\n # Ask before touching things that we do not have to!\n if self.test.user_conf_dir_exists():\n if self.test.user_configuration_seems_complete():\n reply = question(_(\"\"\"User configuration already exists.\nDo you want to rewrite it with a new one?\"\"\"), False)\n if not reply:\n report(_(\"Keeping configuration intact and continuing with settings.\"))\n return\n else:\n self.remove_user_configuration()\n else:\n reply = question(_(\"\"\"User configuration already exists, but it seems to be incomplete.\nDo you want to keep it?\"\"\"), False)\n if not reply:\n self.remove_user_configuration()\n else:\n report(_(\"Keeping configuration intact and aborting.\"))\n return\n # Copy the original intact configuration files\n # creating a conf/ subdirectory\n config_root = self.test.user_conf_dir()\n shutil.copytree(buildconfig.SPD_CONF_ORIG_PATH, config_root)\n # Ensure the files are writeable when copying from immutable directory.\n umask = os.umask(0)\n os.umask(umask)\n os.chmod(self.test.user_conf_dir(), 0o755 & ~umask)\n for root, dirs, files in os.walk(self.test.user_conf_dir()):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o755 & ~umask)\n for f in files:\n os.chmod(os.path.join(root, f), 0o644 & ~umask)\n\n report(_(\"User configuration created in %s\" % self.test.user_conf_dir()))"
] | [
"0.6600052",
"0.6430041",
"0.6430041",
"0.6197573",
"0.57821155",
"0.5754564",
"0.57503104",
"0.5628441",
"0.5487617",
"0.54494035",
"0.54009694",
"0.53437734",
"0.53021526",
"0.5258358",
"0.5253186",
"0.52394444",
"0.5199579",
"0.5132827",
"0.5066063",
"0.5064636",
"0.5064636",
"0.5063934",
"0.5062443",
"0.5052073",
"0.50428385",
"0.50371",
"0.50236124",
"0.4981228",
"0.49628994",
"0.4949903"
] | 0.8290534 | 0 |
article is initialized with xml text contained inside tags | def __init__(self, article_xml):
self.article_xml = article_xml
self.links = self.grab_links()
self.first_link = self.parse_first_link() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, txt='', unicodeEncoding='utf-8'):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup, self).__init__(__txt=None,\n __rawtxt=txt,\n __scope=None,\n __SCOPEUPDATED=False)\n self.__document = nx.DiGraph()\n self.__document.add_node(\"top\", category=\"document\")\n self.__VERBOSE = False\n self.__tagID = 0\n self.__unicodeEncoding = unicodeEncoding",
"def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()",
"def setUp(self):\n self.new_article = Articles('BBC News', \"Paris attack suspect 'of Chechen origin'\", 'The man killed one person and injured four others in Paris in an attack claimed by the IS group.', 'https://ichef.bbci.co.uk/images/ic/1024x576/p06705l3.jpg','http://www.bbc.co.uk/news/world-europe-44098615', '2018-05-13T06:36:21Z')",
"def extract_articles(self, parsed_xml):\n\n # Iterates over every item (article) in xml\n for item in parsed_xml.xpath(\"//item\"):\n\n article = {}\n\n\n article['title'] = self.get_text_or_attr(item, 'title')\n\n\n # The article's categories must be always a list, even if it has\n # only one element.\n categories = self.get_text_or_attr(item, 'category')\n\n if isinstance(categories, str):\n categories = [categories]\n\n article['categories'] = categories\n\n\n url = self.get_text_or_attr(item, 'feedburner:origLink')\n article['url'] = self.remove_query(url)\n\n self.article_url = article['url']\n\n\n # If article's URL is already stored, don't parse it again\n if Article.objects.filter(url=article['url']).count() > 0:\n continue\n\n\n # It is interesting to have the publication date as a `dateutil`\n # object, so we can do whatever manipulation we want.\n pub_date = self.get_text_or_attr(item, 'pubDate')\n article['date'] = self.parse_datetime_passing_errors(pub_date)\n\n\n # Get the author attribute and tries to fetch informations about\n # him/her. An article can have more than one author; on techcrunch's\n # feed, they are separated by a comma.\n author_names = self.get_text_or_attr(item, 'dc:creator').split(',')\n article['authors'] = []\n\n for i, name in enumerate(author_names):\n article['authors'] += [self.get_author(name, i)]\n\n\n # Tries to find the article's thumbnail url\n thumb = self.get_text_or_attr(item, 'media:thumbnail', 'url')\n if thumb and thumb[0]:\n article['thumb'] = self.remove_query(thumb[0])\n\n\n # Gets the article's description and strip all html tags from it\n content = self.clear_text(item.xpath('description'))\n content = content.strip(' Read More').strip(' ').strip()\n\n\n article['content'] = content\n\n\n yield article",
"def setUp(self):\n self.new_article = Articles(\"Politico\",\"By Benjamin Din\", \"2024 GOP contenders collect cash\", \"Republicans who led efforts to overturn the election results in January did especially well, according to the latest quarterly FEC filings.\", \"https://www.politico.com/news/2021/04/15/2024-gop-cash-fec-482240\", \"https://static.politico.com/90/a6/2eff74ff4d69b7aee677b55ae6e2/gettyimages-1230713929.jpg\", \"2021-04-16T02:10:04Z\")",
"def handle_starttag(self, tag, attrs):\n try:\n if tag == \"article\": # Set flag for news feed parsing to true\n for name, value in attrs:\n if name == 'class' and 'grid_12 alpha enrichi' in value:\n self.article_section = True\n elif tag == \"a\" and self.article_section == True: # get a link from the news feed\n for name, value in attrs:\n if name == \"href\":\n if value not in self.links and \"/journaliste/\" not in value:\n self.links.append(value)\n elif tag == \"div\" and not self.article_body: # Set flag from article body to true\n for name, value in attrs:\n if name == 'id' and value == 'articleBody':\n self.article_body = True\n elif tag == 'div' and self.article_body: # Increment number of open div in the main div of article (used to determine when the main article div is closed)\n self.div_open_in_article_body += 1\n elif tag == 'p' and self.article_body: # Suspend aqcuisition for \"lire aussi\" section\n for name, value in attrs:\n if name == 'class' and value == 'lire':\n self.suspend_acquisition = True\n elif tag == 'section' and self.article_body:\n self.suspend_acquisition == True\n elif tag == 'iframe' and self.article_body:\n self.suspend_acquisition == True\n elif tag == 'body':\n for name, value in attrs:\n if name == \"class\":\n self.category = value\n except:\n pass",
"def build(self, text):\n self._xml4nlp = Element('xml4nlp')\n self._note = SubElement(self._xml4nlp, 'note')\n self._doc = SubElement(self._xml4nlp, 'doc')\n\n para = SubElement(self._doc, 'para')\n para.set(\"id\", \"0\")\n para.text = text\n\n self._clean_note()\n self.dom = self._xml4nlp",
"def parse(self, article: BeautifulSoup):\n # Need to find content in a different manner for each of the different sitetypes.\n # Read this as studio, video, article etc.\n # Assume that only articles ask for this functionality\n\n title = self.get_title(article)\n sub_title = self.get_sub_title(article)\n words = self.get_words(article)\n journalists = self.get_journalist(article)\n images = self.get_images(article)\n subscription = self.get_subscription(article)\n content_list = self.get_content(article)\n\n if not title:\n title = self.headline.revisions[0].title\n\n revision = Revision(timestamp=datetime.datetime.now(pytz.timezone(\"Europe/Oslo\")), title=title, sub_title=sub_title, words=words, subscription=subscription)\n\n article = Article(news_site=self.news_site, headline=self.headline)\n\n return revision, article, journalists, images, content_list",
"def get_article_text(self, article: BeautifulSoup):\n # Removes unwanted elements in article, like ads\n for elm in article.find_all(self.parsing_template.ignore_content_tag):\n elm.decompose()\n\n return self.get_text(article, self.parsing_template.content)",
"def _parse(self, tree):\n date_el = self.get_etree().xpath(DATE_XP)[0]\n self.date = date_el.attrib['value']\n self.year, self.month, self.day = self.date.split('-')\n self.date_text = date_el.text\n\n def resolve_type(element):\n return element.attrib.get('type', '').lower().strip('. ')\n\n def index_entity(nodes, model, article):\n for n in nodes:\n m = model(n, article)\n if m.ok:\n db.session.add(m)\n\n def get_html(article):\n return html.tostring(tei.build(etree.Element('article'), article))\n\n root = self.get_etree()\n for section in root.xpath('//div1'):\n section_type = resolve_type(section)\n if not section_type:\n continue\n for subsection in section.xpath('./div2'):\n subsection_type = resolve_type(subsection)\n if not subsection_type:\n continue\n for article in subsection.xpath('./div3'):\n article_type = resolve_type(article)\n if article_type == 'ad-blank':\n continue\n a = Article(issue_id=self.id,\n date=self.date,\n section_type=section_type,\n subsection_type=subsection_type,\n article_type=article_type,\n xpath=root.getpath(article),\n content=get_html(article))\n db.session.add(a)\n db.session.flush()\n index_entity(article.xpath('.//persName'), PersName, a)\n index_entity(article.xpath('.//placeName'), PlaceName, a)\n index_entity(article.xpath('.//orgName'), OrgName, a)\n index_entity(article.xpath('.//rs'), RefString, a)",
"def __init__(self, xml_text):\n logger.verbose(\"Load Version.xml\")\n self.parse(xml_text)",
"def get_article_text(self, article_webpage):\n lemonde_parser = LeMondeHTMLParser()\n lemonde_parser.feed(article_webpage)\n return lemonde_parser.article_data",
"def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()",
"def __init__(self, tag):\n self.tag = tag.lower()\n self.attrs = {}\n self.contents = ()",
"def get_content(self):\n\n self.content = self.book.get_template('cover')\n\n tree = parse_string(super(EpubCoverHtml, self).get_content())\n tree_root = tree.getroot()\n\n images = tree_root.xpath('//xhtml:img', namespaces={'xhtml': NAMESPACES['XHTML']})\n\n images[0].set('src', self.image_name)\n images[0].set('alt', self.title)\n\n tree_str = etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True)\n\n return tree_str",
"def _getArticleContet(self,encodedTag):\r\n xmlEncodedTag = BeautifulSoup(encodedTag.string,\"lxml\")#encoded tag actually has a format of an XML\r\n articleContent = []\r\n for element in xmlEncodedTag.body.contents:\r\n if _getTextElement(element):\r\n articleContent.append(unidecode.unidecode(element.get_text()))\r\n if self._isEndOfArticleCommerical(element):\r\n continue\r\n wordPhraseToRefLink = {a.get_text().strip().lower():a.attrs['href'] for a in xmlEncodedTag.find_all(\"a\")}\r\n return articleContent,wordPhraseToRefLink",
"def get_article():\n quantity = randint(200, 500)\n text = Text(\"ru\")\n article = {\n \"title\": text.title(),\n \"body\": text.text(quantity=quantity),\n \"draft\": False,\n }\n return article",
"def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start",
"def parse_articles(self, response):\n item = NasdaqcrawlerItem()\n item['date_published'] = response.xpath('//span[@itemprop=\"datePublished\"]/text()').extract()\n item['text'] = \"\".join(self.clean_text(response.xpath('//div[@id=\"articlebody\"]//p//text()').extract()))\n item['title'] = response.xpath('//h1/text()').extract()\n item['stock_ticker'] = response.meta['ticker']\n # captures any text between symbol/ and /\n # this should only return a single item\n \n yield item",
"def setUp(self):\n self.news_article = News_Articles(\"Technology Change The World\",\"JOHN DOE\",\"Technolgy is the best\",\"tech.com\",\"tech.com/tchey.png\",\"2020-10-20\")",
"def __init__(self):\n self.elementName = \"\"\n self.elementText = \"\"\n self.attrib = {}\n self.xml = \"\"",
"def setUp(self):\n self.new_article = Article(\"abc-news\", \"ABC News\", \"Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.\",\"https://abcnews.go.com\", \"general\", \"en\", \"us\")",
"def __init__(self):\n self.elementName=\"\"\n self.elementText=\"\"\n self.attrib={}\n self.xml=\"\"",
"def get_article(doi, output='txt'):\n xml = download_article(doi)\n if xml is None:\n return None\n et = ET.fromstring(xml)\n full_text = et.find('article:originalText', elsevier_ns)\n if full_text is None:\n logging.info('Could not find full text for %s.' % doi)\n return None\n main_body = full_text.find('xocs:doc/xocs:serial-item/ja:article/ja:body',\n elsevier_ns)\n if main_body is None:\n return None\n if output == 'xml':\n return main_body\n elif output == 'txt':\n sections = main_body.findall('common:sections/common:section',\n elsevier_ns)\n full_txt = ''\n for s in sections:\n # Paragraphs that are directly under the section\n pars = s.findall('common:para', elsevier_ns)\n # Paragraphs that are under a section within the section\n pars += s.findall('common:section/common:para', elsevier_ns)\n for p in pars:\n # Get the initial string inside the paragraph\n if p.text is not None:\n full_txt += p.text\n # When there are tags inside the paragraph (for instance\n # references), we need to take those child elements one by one\n # and get the corresponding tail strings and join these. \n full_txt += ''.join([c.tail if c.tail is not None \n else '' for c in p.getchildren()])\n full_txt += '\\n'\n else:\n logging.error('Unknown output format %s.' % output)\n return None\n return full_txt",
"def __init__(self, source):\n self.tree = ET.parse(source)\n self.root = self.tree.getroot()",
"def xmlRead(self, attrs, content, font):\n raise NotImplementedError(self)",
"def parse(self, response):\n\t\ttc = TCArticleItem()\n\t\ttc['name'] = response.xpath(\"//meta[@name='title']/@content\").extract()\n\t\ttc['url'] = response.url\n\t\ttc['date'] = self.date(response)\n\t\ttc['description'] = response.xpath(\"//meta[@name='description']/@content\").extract()\n\t\ttc['body'] = self.body(response)\t\t\n\t\ttc['tags'] = response.xpath(\"//meta[@name='keywords'][2]/@content\").re('(\\w+)')\n\t\treturn tc",
"def __init__(self, page):\n self.raw_page = page\n self.page = etree.HTML(page)",
"def xml(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n return Text(string, token).xml",
"def filter_articles(tree: ElementTree, issue_num: str) -> List[Article]: \n root = tree.getroot()\n articles: List[Article] = []\n article_tags = root.findall('.//item')\n for article_tag in article_tags:\n if not is_for_issue(article_tag, issue_num):\n continue\n article = Article()\n #possible optimization, instead of calling find several times,\n #loop through tag children once and parse out data as we run into it\n article.title = article_tag.find('title').text\n #we will post process this later\n article.author = 'UNKNOWN AUTHOR'\n article_text_content = article_tag.find('content:encoded', XML_NS).text\n article.content = BeautifulSoup(article_text_content, 'html.parser')\n articles.append(article)\n return articles"
] | [
"0.6530297",
"0.6436172",
"0.631786",
"0.62645775",
"0.61489284",
"0.61476105",
"0.61120623",
"0.6110277",
"0.6025093",
"0.5990031",
"0.59805065",
"0.59666455",
"0.5903457",
"0.5874341",
"0.58594066",
"0.5852814",
"0.5852356",
"0.5843398",
"0.58399487",
"0.58204275",
"0.58045065",
"0.5796396",
"0.5773387",
"0.5749343",
"0.57440954",
"0.57388633",
"0.5724706",
"0.5721896",
"0.5693103",
"0.567953"
] | 0.6952109 | 0 |
returns a list of the outermost links not in parentheses, a template, or a tag | def grab_links(self):
links = []
link_char = []
w_temp = [] #in template?
par = [] #in parentheses?
rtag = [] #in <ref> tag?
dtag = [] #in <div> tag?
skip_char = []
for i, c in enumerate(self.article_xml):
if i in skip_char: continue #eliminates double counting
char = self.article_xml[i:i+2]
tag = self.article_xml[i:i+4]
#wiki template
w_temp = self.inside_char(char, Article.w_marker, w_temp, i)
if char in Article.w_marker: skip_char.append(i+1)
if w_temp:
continue #doesn't process if inside wiki template
#parentheses
par = self.inside_char(c, Article.par_marker, par, i)
if par:
continue
#<ref> or <div>
rtag = self.inside_char(tag, Article.rtag_marker, rtag, i)
dtag = self.inside_char(tag, Article.dtag_marker, dtag, i)
if rtag or dtag:
continue
#clear to add outer-most link
if char == '[[':
link_char.append(i)
elif char == ']]' and len(link_char) == 1:
links.append( self.article_xml[link_char[0]:i+2])
link_char.pop()
elif char == ']]' and len(link_char) > 1:
link_char.pop()
return links | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_substitution_image_links(links):\n return [link for link in links if '{' not in link]",
"def getExpandedLinks():",
"def removeHtmlTags(self, text):\n sb = []\n text = self.removeHtmlComments(text)\n bits = text.split(u'<')\n sb.append(bits.pop(0))\n tagstack = []\n tablestack = tagstack\n for x in bits:\n m = _tagPattern.match(x)\n if not m:\n continue\n slash, t, params, brace, rest = m.groups()\n t = t.lower()\n badtag = False\n if t in _htmlelements:\n # Check our stack\n if slash:\n # Closing a tag...\n if t in _htmlsingleonly or len(tagstack) == 0:\n badtag = True\n else:\n ot = tagstack.pop()\n if ot != t:\n if ot in _htmlsingleallowed:\n # Pop all elements with an optional close tag\n # and see if we find a match below them\n optstack = []\n optstack.append(ot)\n while True:\n if len(tagstack) == 0:\n break\n ot = tagstack.pop()\n if ot == t or ot not in _htmlsingleallowed:\n break\n optstack.append(ot)\n if t != ot:\n # No match. Push the optinal elements back again\n badtag = True\n tagstack += reversed(optstack)\n else:\n tagstack.append(ot)\n # <li> can be nested in <ul> or <ol>, skip those cases:\n if ot not in _htmllist and t in _listtags:\n badtag = True\n elif t == u'table':\n if len(tablestack) == 0:\n bagtag = True\n else:\n tagstack = tablestack.pop()\n newparams = u''\n else:\n # Keep track for later\n if t in _tabletags and u'table' not in tagstack:\n badtag = True\n elif t in tagstack and t not in _htmlnest:\n badtag = True\n # Is it a self-closed htmlpair? (bug 5487)\n elif brace == u'/>' and t in _htmlpairs:\n badTag = True\n elif t in _htmlsingleonly:\n # Hack to force empty tag for uncloseable elements\n brace = u'/>'\n elif t in _htmlsingle:\n # Hack to not close $htmlsingle tags\n brace = None\n else:\n if t == u'table':\n tablestack.append(tagstack)\n tagstack = []\n tagstack.append(t)\n newparams = self.fixTagAttributes(params, t)\n if not badtag:\n rest = rest.replace(u'>', u'>')\n if brace == u'/>':\n close = u' /'\n else:\n close = u''\n sb.append(u'<')\n sb.append(slash)\n sb.append(t)\n sb.append(newparams)\n sb.append(close)\n sb.append(u'>')\n sb.append(rest)\n continue\n sb.append(u'<')\n sb.append(x.replace(u'>', u'>'))\n\n # Close off any remaining tags\n while tagstack:\n t = tagstack.pop()\n sb.append(u'</')\n sb.append(t)\n sb.append(u'>\\n')\n if t == u'table':\n if not tablestack:\n break\n tagstack = tablestack.pop()\n\n return u''.join(sb)",
"def in_collections(self):\n links = []\n for link in self.link:\n if link.rel == PARENT_LINK_REL and link.href:\n links.append(link)\n return links",
"def get_all_novel_links(browser):\n novels = list()\n for tr in browser.select('tr.tr3.t_one.tac'):\n if re.search(PATTERN, tr.h3.a.string) is None:\n novels.append(tr) \n return novels",
"def links_to_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'a')",
"def check_tags(l):\n # start on outside working inwords, checking first and last tags match\n stack = []\n for item in l:\n if item[-2] == '/' or item[1] == '!':\n continue # deal with self-closing tags and comments\n elif item[ 1] == '/':\n root = item[2 : -1].split()[0]\n try:\n match = stack.pop()\n except: return False # closing tag without an opener\n if root != match:\n return False\n else:\n root = item[1 : -1].split()[0]\n stack.append(root) \n return True",
"def gen_links(text):\n return []",
"def get_text_links(parser, token):\n try:\n tag_name, slugname = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError, \"%r tag requires exactly one argument\" % token.contents.split()[0]\n return GetTextLinkEntries(slugname)",
"def get_site_tags(html, tags=[\"a\"]):\n soup = BeautifulSoup(html)\n\n # For each of the wanted tags, parse out the tags from the web-page\n links = list()\n for t in tags:\n links.extend(soup.find_all(t))\n return links",
"def find_tag_urls(r):\n parser = MyHTMLParser()\n parser.feed(r)\n return parser.url_list",
"def links(self):\n return self.dom.findall(\".//a\")",
"def get_links(value):\n\ttry:\n\t\ttry:\n\t\t\tfrom BeautifulSoup import BeautifulSoup\n\t\texcept ImportError:\n\t\t\tfrom beautifulsoup import BeautifulSoup\n\t\tsoup = BeautifulSoup(value)\n\t\treturn soup.findAll('a')\n\texcept ImportError:\n\t\tif settings.DEBUG:\n\t\t\traise template.TemplateSyntaxError, \"Error in 'get_links' filter: BeautifulSoup isn't installed.\"\n\treturn value",
"def html_anchor_tags(self):\n return self.findall_markdown_cells(r'<a [^>]*>')",
"def strip_tags(tagged_sentences):\n untagged_sentences = []\n for taggedsent in tagged_sentences:\n untaggedsent = ''\n\tfor taggedword in taggedsent.split():\n\t word = re.split('(?<!\\\\\\)\\/', taggedword)[0]\n untaggedsent += word + ' '\n #print untaggedsent\n untagged_sentences.append(untaggedsent)\n return untagged_sentences",
"def get_links(self):\n links = \"\"\n if self.title != \"\":\n links += html_link_to_tag(\n plain_to_html(self.title), self.title, self.proc\n )\n return links + \\\n html_unordered_list([x.get_links() for x in self.subsections])",
"def get_non_docs_urls(self):\n non_docs_urls = []\n link_labels = []\n for tag in self.post_div.find_all(\"a\"):\n url = tag[\"href\"]\n if tag.text not in link_label_blacklist and \\\n not (url.startswith(\"https://docs.google.com\") or \\\n url.startswith(\"https://drive.google.com\")):\n non_docs_urls += [url]\n link_labels += [tag.text.strip()]\n return non_docs_urls, link_labels",
"def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):\n text = textlib.removeDisabledParts(text)\n\n # Ignore links in fullurl template\n text = re.sub(r'{{\\s?fullurl:.[^}]*}}', '', text)\n # TODO search for links within cite with filled-in archiwum parameter\n \n\n # MediaWiki parses templates before parsing external links. Thus, there\n # might be a | or a } directly after a URL which does not belong to\n # the URL itself.\n\n # First, remove the curly braces of inner templates:\n nestedTemplateR = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')\n while nestedTemplateR.search(text):\n text = nestedTemplateR.sub(r'{{\\1 \\2 \\3}}', text)\n\n # Then blow up the templates with spaces so that the | and }} will not\n # be regarded as part of the link:.\n templateWithParamsR = re.compile(r'{{([^}]*?[^ ])\\|([^ ][^}]*?)}}',\n re.DOTALL)\n while templateWithParamsR.search(text):\n text = templateWithParamsR.sub(r'{{ \\1 | \\2 }}', text)\n\n # Add <blank> at the end of a template\n # URL as last param of multiline template would not be correct\n text = text.replace('}}', ' }}')\n\n # Remove HTML comments in URLs as well as URLs in HTML comments.\n # Also remove text inside nowiki links etc.\n text = textlib.removeDisabledParts(text)\n #linkR = textlib.compileLinkR(withoutBracketed, onlyBracketed)\n #linkR = re.compile(r'(?P<url>http[s]?:(\\/\\/[^:\\s\\?]+?)(\\??[^\\s;<>\\\"\\|\\)]*))(?:[\\]\\s\\.:;,<>\\\"\\|\\)])')\n linkR = re.compile(r'(?m)(?P<url>http[s]?:(\\/\\/[^\\s\\?]+?)(\\??[^\\s<\\|\\}\\]]*))(?:[\\]\\s\\.<\\|\\}])')\n for m in linkR.finditer(text):\n if m.group('url'):\n #pywikibot.output('URL to YIELD:%s' % m.group('url'))\n if not citeArchivedLink(m.group('url'),text):\n yield m.group('url')\n else:\n #test output\n pywikibot.output('[%s] WebLinksIn: link skipped:%s' % (datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),m.group('url')))\n #else:\n # yield m.group('urlb')",
"def getAncestors():",
"def taglinks(parser, token):\n bits = token.split_contents()\n\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\n '%r tag requires 1 argument.' % bits[0])\n cmname = bits[1]\n\n try:\n srcm = ContentModule.objects.get(name=cmname)\n\n inputstrl = srcm.comment.strip().split('\\n')\n tuplelist = []\n for tagset in inputstrl:\n tuplelist.append((tagset.split('=')[0].strip(),\n tagset.split('=')[1].strip()))\n\n except:\n tuplelist = []\n urlbase = bits[2]\n\n return TaglinksNode(tuplelist, urlbase)",
"def paren_references(article,word):\r\n all_references = ''\r\n # extract text inside parentheses containing the word\r\n pattern = r'\\(([^\\)]*\\b{}\\b.*?)\\)'.format(word)\r\n #[^5] will match any character except '5'\r\n matches = re.findall(pattern,article,re.IGNORECASE|re.DOTALL)\r\n if matches:\r\n all_references = '\\n'.join(matches)\r\n return all_references",
"def extract_all_tags(final_link, driver):\n\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(str(final_link))\n classes = []\n tags = ['div', 'td', 'li', 'a']\n for tag in tags:\n a = driver.find_elements_by_tag_name(str(tag))\n b = len(a)\n for i in range(b):\n try:\n if a[i].get_attribute(\"class\") == None or a[i].get_attribute(\"class\") == '' or a[i].get_attribute(\"class\") == ' ' or a[i].get_attribute(\"class\") == ' ':\n continue\n else:\n className = a[i].get_attribute(\"class\").strip().split(\" \")\n for classN in className:\n classes.append(str(tag) + '.' + str(classN))\n\n except:\n continue\n\n #driver.quit()\n classes = list(dict.fromkeys(classes))\n return(classes)",
"def get_all_links(html):\n links = []\n while True:\n url, endpos = get_next_target(html)\n if url:\n links.append(url)\n html = html[endpos:]\n else:\n break\n return links",
"def prune_unlinked(self):\n linked_ids = set()\n for (link_from, link_to, link_style, link_tail) in self.links:\n linked_ids.add(link_from)\n linked_ids.add(link_to)\n nodes_to_delete = []\n for name, node in self.nodes.items():\n if node.node_id not in linked_ids:\n nodes_to_delete.append(name)\n for name in nodes_to_delete:\n del self.nodes[name]",
"def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])",
"def get_listings(soup):\n listings = []\n for link in soup.find_all(\"a\"):\n if is_valid_listings(link):\n listings.append(link.attrs[\"href\"])\n return listings",
"def _fix_treetags(self, tree):\n for element in tree:\n element.tag = element.tag.split('}')[1]\n if len(element.getchildren()) > 0:\n self._fix_treetags(element)\n return tree",
"def true_tags (tagged_sentences):\n tags = []\n for sent in tagged_sentences:\n tags.extend([re.split('(?<!\\\\\\)\\/', word)[1] for word in sent.split()])\n return tags",
"def getNodesToCheck(self, doc):\n nodesToCheck = []\n for tag in ['p', 'pre', 'td']:\n items = Parser.getElementsByTag(doc, tag=tag)\n nodesToCheck += items\n return nodesToCheck",
"def links(self):\n\t\treturn self.list_of_links"
] | [
"0.6023677",
"0.592088",
"0.57580185",
"0.5751759",
"0.5707651",
"0.56727266",
"0.55507445",
"0.55107826",
"0.5486449",
"0.5462063",
"0.5458863",
"0.54393977",
"0.5430559",
"0.5428695",
"0.54271686",
"0.5418311",
"0.538971",
"0.53824365",
"0.5366302",
"0.5362929",
"0.52836615",
"0.5282814",
"0.5255507",
"0.523469",
"0.52138144",
"0.5210778",
"0.5200812",
"0.5182141",
"0.5168135",
"0.5164226"
] | 0.67386085 | 0 |
filters links to images, files, or other Wikimedia projects; returns false if it's an invalid link (including links with a colon) | def check_link(self, link):
false_links = ["wikipedia:", "w:", "wikitionary:", "wikt:", "wikinews:",
"n:", "wikibooks:", "b:", "wikiquote:", "q:", "wikisource:",
"s:", "wikispecies:", "species:", "wikiversity", "v:",
"wikivoyage:", "voy:", "wikimedia:", "foundation:", "wmf:",
"commonds:", "c:", "chapter:", "metawikipedia:", "meta:",
"m:", "incubator:", "outreach:", "mw:", "mediazilla:",
"bugzilla:", "testwiki:", "wikitech:", "wikidata:", "d:",
"phabricator:", "phab:", "talk:", "user talk:", "file:",
"user:", "template:", "category:", "file talk:",
"category talk:", "image:", "media:", "special:",
"help:", "portal:", "portal talk:", "\#"]
is_bad = any(false_link in link.lower() for false_link in false_links)
if is_bad or link[0] == ":":
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True",
"def GoogleCode_IsExternalLink(wikifier, link):\n\n if GoogleCode_Exists(wikifier, link):\n return False;\n\n if URL.match(link):\n return True\n\n if '.' in link or '\\\\' in link or '/' in link or '#' in link:\n return True\n\n return False",
"def is_href_valid(self, link):\n url = str(link['href'])\n # if it doesn't lead to a wiki page\n if not url.startswith(\"/wiki/\"):\n return False\n\n wikipedia_classes = [\"external_text\", \"mw-disambig\", \"infobox-data\"]\n # if the href has a class\n if link.get(\"class\") is not None:\n link_class = \"_\".join(link.get(\"class\"))\n # if the class is an external text class, or a disambiguation link\n if any(wiki_class in link_class for wiki_class in wikipedia_classes):\n return False\n\n if 'wikimedia' in url or 'wiktionary' in url:\n return False\n wikipedia_keywords = [\"Help\", \"Category\", \"Wikipedia\", \"Template\", \"File\", \"Talk\", \"Special\", \"Portal\"]\n if any(keyword + ':' in url for keyword in wikipedia_keywords):\n return False\n if '#' in url:\n return False\n # if the page is a file\n if re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z]$\", url) or re.search(\"\\.[a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z]$\", url):\n return False\n\n # if the href is enclosed in brackets\n if WikiPage.is_substring_enclosed_in_brackets(link, link.parent.parent):\n return False\n\n wikipedia_not_needed_tags = ['small', 'sup', 'i']\n if link.parent.name in wikipedia_not_needed_tags:\n return False\n\n # if the href shows two different spellings. like in: https://en.wikipedia.org/wiki/Carbon_fibers\n # Carbon fibers ~or~ carbon fibres - here or is the href.\n\n if link.contents == [\"or\"]:\n return False\n\n parents_classes = [p.get(\"class\") for p in link.parents if p.get(\"class\") is not None]\n parents_classes = [str(\"_\".join(p)) for p in parents_classes]\n parents_ids = [p.get(\"id\") for p in link.parents if p.get(\"id\") is not None]\n\n # 'toc' - the Contents menu class\n # 'mw-editsection' - the Edit section\n # 'thumbcaption' - a Photo Caption\n # 'hlist' - a list like in: https://en.wikipedia.org/wiki/January\n wikipedia_classes_to_ignore = [\"thumbcaption\", \"infobox\", \"navigation-not-searchable\", \"sidebar\", \"box-text\",\n \"toc\", \"mw-editsection\", \"thumb\", \"hlist\", \"navbox\"]\n\n for p_class in parents_classes:\n\n if any(class_to_ignore in p_class for class_to_ignore in wikipedia_classes_to_ignore):\n return False\n\n # if it is a coordinates href\n if \"coordinates\" in parents_ids:\n return False\n\n '''\n Update 13.04.2021:\n ------------------\n Someone edited the \"Epistemology\" page. and changed the first link <a>branches<a/>.\n Instead of pointing to the page \"Branches of science\", it was changed to point to \"Outline of philosophy\".\n Which creates a loop. I chose to ignore it manually, and instead click on the next link.\n ( which happens to be Philosophy :) )\n This changed also caused some of the \"paths\" in the PDF files,\n generated before that date to be slightly outdated. But the concept stays the same :)\n \n Update 08.05.2021:\n ------------------\n they fixed it since :)\n \"Epistemology\" -> branches of philosophy : \"https://en.wikipedia.org/wiki/Outline_of_philosophy\" ->\n -> Philosophy.\n \n #if \"Outline_of_philosophy\" in url:\n # return False\n '''\n\n return True",
"def filter_url(text, filter_url_length=\"200\"):\n filter_url_length = int(filter_url_length)\n\n def filter_url_parse_full_links(match):\n \"\"\"Makes links out of absolute URLs. Callback for sub()\n within filter_url(). The first parenthesis in the\n regexp contains the URL, the second trailing\n punctuation.\n \"\"\"\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation\n\n def filter_url_parse_email_links(match):\n \"\"\"Makes links out of e-mail addresses. Callback for sub()\n within filter_url().\n \"\"\"\n email = html.unescape(match.group(0))\n email = html.escape(email)\n caption = filter_url_trim(email, filter_url_length)\n return '<a href=\"mailto:' + email + '\">' + caption + '</a>'\n\n def filter_url_parse_partial_links(match):\n \"\"\"Makes links out of domain names starting with 'www.'.\n Callback for sub() within filter_url(). The first\n parenthesis in the regexp contains the URL, the\n second trailing punctuation.\n \"\"\"\n dname = html.unescape(match.group(1))\n dname = html.escape(dname)\n punctuation = match.group(2)\n caption = filter_url_trim(dname, filter_url_length)\n return '<a href=\"http://' + dname + '\">' + caption + '</a>' + punctuation\n\n # Tags to skip and not recurse into.\n ignore_tags = 'a|script|style|code|pre'\n\n # Pass length to regexp callback.\n # BCM: We will ignore the length limit as rather silly\n # and some work to implement.\n\n # Create an array which contains the regexps for each\n # type of link. The key to the regexp is the name of a\n # function that is used as callback function to process\n # matches of the regexp. The callback function is to\n # return the replacement for the match. The array is\n # used and matching/replacement done below inside some\n # loops.\n tasks = dict()\n\n # Prepare protocols pattern for absolute URLs.\n # check_url() will replace any bad protocols with HTTP,\n # so we need to support the identical list. While '//'\n # is technically optional for MAILTO only, we cannot\n # cleanly differ between protocols here without\n # hard-coding MAILTO, so '//' is optional for all\n # protocols.\n protocols = ['ftp', 'http', 'https', 'irc', 'mailto',\n 'news', 'nntp', 'rtsp', 'sftp', 'ssh',\n 'tel', 'telnet', 'webcal']\n protocols = r'|'.join([p + r':(?://)?' for p in protocols])\n\n # Prepare domain name pattern. 
The ICANN seems to be on\n # track towards accepting more diverse top level\n # domains, so this pattern has been \"future-proofed\" to\n # allow for TLDs of length 2-64.\n domain = r'(?:[A-Za-z0-9._+-]+\\.)?[A-Za-z]{2,64}\\b'\n ip = r'(?:[0-9]{1,3}\\.){3}[0-9]{1,3}'\n auth = r'[a-zA-Z0-9:%_+*~#?&=.,/;-]+@'\n trail = r'[a-zA-Z0-9:%_+*~#&\\[\\]=/;?!\\.,-]*[a-zA-Z0-9:%_+*~#&\\[\\]=/;-]'\n\n # Prepare pattern for optional trailing punctuation.\n # Even these characters could have a valid meaning for\n # the URL, such usage is rare compared to using a URL at\n # the end of or within a sentence, so these trailing\n # characters are optionally excluded.\n punctuation = r'[\\.,?!]*?'\n\n tasks = []\n\n # Match absolute URLs.\n url_pattern = r\"(?:%s)?(?:%s|%s)/?(?:%s)?\" % (auth, domain, ip, trail)\n pattern = r\"((?:%s)(?:%s))(%s)\" % (protocols, url_pattern, punctuation)\n tasks.append((filter_url_parse_full_links, pattern))\n\n # Match e-mail addresses.\n url_pattern = r\"[A-Za-z0-9._+-]{1,254}@(?:%s)\" % (domain,)\n pattern = r\"(%s)\" % (url_pattern,)\n tasks.append((filter_url_parse_email_links, pattern))\n\n # Match www domains.\n url_pattern = r\"www\\.(?:%s)/?(?:%s)?\" % (domain, trail)\n pattern = r\"(%s)(%s)\" % (url_pattern, punctuation)\n tasks.append((filter_url_parse_partial_links, pattern))\n\n # HTML comments need to be handled separately, as\n # they may contain HTML markup, especially a\n # '>'. Therefore, remove all comment contents and\n # add them back later.\n # BCM: Replaced the mess in the original PHP with a\n # cleaner split() implementation. Also moved out of loop\n # (what was with that?).\n split_comments = re_split(r'(<!--.*?-->)', text, flags=re.S)\n saved_comments = [split_comments[i]\n for i in range(1, len(split_comments), 2)]\n text = '<!---->'.join([split_comments[i]\n for i in range(0, len(split_comments), 2)])\n\n # Each type of URL needs to be processed separately. The\n # text is joined and re-split after each task, since all\n # injected HTML tags must be correctly protected before\n # the next task.\n for task, pattern in tasks:\n\n # Split at all tags; ensures that no tags or attributes are processed.\n chunks = re_split(r'(<.+?>)', text, flags=re.I|re.S)\n # The array consists of alternating delimiters and\n # literals, and begins and ends with a literal\n # (inserting NULL as required). Therefore, the first\n # chunk is always text:\n chunk_type = 'text'\n # If a tag of ignore_tags is found, it is stored in\n # open_tag and only removed when the closing tag is\n # found. 
Until the closing tag is found, no\n # replacements are made.\n open_tag = None\n\n for i in range(len(chunks)):\n if chunk_type == 'text':\n # Only process this text if there are no\n # unclosed ignore_tags.\n if not open_tag:\n # If there is a match, inject a link\n # into this chunk via the callback\n # function contained in task.\n chunks[i] = re_sub(pattern, task, chunks[i])\n # Text chunk is done, so next chunk must be a tag.\n chunk_type = 'tag'\n else:\n # Only process this tag if there are no unclosed ignore_tags.\n if not open_tag:\n # Check whether this tag is contained in ignore_tags.\n matches = re_match(r\"<(%s)(?:\\s|>)\" % (ignore_tags,),\n chunks[i], flags=re.I)\n if matches:\n open_tag = matches.group(1)\n # Otherwise, check whether this is the\n # closing tag for open_tag.\n elif re_match(r\"<\\/%s>\" % (open_tag,),\n chunks[i], flags = re.I):\n open_tag = None\n # Tag chunk is done, so next chunk must be text.\n chunk_type = 'text'\n text = ''.join(chunks)\n\n # Revert back to the original comment contents\n split_text = re_split(r'(<!---->)', text, flags=re.S)\n for i in range(1, len(split_text), 2):\n assert split_text[i] == \"<!---->\"\n split_text[i] = saved_comments[i // 2]\n text = ''.join(split_text)\n\n return text",
"def filter_substitution_image_links(links):\n return [link for link in links if '{' not in link]",
"def check_href(href):\n if bool(pattern.match(href)):\n if os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False",
"def _check_character_not_valid_in_resource_link(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = (doc.xpath('.//%s[@%s]' % (name, attr))\n if not isinstance(doc, string_types) else [])\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if (resource.startswith('/') and not\n re.search('^[.][a-zA-Z]+$', ext)):\n self.msg_args.append((\"%s:%s\" % (xml_file,\n node.sourceline)))\n if self.msg_args:\n return False\n return True",
"def test_link_is_tracked_false_archive(self):\n self.assertFalse(link_is_tracked(\"https://web.archive.org/https://test.com/\"))",
"def _is_bad_link(info, base):\r\n # Links are interpreted relative to the directory containing the link\r\n tip = resolved(joinpath(base, dirname(info.name)))\r\n return _is_bad_path(info.linkname, base=tip)",
"def check_for_file_link(link):\n for ext in lk_nr:\n if ext in link:\n return False\n return True",
"def test_tag_hyperlinks(self):\n for h in self.hyperlinks:\n if h['name'] in ['C++', 'Java', 'Python', 'ROS', 'MATLAB']:\n self.assertTrue(\n '.md' in h['url'],\n msg='Hyperlink \"%s\" is wrongly detected as a tag in \"%s\".' % (h['md'], h['file'])\n )",
"def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation",
"def check_for_url_in_text(self, string):\r\n has_link = False\r\n\r\n # Find all links in the string.\r\n links = re.findall(r'(https?://\\S+)', string)\r\n if len(links)>0:\r\n has_link = True\r\n\r\n # Autolink by wrapping links in anchor tags.\r\n for link in links:\r\n string = re.sub(link, self.generate_file_link_html_from_url(link, link), string)\r\n\r\n return has_link, string",
"def create_href_checker(pattern, working_dir):\n file_list = os.listdir(working_dir)\n def check_href(href):\n \"\"\"Return whether a url is vlaid or not\"\"\"\n if bool(pattern.match(href)):\n if os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False\n return check_href",
"def test_can_filter_inline_styles(self):\n text = '<a href=\"https://google.com\">Example</a>'\n filter = Bleach(\n tags=['a'],\n attributes=['href'],\n protocols=['http'],\n )\n filtered = filter.filter(text)\n expected = '<a>Example</a>'\n\n # assert link removed as protocol was invalid\n self.assertEquals(expected, filtered)",
"def isLinkName(word):\r\n return wikiLink.match(word)",
"def is_relative_link(link):\n return not get_protocol(link) and re.search(r\"^\\.?/([a-z]|[A-Z]|[0-9]|\\.)+\", link)",
"def test_hyperlinks_do_not_contain_prohibited_characters(self):\n for h in self.hyperlinks:\n self.assertTrue(\n re.search(r'[<>]', h['name']) is None,\n msg='Hyperlink \"%s\" contains forbidden characters in \"%s\".' % (h['md'], h['file'])\n )",
"def checklink(key,value):\n try:\n if not value.startswith((\"http\",\"www\")): return False, False\n ## Value is not string, so it can't be website link\n except: return False, False\n linkresearch = LINKRE.search(key)\n ## In normal practice this really shouldn't happen :-/\n if not linkresearch: return False, False\n return linkresearch.group(\"name\"), value",
"def fix_links():\n pass",
"def physOrgNewsFilter(href):\n return(href.startswith('http://phys.org/news/'))",
"def remove_urls(text):\n pass",
"def _is_url(string):\n return \"http\" in string",
"def containsURL(line: str):\n\n URL = \"(http|ftp|https)://([\\w_-]+(?:(?:\\.[\\w_-]+)+))\" \\\n \"([\\w.,@?^=%&:/~+#-]*[\\w@?^=%&/~+#-])?\"\n if re.match(URL, line):\n return True\n else:\n return False",
"def is_web_url(text):\r\n return re.match(r'(http://|https://|www.)(www\\.)?([a-zA-Z0-9-_.]+)(\\.[a-zA-Z0-9]{2,4})(\\S+)', text)",
"def test_link_is_tracked_false(self):\n self.assertFalse(link_is_tracked(\"https://www.foo.com/\"))",
"def _islink(path):\n if not os.path.isdir(path):\n return False\n\n if not isinstance(path, str):\n path = str(path)\n\n attributes = ctypes.windll.kernel32.GetFileAttributesW(path)\n if attributes == INVALID_FILE_ATTRIBUTES:\n return False\n\n return (attributes & FILE_ATTRIBUTE_REPARSE_POINT) > 0",
"def test_non_http_links_not_shortened(self):\n sender = self.create_user()\n group = self.create_group()\n sender.add_to_group(group.pk)\n\n thread = mommy.make(Thread, group=group)\n message = Message(\n text='This is an email: <a href=\"mailto:[email protected]\">lnk</a>',\n thread=thread,\n sender=sender\n )\n message.save()\n self.assertEqual(message.links.count(), 0)",
"def test_link_safety(self):\n attack_vectors = (\n # \"standard\" javascript pseudo protocol\n ('javascript:alert`1`', ''),\n # bypass attempt\n ('jAvAsCrIpT:alert`1`', ''),\n # javascript pseudo protocol with entities\n ('javascript:alert`1`', ''),\n # javascript pseudo protocol with prefix (dangerous in Chrome)\n ('\\x1Ajavascript:alert`1`', ''),\n # data-URI (dangerous in Firefox)\n ('data:text/html,<script>alert`1`</script>', ''),\n # vbscript-URI (dangerous in Internet Explorer)\n ('vbscript:msgbox', ''),\n # breaking out of the attribute\n ('\"<>', ''),\n )\n\n for vector, expected in attack_vectors:\n # Image\n self.assertEqual(\n Markdown().render('' % vector),\n '<p><img src=\"%s\" alt=\"atk\"></p>' % expected)\n # Link\n self.assertEqual(\n Markdown().render('[atk](%s)' % vector),\n '<p><a rel=\"nofollow\" href=\"%s\">atk</a></p>' % expected)",
"def is_valid_listings(link):\n if link.has_attr(\"href\") and link.attrs[\"href\"].startswith(LISTING_PREFIX):\n return True\n return False"
] | [
"0.68379533",
"0.67308575",
"0.6564884",
"0.6558733",
"0.6462883",
"0.6375301",
"0.6374791",
"0.6300895",
"0.62788117",
"0.6265837",
"0.62631965",
"0.62593323",
"0.622585",
"0.62170655",
"0.6212652",
"0.6198974",
"0.6183221",
"0.61607987",
"0.61497504",
"0.6100527",
"0.60753065",
"0.60581857",
"0.6017692",
"0.6008195",
"0.600071",
"0.5951591",
"0.5928974",
"0.5926502",
"0.58960485",
"0.58450973"
] | 0.67699367 | 1 |
strips brackets, returns link destination (not display name) | def clean_link(self, link):
link = link.strip("[]")
if "|" in link:
link = link.split("|",1)[0]
link = link.strip() #remove trailing white space
return link | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_link(self, link):\n new_link = \"/\".join(link.split(\"/\")[0:3])\n return \"http://www.imdb.com\" + new_link",
"def remove_links(str):\n stripped_str = re.sub(\"\\[.*\\]\",\"\", str)\n str_list = filter(None, stripped_str.split(\" \"))\n built_string = \" \".join(str_list)\n return built_string",
"def createCompleteLink(link, domain):\n if link is not None and len(link) > 0:\n if re.match('^http', link) is not None:\n return link\n else:\n #Remove the first / to avoid //\n if link[0] == '/':\n link = link[1:]\n return domain + link\n return domain",
"def getDestination(source):\n\ti = len(source)-1\n\tif source[i] == '/':\n\t\tsource = source[0:i - 1]\n\twhile i >= 0:\n\t\tif source[i] == '/':\n\t\t\tbreak\n\t\ti -= 1\n\tdestination = source[0: i]\n\treturn destination",
"def clean_link(self, link=None, url_root=\"\"):\n\n\t\tif link == None :\n\t\t\tlink = \"\"\n\n\t\t### erase all spaces in original link\n\t\tlink = ' '.join(link.split())\n\t\tlink = link.replace(\" \",\"\").replace('\\n', '').replace('\\r', '')\n\n\t\t### get url_root if needed\n\t\tif url_root == \"\" :\n\t\t\turl_root_ = self.page_url\n\t\telse :\n\t\t\turl_root_ = url_root\n\n\t\t### checks if link is an email\n\t\tif \"@\" in link :\n\t\t\tif link.startswith(\"mailto\") or link.startswith(\"http\") or link.startswith(\"/\") :\n\t\t\t\tpass\n\t\t\telse :\n\t\t\t\tlink = \"mailto:\" + link\n\n\t\telif not link.startswith(\"http\"):\n\t\t\tseparator = \"\"\n\t\t\tif not link.startswith(\"/\") and url_root == \"\" :\n\t\t\t\tseparator = \"/\"\n\t\t\tlink \t= \"{}{}{}\".format( url_root_, separator, link)\n\n\n\t\t### DEBUG --> for instance Prix de l'Innovation Urbaine / escape and unicode follow_link\n\t\t### escape URL encoding\n\t\t# link = unquote(link)\n\t\t# log_scrap.debug(u\" === clean_link / link (%s): %s\", (type(link), link) )\n\n\t\treturn unicode(link)",
"def create_location_sublink(x: str) -> str:\n tmpname = strip_location_subtext(x)\n if tmpname in point_locations:\n loc = point_locations[tmpname]\n tmpstr = create_location_link(loc, tmpname, do_print, path=\"../locations/\")\n if tmpname != x:\n tmpstr += x[len(tmpname):]\n tmpstr = tmpstr.replace(\"<!>\", fetch_fa_glyph(\"bad location\"))\n # tmpstr = tmpstr.replace(\"<?>\", fetch_fa_glyph(\"questionable id\"))\n tmpstr = tmpstr.replace(\"<?>\", \"\").strip()\n else:\n tmpstr = x\n return tmpstr",
"def _getWikiLink(self, link):\n return reverse('wiki.document',\n kwargs={'document_slug': link.replace(' ', '+')})",
"def make_links(traceback):\r\n\r\n lwords = traceback.split('\"')\r\n\r\n # Making the short circuit compatible with <= python2.4\r\n result = (len(lwords) != 0) and lwords[0] or ''\r\n\r\n i = 1\r\n\r\n while i < len(lwords):\r\n link = make_link(lwords[i])\r\n\r\n if link == '':\r\n result += '\"' + lwords[i]\r\n else:\r\n result += link\r\n\r\n if i + 1 < len(lwords):\r\n result += lwords[i + 1]\r\n i = i + 1\r\n\r\n i = i + 1\r\n\r\n return result",
"def extract_link_str(self, link):\n if type(link) is str:\n # import pdb; pdb.set_trace()\n if re.match( r'^link:', link):\n # assume intending to specify a link, now match for rest of pattern \n matchObj = re.match( r'^link:([^ ]+)$', link)\n if matchObj:\n path = matchObj.group(1)\n node = self.get_node(path)\n link_info = {'node': node}\n return link_info\n else:\n print \"** Error, invalid path specified in link string, must not have spaces\"\n print \" link string is: '%s'\" % link\n traceback.print_stack()\n sys.exit(1)\n elif re.match( r'^extlink:', link):\n # assume intending to specify an external link, now match for rest of pattern\n matchObj = re.match( r'^extlink:([^ ]*[^ ,])[ ,]([^ ]+)$', link)\n if matchObj:\n file = matchObj.group(1)\n path = matchObj.group(2)\n link_info = {'extlink': (file, path)}\n return link_info\n else:\n print \"** Error, invalid file or path specified in extlink string\"\n print \" must not have spaces and file name must not end in comma\"\n print \"extlink string is: '%s'\"% link\n traceback.print_stack()\n sys.exit(1)\n return None",
"def test_gen_destination_for_alias_is_destination(self):\n destination = db.gen_destination_for_alias(self.dbm, \"reddit\")\n self.assertIsInstance(destination, db.Destination)\n self.assertEqual(\"https://www.reddit.com/r/{}\", destination.url)",
"def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation",
"def clean_link(link_text):\n\n return link_text.strip(\"\\t\\r\\n '\\\"\")",
"def clean_link(link_text):\n\n return link_text.strip(\"\\t\\r\\n '\\\"\")",
"def undo_format_link_segment(value):\n\n if json_api_settings.FORMAT_RELATED_LINKS:\n return format_value(value, \"underscore\")\n\n return value",
"def _get_full_url(self, link, url):\n from webcrawler.settings import process_link_value\n path = urlparse.urljoin(url, link)\n path = process_link_value(path)\n return path",
"def parselink2(link):\n [srcNode,srcPort,dstNode,dstPort] = link.resourceName.split(\"--\")\n return (srcNode,srcPort,dstNode,dstPort,\"0\")",
"def print_link(s):\n line = ''\n while s != empty:\n if line:\n line += ' '\n line += str(first(s))\n s = rest(s)\n print(line)",
"def parse_next_url(link_str):\n links_arr = link_str.split(\",\")\n for links in links_arr:\n a_url, direction = links.split(';')\n if \"next\" in direction:\n a_url = a_url.replace('<', '').replace('>', '')\n return a_url\n return None",
"def simplify_links(proj,exp,links):\n simple_links =[] \n\n for key in links:\n (node_name,x,y) = key.rpartition(':')\n node_name = node_name+\".\"+exp+\".\"+proj+\".emulab.net\"\n simple_links.append((node_name,links[key]['ipaddr']))\n\n return simple_links",
"def convert_single_relation_url_to_simplified_format(relation_url):\n relation_url = relation_url.strip()\n prefix = 'www.freebase.com/'\n if not relation_url.startswith(prefix):\n raise Exception(\"Invalid format of relation '{}', expected prefix '{}'\".format(relation_url, prefix))\n return relation_url[len(prefix):].replace('/', '.').strip()",
"def bs (link):\n return link.replace ('\\\\', '\\\\\\\\')",
"def join_link(s, separator):\n if s == empty:\n return\"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)",
"def get_clean_zotero_link(links):\n link = \"https://www.zotero.org/%s/items\" % os.getenv(\"ZTH_SEARCH_PREFIX_URI\")\n if \"alternate\" in links:\n link = links[\"alternate\"][\"href\"].replace(\"items\", \"items/itemKey\")\n return link",
"def join_link(s, separator):\n if s == empty:\n return \"\"\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)",
"def get_absolute_url(self):\n return ('')",
"def join_link(s, separator):\n if s == empty:\n return ''\n elif rest(s) == empty:\n return str(first(s))\n else:\n return str(first(s)) + separator + join_link(rest(s), separator)",
"def filter_url_parse_partial_links(match):\n dname = html.unescape(match.group(1))\n dname = html.escape(dname)\n punctuation = match.group(2)\n caption = filter_url_trim(dname, filter_url_length)\n return '<a href=\"http://' + dname + '\">' + caption + '</a>' + punctuation",
"def _convert_first_href(line):\n x = line.split(',')\n x[0] = '<a href=%(url)s>%(url)s</a>' % {'url': x[0]}\n return \",\".join(x)",
"def createLinkFromWikiWord(word, wikiPage): # normalizeWikiWord\r\n return \"\"",
"def sanitize_link(link, url):\n if link.startswith('//'):\n link = f'http:{link}'\n elif link.startswith('/'):\n parsed_url = urlparse(url)\n link = f'http://{parsed_url.hostname}{link}'\n return link"
] | [
"0.6074059",
"0.6058749",
"0.5964363",
"0.59119755",
"0.58833617",
"0.5830744",
"0.57996374",
"0.57897335",
"0.57738227",
"0.5760817",
"0.5757934",
"0.5745697",
"0.5745697",
"0.5744635",
"0.57165736",
"0.57104677",
"0.57055384",
"0.5694247",
"0.5692231",
"0.5687585",
"0.5648436",
"0.564706",
"0.5641925",
"0.5622575",
"0.5615494",
"0.56044865",
"0.56007636",
"0.55730915",
"0.5551543",
"0.5536637"
] | 0.6704387 | 0 |
Evaluate quality of the fit result. Subclasses can override this method to do post analysis. | def _evaluate_quality(self, fit_data: FitData) -> Union[str, None]:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]:\n freq_increment = np.mean(np.diff(fit_data.x_data))\n\n fit_a = fit_data.ufloat_params[\"a\"]\n fit_b = fit_data.ufloat_params[\"b\"]\n fit_freq = fit_data.ufloat_params[\"freq\"]\n fit_kappa = fit_data.ufloat_params[\"kappa\"]\n\n snr = abs(fit_a.n) / np.sqrt(abs(np.median(fit_data.y_data) - fit_b.n))\n fit_width_ratio = fit_kappa.n / np.ptp(fit_data.x_data)\n\n criteria = [\n fit_data.x_range[0] <= fit_freq.n <= fit_data.x_range[1],\n 1.5 * freq_increment < fit_kappa.n,\n fit_width_ratio < 0.25,\n fit_data.reduced_chisq < 3,\n curve.utils.is_error_not_significant(fit_kappa),\n snr > 2,\n ]\n\n if all(criteria):\n return \"good\"\n\n return \"bad\"",
"def quality(self) -> float:\n if self.get_cover_size() == 0:\n return 0\n else:\n if self.baseline == Baseline.COMPLEMENT:\n return self.__complement_quality()\n else:\n return self.__population_quality()",
"def fit(self):\n self.eval_chisq([1, 1, 1, 1])",
"def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))",
"def quality(self) -> int:\n return self._quality",
"def determine_quality(self, function):\n if self.ground_truth_annotation_select.value is None:\n return None\n if self.segmentation_result_select.value is None:\n return None\n if self.segmentation_result_select.value is self.ground_truth_annotation_select.value:\n return None\n\n if self.ground_truth_annotation_select.value.data.max() == 0:\n return\n if self.segmentation_result_select.value.data.max() == 0:\n return\n\n quality = function(self.ground_truth_annotation_select.value.data, self.segmentation_result_select.value.data)\n\n return quality",
"def eval_fis(self,fis):\n #res = 0.0\n #for cl_state in self.classes:\n # res += cl_state.eval_fis(fis)\n #print \"=>\",res\n #return 1.0/res\n try:\n correct,count = self.quality_fis(fis)\n except Exception as err:\n print err\n correct = 0\n return correct",
"def _set_target_quality(self):\n best_model = clone(self.model)\n # train and avaluate the model on the full size of potential dataset\n best_model.fit(self.dataset.train_data, np.ravel(self.dataset.train_labels))\n test_prediction = best_model.predict(self.dataset.test_data) \n max_quality = self.quality_method(self.dataset.test_labels, test_prediction)\n # the target_quality after which the episode stops is a proportion of the max quality\n self.target_quality = self.tolerance_level*max_quality",
"def evaluate_question(self):\n self.get_question_fequency()\n self.count_answers()",
"def fit(self):\n raise NotImplementedError # pragma: no cover",
"def fit(self):\n raise NotImplementedError",
"def fit(self):\n raise NotImplementedError('')",
"def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score",
"def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._imequalize(results)\n return results",
"def fit():\n pass",
"def fit_test(self):",
"def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1",
"def evaluate_prediction(self):\n\n # ratio_train = self.evaluate_data(self.train_x, self.train_y)\n ratio_test = self.evaluate_data(self.test_x, self.test_y)\n\n print(\"\\n*NAIVE BAYES:\")\n # print(\"Test1: {}%\".format(ratio_dev*100))\n print(\"Test: {} %\".format(ratio_test*100))",
"def eval(self):\n\n # How many questions we get right at precision@1.\n correct = 0\n\n total = self._analogy_questions.shape[0]\n start = 0\n while start < total:\n limit = start + 2500\n sub = self._analogy_questions[start:limit, :]\n idx = self._predict(sub)\n start = limit\n for question in xrange(sub.shape[0]):\n if sub[question, 3] in idx[question]:\n # print(sub[question, 3], idx[question])\n correct += 1\n\n print()\n print(\"Eval %4d/%d accuracy @ top5= %4.1f%%\" % (correct, total,\n correct * 100. / total)\n )",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100",
"def get_apply_scale(self, applyScaleFactor, scale_quality = 1.0):\n v = self.scale * self.scale_quality * scale_quality\n if applyScaleFactor:\n v *= self.scale_factor\n return v",
"def report_edp(self):\n lmfit.report_fit(self.edp_par)\n print(\"chisqr = {0:.3f}\".format(self.edp.chisqr))",
"def evalFitness(self, target, targetThumb):\n\t\t# Don't recompute\n\t\tif type(self.score) == int:\n\t\t\treturn self.score\n\n\t\tif self.image == None:\n\t\t\traise Exception, \"Image for fitness evaluation was not generated\"\n\n\t\t# XXX: Compare against full-size image, NOT thumbnail\n\t\tpixOrig = target.load()\n\t\tpixGen = self.image.load()\n\t\twidth = self.image.size[0]\n\t\theight = self.image.size[1]\n\n\t\t# 10/03/02 - already if 0:, so commenting out entirely. \n\t\t#if 0: # Test/debug - run faster with a thumbnail compare\n\t\t#\tpixOrig = targetThumb.load()\n\t\t#\timThumb = self.image.resize(targetThumb.size)\n\t\t#\tpixGen = imThumb.load()\n\t\t#\twidth = imThumb.size[0]\n\t\t#\theight = imThumb.size[1]\n\n\t\t# We're using a maximization scoring heuristic \n\t\tscore = 0\n\t\tfor i in range(width):\n\t\t\tfor j in range(height):\n\t\t\t\t# The closer the channels are, the higher the score\n\t\t\t\tdiffR = 255 - abs(pixOrig[i,j][0] - pixGen[i,j][0])\n\t\t\t\tdiffG = 255 - abs(pixOrig[i,j][1] - pixGen[i,j][1])\n\t\t\t\tdiffB = 255 - abs(pixOrig[i,j][2] - pixGen[i,j][2])\n\n\t\t\t\t# TODO - test new scoring heuristic\n\t\t\t\tscore += diffR*diffR + diffG*diffG + diffB*diffB \n\n\t\tself.score = score\n\n\t\tmaxScore = width * height * 255.0 * 255.0 * 3 # TODO - test new scoring heuristic.\n\t\tself.scorePercent = score / maxScore\n\n\t\treturn self.score",
"def _evaluate_during_fit(self, test_loader, epoch):",
"def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._adjust_contrast_img(results, self.factor)\n return results",
"def evaluate_fitness(self):\r\n fitness = 0.0\r\n # TO-DO: Write your fitness evaluation code here:\r\n \r\n if self.graph is not None:\r\n try:\r\n fitness = 1.0 / algorithms.sdr_widgerson(\r\n self.graph, self.values[0], self.values[1]\r\n )\r\n except RuntimeError:\r\n fitness = 1 / (2 ** 63)\r\n else:\r\n raise RuntimeError(\"Particle graph has not been set!\")\r\n \r\n # END TO-DO\r\n self.current_fitness = fitness\r\n \r\n # Check if we've got a better result\r\n if fitness > self.best_fitness:\r\n # Update the best performance accordingly\r\n self.best_fitness = fitness\r\n self.personal_best = self.values[:]\r\n self.best_coloring = copy.deepcopy(self.graph)\r\n \r\n self.sync = True",
"def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)",
"def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score",
"def evaluate(self, threshold=0.5):\n pass",
"def evaluate_design(self): # to update the pr object"
] | [
"0.7156114",
"0.684477",
"0.657213",
"0.65535766",
"0.64372116",
"0.63338166",
"0.6271525",
"0.62638944",
"0.62541264",
"0.62117773",
"0.6151621",
"0.606927",
"0.60639936",
"0.60630333",
"0.60455346",
"0.6035397",
"0.59774005",
"0.5925373",
"0.59253347",
"0.59226096",
"0.5920876",
"0.58688194",
"0.5834841",
"0.58056056",
"0.57814926",
"0.5769499",
"0.5764029",
"0.57627344",
"0.57594365",
"0.5749619"
] | 0.7637929 | 0 |
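The `_evaluate_quality` row above pairs a base-class stub with overrides (see the first negative) that grade a fit by criteria such as SNR, linewidth, and reduced chi-square. The following is a small, self-contained sketch of that grading pattern; the `FitSummary` container and the threshold values are assumptions made for illustration, not the library's API.

```python
# Hedged sketch of a quality check in the spirit of the _evaluate_quality
# overrides above. FitSummary and the thresholds are illustrative assumptions.
from dataclasses import dataclass

@dataclass
class FitSummary:
    reduced_chisq: float  # goodness-of-fit statistic
    snr: float            # signal-to-noise ratio of the fitted amplitude
    freq: float           # fitted peak frequency
    x_min: float          # lower edge of the scanned range
    x_max: float          # upper edge of the scanned range

def evaluate_quality(fit: FitSummary) -> str:
    criteria = [
        fit.x_min <= fit.freq <= fit.x_max,  # fitted frequency inside the scan range
        fit.reduced_chisq < 3,               # acceptable goodness of fit
        fit.snr > 2,                         # signal clearly above the noise floor
    ]
    return "good" if all(criteria) else "bad"

print(evaluate_quality(FitSummary(reduced_chisq=1.2, snr=5.0, freq=4.9, x_min=4.5, x_max=5.5)))
# -> good
```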
Extract curve data from experiment data. This method internally populates two types of curve data. | def _extract_curves(
self, experiment_data: ExperimentData, data_processor: Union[Callable, DataProcessor]
):
self.__processed_data_set = list()
def _is_target_series(datum, **filters):
try:
return all(datum["metadata"][key] == val for key, val in filters.items())
except KeyError:
return False
# Extract X, Y, Y_sigma data
data = experiment_data.data()
x_key = self._get_option("x_key")
try:
x_values = [datum["metadata"][x_key] for datum in data]
except KeyError as ex:
raise DataProcessorError(
f"X value key {x_key} is not defined in circuit metadata."
) from ex
if isinstance(data_processor, DataProcessor):
y_values, y_sigmas = data_processor(data)
if y_sigmas is None:
y_sigmas = np.full(y_values.shape, np.nan)
else:
y_values, y_sigmas = zip(*map(data_processor, data))
# Store metadata
metadata = np.asarray([datum["metadata"] for datum in data], dtype=object)
# Store shots
shots = np.asarray([datum.get("shots", np.nan) for datum in data])
# Format data
x_values = np.asarray(x_values, dtype=float)
y_values = np.asarray(y_values, dtype=float)
y_sigmas = np.asarray(y_sigmas, dtype=float)
# Find series (invalid data is labeled as -1)
data_index = np.full(x_values.size, -1, dtype=int)
for idx, series_def in enumerate(self.__series__):
data_matched = np.asarray(
[_is_target_series(datum, **series_def.filter_kwargs) for datum in data], dtype=bool
)
data_index[data_matched] = idx
# Store raw data
raw_data = CurveData(
label="raw_data",
x=x_values,
y=y_values,
y_err=y_sigmas,
shots=shots,
data_index=data_index,
metadata=metadata,
)
self.__processed_data_set.append(raw_data)
# Format raw data
formatted_data = self._format_data(raw_data)
if formatted_data.label != "fit_ready":
raise AnalysisError(f"Not expected data label {formatted_data.label} != fit_ready.")
self.__processed_data_set.append(formatted_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ex_curve(data):\n rv = []\n try:\n ef = autocomplete_curve_function(data[0])\n ed = autocomplete_curve_direction(data[1])\n period = 2\n try:\n period = max(int(data[2]), 2)\n except ValueError:\n pass\n data = data[3:]\n if not data:\n if consts.VERBOSE:\n print('ERROR: No data for curve')\n return []\n f = CURVE_FUNCTIONS[ef][ed]\n maxi = len(data)-1\n for i in range(period):\n v = f(float(i) / float(period-1))\n di = int(round(v*float(maxi)))\n rv.append(data[di])\n\n except Exception as e:\n if consts.VERBOSE:\n print('ERROR: Curve failed [%s]'%e)\n\n return rv",
"def curve(self, data):\n x, y, y_smoothed = data\n\n curve_keys = ['color', 'linestyle', 'alpha', 'label']\n curve_config = self.config.filter(curve_keys, prefix='curve_')\n\n curves = self.ax.plot(x, y, **curve_config)\n\n if y_smoothed is not None:\n smoothed_color = scale_lightness(curve_config['color'], scale=.5)\n smoothed_label = self.config.get('smoothed_label')\n _ = self.ax.plot(x, y_smoothed, label=smoothed_label, color=smoothed_color, linestyle='--')\n\n return curves",
"def extract_with_curve(curves_dir, save_curve_files, star_id_col, period_col, time_col, mag_col, err_col, data):\n star_id = data[star_id_col]\n curve_path = get_curve_path(curves_dir, star_id)\n\n if path.exists(curve_path):\n #curve = get_curve(curve_path)\n curve = get_curve_simple(curve_path, time_col, mag_col, err_col)\n\n return extract_features(data, star_id_col, period_col, curve, curves_dir, save_curve_files)\n else:\n return data",
"def build_data(self):\n raise RuntimeError(\"Your Gaussian covariance code needs to \"\n \"over-ride the build_data method so it knows how to \"\n \"load the observed data\")\n #using info in self.options,\n #like filenames etc,\n #build x to which we must interpolate\n #return x, y",
"def getData(self):\n # organize the points into segments\n # 1. make sure there is an on curve\n haveOnCurve = False\n for point in self._points:\n if point.segmentType is not None:\n haveOnCurve = True\n break\n # 2. move the off curves to front of the list\n if haveOnCurve:\n _prepPointsForSegments(self._points)\n # 3. ignore double points on start and end\n firstPoint = self._points[0]\n lastPoint = self._points[-1]\n if firstPoint.segmentType is not None and lastPoint.segmentType is not None:\n if firstPoint.coordinates == lastPoint.coordinates:\n if (firstPoint.segmentType in [\"line\", \"move\"]):\n del self._points[0]\n else:\n raise AssertionError(\"Unhandled point type sequence\")\n # done\n return self._points",
"def extract_features(data, star_id_col, period_col, light_curve, curves_dir, save_curve_files):\n new_data = data.copy()\n\n #columns = [\"lt\", \"mr\", \"ms\", \"b1std\", \"rcb\", \"std\", \"mad\", \"mbrp\"\n # , \"pa\", \"totvar\", \"quadvar\", \"fslope\", \"lc_rms\"\n # , \"lc_flux_asymmetry\", \"sm_phase_rms\", \"periodicity\", \"chi_2\", \"iqr\"\n # , \"roms\", \"ptpv\", \"fourier_amplitude\", \"R_21\", \"R_31\", \"f_phase\"\n # , \"phi_21\", \"phi_31\", \"skewness\", \"kurtosis\", \"residual_br_fa_ratio\"\n # , \"shapiro_wilk\", \"slopes_10per\", \"slopes_90per\", \"cum_sum\"\n # , \"neumann_eta\", \"crosses\", \"abv_1std\", \"bel_1std\", \"abv_1std_slopes\"\n # , \"bel_1std_slopes\", \"num_obs\"\n columns = [\"ampl\", \"lt\", \"mr\", \"ms\", \"b1std\", \"rcb\", \"std\", \"mad\", \"mbrp\"\n , \"pa\", \"pst\", \"pdfp\", \"sk\", \"fpr20\", \"fpr35\", \"fpr50\", \"fpr65\", \"fpr80\"\n , \"lc_flux_asymmetry\", \"chi_2\", \"iqr\"\n , \"roms\", \"ptpv\", \"stetson_I\", \"stetson_K\", \"stetson_J\", \"skewness\", \"kurtosis\", \"residual_br_fa_ratio\"\n , \"shapiro_wilk\", \"slopes_10per\", \"slopes_90per\", \"cum_sum\"\n , \"neumann_eta\", \"abv_1std\", \"bel_1std\", \"abv_1std_slopes\"\n , \"bel_1std_slopes\", \"num_obs\"\n ]\n\n star_id = data[star_id_col]\n #period = data[period_col]\n\n times_dirty = light_curve[:,0]\n magnitudes_dirty = light_curve[:,1]\n errors_dirty = light_curve[:,2]\n\n times, magnitudes, errors = clean_light_curve(times_dirty, magnitudes_dirty, errors_dirty)\n fluxes = magnitudes_to_fluxes(magnitudes)\n\n num_obs = times.shape[0]\n slopes = curve_slopes(times, magnitudes)\n\n #phase_times, phase_magnitudes, phase_errors = phase_fold(times, magnitudes, errors, period)\n #phase_slopes = curve_slopes(phase_times, magnitudes)\n\n #sm_phase_times, sm_phase_magnitudes = smooth_curve(phase_times, magnitudes)\n #sm_phase_slopes = curve_slopes(sm_phase_times, sm_phase_magnitudes)\n\n #ls_period = lomb_scargle_periodogram(times, magnitudes, errors)\n\n ampl = amplitude(magnitudes)\n lt = linear_trend(times, magnitudes)\n mr = magnitude_ratio(magnitudes)\n ms = maximum_slope(times, magnitudes)\n b1std = beyond_1std(magnitudes)\n rcb = r_cor_bor(magnitudes)\n std = np.std(magnitudes)\n mad = median_absolute_deviation(magnitudes)\n mbrp = median_buffer_range_percentage(magnitudes)\n pa = percent_amplitude(fluxes)\n pst = pair_slope_trend(times, fluxes)\n pdfp = percent_difference_flux_percentile(fluxes)\n sk = small_kurtosis(magnitudes)\n\n fpr20 = flux_percentage_ratio(fluxes, 40, 60, 5, 95)\n fpr35 = flux_percentage_ratio(fluxes, 32.5, 67.5, 5, 95)\n fpr50 = flux_percentage_ratio(fluxes, 25, 75, 5, 95)\n fpr65 = flux_percentage_ratio(fluxes, 17.5, 82.5, 5, 95)\n fpr80 = flux_percentage_ratio(fluxes, 10, 90, 5, 95)\n\n #totvar = total_variation(sm_phase_magnitudes)\n #quadvar = total_variation(sm_phase_magnitudes)\n #fslope = maximum_slope(sm_phase_times, sm_phase_magnitudes)\n\n lc_rms = root_mean_square(magnitudes)\n lc_flux_asymmetry = light_curve_flux_asymmetry(magnitudes, lc_rms)\n #sm_phase_rms = root_mean_square(sm_phase_magnitudes)\n #periodicity = periodicity_metric(lc_rms, sm_phase_rms)\n\n chi_2 = chi_2_test(magnitudes, errors)\n iqr = interquartile_range(magnitudes)\n roms = robust_median_statistic(magnitudes, errors)\n ptpv = peak_to_peak_variability(magnitudes, errors)\n stetson_I = welch_stetson_I(magnitudes, errors)\n stetson_J = welch_stetson_J(magnitudes, errors)\n stetson_K = welch_stetson_K(magnitudes, errors)\n\n #fourier_order = 3\n #fourier_coef = 
fourier_decomposition(phase_times, phase_magnitudes, fourier_order)\n #fourier_amplitude = fourier_R(fourier_coef, 1)\n #R_21 = fourier_R_1(fourier_coef, 2)\n #R_31 = fourier_R_1(fourier_coef, 3)\n #f_phase = fourier_phi(fourier_coef, 1)\n #phi_21 = fourier_phi_1(fourier_coef, 2)\n #phi_31 = fourier_phi_1(fourier_coef, 3)\n\n skewness = ss.skew(magnitudes)[0]\n kurtosis = ss.kurtosis(magnitudes)[0]\n residual_br_fa_ratio = residual_bright_faint_ratio(magnitudes)\n shapiro_wilk = ss.shapiro(magnitudes)[0]\n #slopes_10per = np.percentile(phase_slopes[np.logical_not(np.isinf(phase_slopes))], 10)\n #slopes_90per = np.percentile(phase_slopes[np.logical_not(np.isinf(phase_slopes))], 90)\n slopes_10per = np.percentile(slopes[np.logical_not(np.isinf(slopes))], 10)\n slopes_90per = np.percentile(slopes[np.logical_not(np.isinf(slopes))], 90)\n cum_sum = cumulative_sum_range(magnitudes)\n neumann_eta = von_neumann_eta(magnitudes)\n\n #crosses = mean_crosses(sm_phase_magnitudes)\n #abv_1std = above_1std(sm_phase_magnitudes)\n #bel_1std = beyond_1std(sm_phase_magnitudes) - abv_1std\n abv_1std = above_1std(magnitudes)\n bel_1std = beyond_1std(magnitudes) - abv_1std\n\n abv_1std_slopes, bel_1std_slopes = above_below_1std_slopes(slopes)\n\n #new_data[columns] = [lt, mr, ms, b1std, rcb, std, mad, mbrp\n # , pa, totvar, quadvar, fslope, lc_rms\n # , lc_flux_asymmetry, sm_phase_rms, periodicity, chi_2, iqr\n # , roms, ptpv, fourier_amplitude, R_21, R_31, f_phase, phi_21, phi_31\n # , skewness, kurtosis, residual_br_fa_ratio, shapiro_wilk\n # , slopes_10per, slopes_90per, cum_sum, neumann_eta, crosses\n # , abv_1std, bel_1std, abv_1std_slopes, bel_1std_slopes, num_obs\n # ]\n new_data[columns] = [ampl, lt, mr, ms, b1std, rcb, std, mad, mbrp\n , pa, pst, pdfp, sk, fpr20, fpr35, fpr50, fpr65, fpr80\n , lc_flux_asymmetry, chi_2, iqr\n , roms, ptpv, stetson_I, stetson_K, stetson_J, skewness, kurtosis, residual_br_fa_ratio, shapiro_wilk\n , slopes_10per, slopes_90per, cum_sum, neumann_eta\n , abv_1std, bel_1std, abv_1std_slopes, bel_1std_slopes, num_obs\n ]\n\n #if save_curve_files:\n # save_curve(curves_dir, star_id, \"phase\", phase_times, magnitudes, [\"phase\", \"Mag\"])\n # save_curve(curves_dir, star_id, \"sm_phase\", sm_phase_times, sm_phase_magnitudes, [\"phase\", \"Mag\"])\n\n return new_data",
"def create_equity_curve_dataframe(self):\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n\n curve_symbols = curve[self.symbol_list]\n lists = ['buy_times', 'sell_times', 'hold', 'total_times',\n 'cash', 'commission', 'total']\n curve_lists = curve[lists]\n curve = pd.concat([curve_symbols, curve_lists], axis=1)\n\n curve['returns'] = curve['total'].pct_change()\n curve.loc[curve.index[0], 'returns'] = 0.0\n curve['equity_curve'] = (1.0 + curve['returns']).cumprod()\n self.equity_curve = curve\n\n positions = pd.DataFrame(self.all_positions)\n positions.set_index('datetime', inplace=True)\n self.positions = positions\n\n prices = pd.DataFrame(self.all_prices)\n prices.set_index('datetime', inplace=True)\n self.prices = prices",
"def initDataParms(self):\n self.xpos = self.pltw.curvelist[self.blkno].xvinfo.vidx\n self.data = self.pltw.blklst[self.blkno] # original data block\n self.idata = None # interpolated data\n (self.nvec, self.npt) = self.data.shape\n self.xmin = (self.data[self.xpos]).min()\n self.xmax = (self.data[self.xpos]).max()\n self.xspan = self.xmax - self.xmin\n if self.parent.test:\n self.dx = self.xspan / (self.npt * 5)",
"def process_data(data, star_id_col, period_col, curves_dir, time_col, mag_col, err_col, save_curve_files=False):\n data = add_feature_columns(data)\n\n extract_func = partial(extract_with_curve, curves_dir, save_curve_files\n , star_id_col, period_col, time_col, mag_col, err_col)\n new_data = data.apply(extract_func, axis=1)\n\n return new_data",
"def _curve_fitting(self, phase, info):\n start_date = info[self.START]\n end_date = info[self.END]\n population = info[self.N]\n trend = Trend(\n self.jhu_data, population, self.country, province=self.province,\n start_date=start_date, end_date=end_date\n )\n trend.analyse()\n df = trend.result()\n if trend.rmsle() > self.max_rmsle:\n df[f\"{self.S}{self.P}\"] = None\n # Get min value for vline\n r_value = int(df[self.R].min())\n # Rename the columns\n phase = self.INITIAL if phase == \"0th\" else phase\n df = df.rename({f\"{self.S}{self.P}\": f\"{phase}{self.P}\"}, axis=1)\n df = df.rename({f\"{self.S}{self.A}\": f\"{phase}{self.A}\"}, axis=1)\n df = df.rename({f\"{self.R}\": f\"{phase}_{self.R}\"}, axis=1)\n return (df, r_value)",
"def _parse_data(self):\n for i, val in enumerate(self.values.keys()):\n x_, y_ = [], []\n xy = self.values[val]\n for value in self.values.index:\n x_.append(xy[value][0])\n y_.append(xy[value][1])\n\n self.set_and_get(\"x_\", val, x_)\n self.set_and_get(\"y_\", val, y_)",
"def _data():\n data = {s: {} for s in systems}\n\n # PbPb2760 and PbPb5020 dNch/deta\n for system, args, name in [\n ('PbPb2760', (880049, 1), 'D(N)/DETARAP'),\n ('PbPb5020', (1410589, 2),\n r'$\\mathrm{d}N_\\mathrm{ch}/\\mathrm{d}\\eta$'),\n ]:\n data[system]['dNch_deta'] = {None: HEPData(*args).dataset(name)}\n\n # PbPb2760 transverse energy\n # ignore bin 0-5 since it's redundant with 0-2.5 and 2.5-5\n dset = HEPData(1427723, 1).dataset('$E_{T}$', ignore_bins=[(0, 5)])\n dset['yerr']['sys'] = dset['yerr'].pop('sys,total')\n data['PbPb2760']['dET_deta'] = {None: dset}\n\n # PbPb2760 identified dN/dy and mean pT\n system = 'PbPb2760'\n\n for obs, table, combine_func in [\n ('dN_dy', 31, np.sum),\n ('mean_pT', 32, np.mean),\n ]:\n data[system][obs] = {}\n d = HEPData(1222333, table)\n for key, re_products in [\n ('pion', ['PI+', 'PI-']),\n ('kaon', ['K+', 'K-']),\n ('proton', ['P', 'PBAR']),\n ]:\n dsets = [\n d.dataset(RE='PB PB --> {} X'.format(i))\n for i in re_products\n ]\n\n data[system][obs][key] = dict(\n dsets[0],\n y=combine_func([d['y'] for d in dsets], axis=0),\n yerr={\n e: combine_func([d['yerr'][e] for d in dsets], axis=0)\n for e in dsets[0]['yerr']\n }\n )\n\n # PbPb2760 strange baryon yields\n data['PbPb2760']['dN_dy']['Lambda'] = HEPData(1243863, 23).dataset(\n RE='PB PB --> LAMBDA X'\n )\n\n d = HEPData(1243865, 11)\n for s in ['Xi', 'Omega']:\n data[system]['dN_dy'][s] = d.dataset(\n RE='PB PB --> ({0}- + {0}BAR+) X'.format(s.upper())\n )\n\n # PbPb2760 mean pT fluctuations\n d = HEPData(1307102, 6, reverse=True)\n name = r'$\\sqrt{C_m}/M(p_{\\rm T})_m$'\n # the table only has Npart, but they are actually 5% centrality bins\n width = 5.\n d.cent = [(n*width, (n+1)*width) for n, _ in enumerate(d.y(name))]\n data['PbPb2760']['pT_fluct'] = {None: d.dataset(name, maxcent=60)}\n\n # PbPb2760 and PbPb5020 flows\n for system, tables_nk in [\n ('PbPb5020', [\n (1, [(2, 2), (2, 4)]),\n (2, [(3, 2), (4, 2)]),\n ]),\n ('PbPb2760', [\n (3, [(2, 2), (2, 4)]),\n (4, [(3, 2), (4, 2)]),\n ]),\n ]:\n data[system]['vnk'] = {}\n\n for table, nk in tables_nk:\n d = HEPData(1419244, table)\n for n, k in nk:\n data[system]['vnk'][n, k] = d.dataset(\n 'V{}{{{}{}}}'.format(\n n, k, ', |DELTAETA|>1' if k == 2 else ''\n ),\n maxcent=(70 if n == 2 else 50)\n )\n\n # PbPb2760 central flows vn{2}\n system, obs = 'PbPb2760', 'vnk_central'\n data[system][obs] = {}\n\n for n, table, sys_err_frac in [(2, 11, .025), (3, 12, .040)]:\n dset = HEPData(900651, table).dataset()\n # the (unlabeled) errors in the dataset are actually stat\n dset['yerr']['stat'] = dset['yerr'].pop('sum')\n # sys error is not provided -- use estimated fractions\n dset['yerr']['sys'] = sys_err_frac * dset['y']\n data[system][obs][n, 2] = dset\n\n # PbPb2760 flow correlations\n for obs, table in [\n ('sc', 1),\n ('sc_normed', 2),\n ('sc_central', 3),\n ('sc_normed_central', 4)\n ]:\n d = HEPData(1452590, table)\n data['PbPb2760'][obs] = {\n mn: d.dataset('SC({},{})'.format(*mn))\n for mn in [(3, 2), (4, 2)]\n }\n\n return data",
"def parse_curve_arrays(values):\n if \"x_data\" in values and not isinstance(values[\"x_data\"], np.ndarray):\n values[\"x_data\"] = np.array(values[\"x_data\"])\n if \"y_data\" in values and not isinstance(values[\"y_data\"], np.ndarray):\n values[\"y_data\"] = np.array(values[\"y_data\"])\n shape = values[\"x_data\"].shape[0], values[\"y_data\"].shape[0]\n if shape[0] == 0 or shape[1] == 0 or shape[0] != shape[1]:\n raise ValueError(f\"Invalid shape: {shape}\")\n return values",
"def get_data(self, data):\n self.data = {}\n self.data[ATTR_PM1] = data['current']['values'][0]['value']\n self.data[ATTR_PM25] = data['current']['values'][1]['value']\n self.data[ATTR_PM25_LIMIT] = data['current']['standards'][0]['limit']\n self.data[ATTR_PM25_PERCENT] = (data['current']['standards'][0]\n ['percent'])\n self.data[ATTR_PM10] = data['current']['values'][2]['value']\n self.data[ATTR_PM10_LIMIT] = data['current']['standards'][1]['limit']\n self.data[ATTR_PM10_PERCENT] = (data['current']['standards'][1]\n ['percent'])\n self.data[ATTR_PRESSURE] = data['current']['values'][3]['value']\n self.data[ATTR_HUMIDITY] = data['current']['values'][4]['value']\n self.data[ATTR_TEMPERATURE] = data['current']['values'][5]['value']\n self.data[ATTR_CAQI] = data['current']['indexes'][0]['value']\n self.data[ATTR_CAQI_LEVEL] = (data['current']['indexes'][0]\n ['level'].lower().replace('_', ' '))",
"def _format_data(self, data: CurveData) -> CurveData:\n # take average over the same x value by keeping sigma\n series, xdata, ydata, sigma, shots = multi_mean_xy_data(\n series=data.data_index,\n xdata=data.x,\n ydata=data.y,\n sigma=data.y_err,\n shots=data.shots,\n method=\"shots_weighted\",\n )\n\n # sort by x value in ascending order\n series, xdata, ydata, sigma, shots = data_sort(\n series=series,\n xdata=xdata,\n ydata=ydata,\n sigma=sigma,\n shots=shots,\n )\n\n return CurveData(\n label=\"fit_ready\",\n x=xdata,\n y=ydata,\n y_err=sigma,\n shots=shots,\n data_index=series,\n )",
"def get_investigation_data(self, gamma, dps, gamma_err=0, dps_err=0, fit_type='exponential'):\n if fit_type == 'exponential':\n coherence_length = 2 * np.log(2) / (gamma / dps)\n\n # fourier transform of an exponential decay with decay constant g = lorentzian with hwhm g\n # decay constant = 1/g\n # Fourier(exp(-2pi k0 x)) = (1/pi)(k0 / (k^2 + k0^2))\n # 2 pi k0 = g, k0 = hwhm = g / (2 pi)\n # fwhm = g / pi (*c)\n spectral_width_hz = (gamma / dps) / np.pi * constants.c\n\n elif fit_type == 'lorentzian':\n coherence_length_in_motor_steps = 2 * gamma\n coherence_length = coherence_length_in_motor_steps * dps # in metres\n # TODO: change this to in terms of the exponential decay.\n # but the lorentzian curve fit on the interferogram has no physical meaning anyway\n spectral_width_hz = constants.c / (np.pi * coherence_length)\n\n elif fit_type == 'gaussian':\n # gamma is actually sigma\n coherence_length_in_motor_steps = 2 * np.sqrt(2 * np.log(2)) * gamma\n coherence_length = coherence_length_in_motor_steps * dps # in metres\n\n # spectral_width_hz = constants.c / (np.pi * coherence_length)\n # new sigma is pi / sigma\n # idk why it's 1/ (pi * newsigma) and not pi / newsigma\n # TODO: Figure out why gaussian spec width is so narrow.\n spectral_width_hz = constants.c *2* np.sqrt(2 * np.log(2)) / (np.pi * gamma * dps)\n # print(\"%.4e, %.4e\" % (constants.c * 2 * np.sqrt(2 * np.log(2)) * np.pi / (gamma * dps), constants.c / (np.pi * coherence_length)))\n\n\n\n # steps between peaks = 1 wavelength = 1 / freq\n # dps = known_wavelength / (2 * 1 wavelength) = freq * known_wavelength / 2\n frequencies_per_motor_step, magnitudes = self.get_frequencies_with_fourier(fit=False)\n wavelengths = 2 * dps / frequencies_per_motor_step\n scipy_fit, calc_fit = self.find_best_fit_gaussian(x_y=(wavelengths, magnitudes))\n wavelengths_mean, wavelengths_std = scipy_fit[1], scipy_fit[2]\n mean_wavelength = wavelengths_mean\n spectral_width_m = mean_wavelength ** 2 / constants.c * spectral_width_hz\n\n frequencies = constants.c / wavelengths\n\n (amplitude, mean, gamma) = self.find_best_fit_lorentzian(x_y=(frequencies, magnitudes))\n scipy_fit = (amplitude, mean, gamma)\n frequencies_mean, frequencies_std = scipy_fit[1], scipy_fit[2]\n\n ## This section plots the fits to the FT\n # plt.plot(frequencies, magnitudes, '.')\n #\n # x_min, x_max = plt.xlim()\n # lorentzian_fit = lambda fit_params, x: fit_params[0] / (1 + ((x - fit_params[1]) / fit_params[2]) ** 2)\n # fit_x = np.linspace(x_min, x_max, 10000)\n # optimised_lorentzian_fit = lorentzian_fit(scipy_fit, fit_x)\n # fwhm = 2 * scipy_fit[2]\n # plt.plot(fit_x, optimised_lorentzian_fit, 'k', label='SciPy fit (Lorentzian)\\nFWHM: %.4e' % fwhm)\n #\n # gaussian_fit = lambda fit_params, x: fit_params[0] * np.exp(-(x - fit_params[1]) ** 2 / (2 * fit_params[2] ** 2))\n # scipy_fit, calc_fit = self.find_best_fit_gaussian(also_use_scipy=True, x_y=(frequencies, magnitudes))\n # optimised_gaussian_fit = gaussian_fit(scipy_fit, fit_x)\n # fwhm = 2 * np.sqrt(2 * np.log(2)) * scipy_fit[2]\n # plt.plot(fit_x, optimised_gaussian_fit, 'g', label='SciPy fit (G)\\nFWHM: %.4e' % fwhm)\n # plt.legend()\n #\n # plt.show()\n\n print('With Fourier:')\n print('Spectral Width (m): %.5e' % spectral_width_m)\n print('Coherence length: %.5e' % coherence_length)\n print('Mean frequencies: %.5e pm %.5e' % (frequencies_mean, frequencies_std))\n print('Mean wavelength: %.5e pm %.5e' % (mean_wavelength, wavelengths_std))\n\n\n\n steps, unique_steps_between_peaks, unique_steps_counts = 
self.get_steps_between_peaks()\n distances = dps * steps\n # distances_mean, distances_std = np.mean(distances), np.std(distances)\n wavelengths = distances * 2\n wavelengths_mean, wavelengths_std = np.mean(wavelengths), np.std(wavelengths)\n\n mean_wavelength = wavelengths_mean\n\n frequencies = constants.c / wavelengths\n frequencies_mean, frequencies_std = np.mean(frequencies), np.std(frequencies)\n\n spectral_width_m = mean_wavelength ** 2 / constants.c * spectral_width_hz\n print(\"With Step calculation\")\n print('Spectral Width (Hz): %.5e' % spectral_width_hz)\n print('Spectral Width (m): %.5e' % spectral_width_m)\n\n if gamma_err == 0 and dps_err == 0:\n print('Coherence length: %.5e' % coherence_length)\n print('Spectral width (Hz): %.5e' % spectral_width_hz)\n print('Mean frequencies: %.5e pm %.5e' % (frequencies_mean, frequencies_std))\n print('Mean wavelength: %.5e pm %.5e' % (mean_wavelength, wavelengths_std))\n else:\n print(dps_err, 'asdasdasfaegsegeg')\n coherence_length_err = dps_err / dps * coherence_length\n spectral_width_err = np.sqrt((coherence_length_err / (constants.c * mean_wavelength ** 2)) ** 2 +\n (2 * coherence_length / (constants.c * mean_wavelength ** 3)) ** 2)\n print(mean_wavelength ** 2 / coherence_length, 'dlambda')\n print('Coherence length: %.5e pm %.5e' % (coherence_length, coherence_length_err))\n print('Spectral width (Hz): %.5e pm %.5e' % (spectral_width_hz, spectral_width_err))\n print('Mean frequencies: %.5e pm %.5e' % (frequencies_mean, frequencies_std))\n print('Mean wavelength: %.5e pm %.5e' % (mean_wavelength, wavelengths_std))\n\n data = {'coherence_length': coherence_length,\n 'spectral_width_hz': spectral_width_hz,\n 'spectral_width_m': spectral_width_m,\n 'mean_wavelength': mean_wavelength,\n 'mean_frequency': frequencies_mean,\n }\n return data",
"def set_data(self, data):\n\n # Meta data and data\n _meta, _data = data['meta'], data['data']\n\n # Store timestamp of current data\n self._timestamp = _meta['timestamp']\n\n # Set data rate if available\n if 'data_rate' in _meta:\n self._drate = _meta['data_rate']\n\n # Get data rate from data in order to set time axis\n if self._time is None:\n if 'data_rate' in _meta:\n self._drate = _meta['data_rate']\n shape = int(round(self._drate) * self._period + 1)\n self._time = np.zeros(shape=shape)\n self._data = OrderedDict([(ch, np.zeros(shape=shape)) for i, ch in enumerate(self.channels)])\n\n # Fill data\n else:\n\n # If we made one cycle, start again from the beginning\n if self._idx == self._time.shape[0]:\n self._idx = 0\n self._filled = True\n\n # If we start a new cycle, set new start timestamp and offset\n if self._idx == 0:\n self._start = self._timestamp\n self._offset = 0\n\n # Set time axis\n self._time[self._idx] = self._start - self._timestamp + self._offset\n\n # Increment index\n self._idx += 1\n\n # Set data in curves\n for ch in _data:\n # Shift data to the right and set 0th element\n self._data[ch][1:] = self._data[ch][:-1]\n self._data[ch][0] = _data[ch]\n\n if not self._filled:\n self.curves[ch].setData(self._time[self._data[ch] != 0], self._data[ch][self._data[ch] != 0])\n else:\n self.curves[ch].setData(self._time, self._data[ch])",
"def _build_parsed_values(self):\n\n match = DATA_MATCHER.match(self.raw_data)\n if not match:\n raise SampleException(\"CtdParserDataParticle: No regex match of \\\n parsed sample data: [%s]\", self.raw_data)\n try:\n temp = float(match.group(2))\n cond = float(match.group(1))\n press = float(match.group(3))\n o2 = float(match.group(4))\n except (ValueError, TypeError, IndexError) as ex:\n raise SampleException(\"Error (%s) while decoding parameters in data: [%s]\"\n % (ex, self.raw_data))\n\n result = [{DataParticleKey.VALUE_ID: CtdpfkParserDataParticleKey.TEMPERATURE,\n DataParticleKey.VALUE: temp},\n {DataParticleKey.VALUE_ID: CtdpfkParserDataParticleKey.CONDUCTIVITY,\n DataParticleKey.VALUE: cond},\n {DataParticleKey.VALUE_ID: CtdpfkParserDataParticleKey.PRESSURE,\n DataParticleKey.VALUE: press},\n {DataParticleKey.VALUE_ID: CtdpfkParserDataParticleKey.OXYGEN,\n DataParticleKey.VALUE: o2}]\n log.trace('CtdpfkParserDataParticle: particle=%s', result)\n return result",
"def build_curve(curve_data, parent_curve = None, parent_type = 'transform'):\n if parent_curve is None:\n parent_curve = pm.createNode(parent_type, name = naming.get_unique_name('fr_curve'))\n else:\n # clean shapes\n shapes = parent_curve.getShapes()\n pm.delete(shapes)\n\n for key, value in curve_data.items():\n new_curve = pm.curve(\n d = value.get('degree', 3),\n per = value.get('periodic', False),\n p = [tuple(o) for o in value.get('point', [[0, 0, 0]])],\n k = value.get('knot', [1.0, 0.0])\n )\n curve_shapes = new_curve.getShapes()\n pm.parent(curve_shapes, parent_curve, addObject = True, shape = True)\n pm.delete(new_curve)\n\n curve_color = value.get('color', 17)\n for each_crv in curve_shapes:\n # colorize curve\n color_attr = 'overrideColor'\n if isinstance(curve_color, list):\n each_crv.overrideRGBColors.set(True)\n color_attr = 'overrideColorRGB'\n each_crv.attr(color_attr).set(curve_color)\n each_crv.overrideEnabled.set(True)\n\n # rename each curve\n each_crv.rename('{0}Shape{1}'.format(parent_curve.nodeName(), key.replace('curve', '')))\n\n return parent_curve",
"def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z",
"def get_data(self, data_type):\n\n sigma = self.sigma\n # Check for positive definite of sigma\n nobs = self.nobs\n npred = self.npred\n nresp = self.nresp\n rotation_x = self.rotation_x\n rotation_y = self.rotation_y\n mu_x = self.mu_x\n mu_y = self.mu_y\n ntest = self.ntest\n out = dict()\n if data_type in ['train', 'both']:\n out['train'] = simulate(nobs, npred, sigma, rotation_x, nresp, rotation_y, mu_x, mu_y)\n if data_type in ['test', 'both'] and ntest is not None:\n out['test'] = simulate(ntest, npred, sigma, rotation_x, nresp, rotation_y, mu_x, mu_y)\n return out",
"def _get_estimateData(self):\n\n if self._estimates is not None:\n return\n\n df = pd.read_html(self._url_est)[0]\n\n # clean data\n df = df.dropna(how='all', axis=0)\n df = df.dropna(how='all', axis=1)\n df = df.replace(\"—\", nan)\n df.iloc[0,:] = df.iloc[0,:].bfill()\n\n\n # format data\n columns = df.pop(0).tolist()\n columns[:2] = [\"Year\", \"Estimates\"]\n\n df = df.T\n df.columns = columns\n df = df.set_index([\"Year\", \"Estimates\"])\n\n currency_est = df.index[0][1]\n df.rename_axis(index={\"Estimates\": f\"Estimates {currency_est}\"})\n\n # convert dtypes\n df = df.astype(float, errors='ignore')\n df.loc[:,\"Number of Estimates\"] = df.loc[:,\"Number of Estimates\"].bfill()\n df.loc[:,\"Number of Estimates\"] = df.loc[:,\"Number of Estimates\"].astype(pd.Int64Dtype(), errors='ignore')\n \n # Inconsitent results\n # Either file bug report or find mistake\n # df.index = df.index.set_levels(\n # levels=[f\"Earnings Per Share {currency_est}\", 'Growth %'],\n # level=\"Estimates\"\n # )\n\n df.index = utils.rename_MultiIndex(df.index, currency_est)\n\n self._estimates = df\n self.currency_estimates = currency_est",
"def preproc_data(data):\n # Load data manually from Yahoo! finance\n\n # Initialize TP Matrix\n # 3-dimension: # of stock * 18 * 18\n # narray\n _TP_matrixs = np.zeros(\n (len(data.ix[stockname]) - 230, 18, 18), dtype=np.bool)\n old = data.ix[stockname]['close'][229]\n TP_matrixs = pd.Panel(_TP_matrixs, items=data.ix[stockname].index[230:])\n label = np.zeros((len(data.ix[stockname]) - 230), dtype=np.float)\n dataindex = 0\n dataset = []\n # Construct TP Matrix\n for TP_matrix in TP_matrixs.iteritems():\n # Extract raw close price of last 230 days\n # pdb.set_trace()\n tp_features = np.zeros((18, 18), dtype=np.bool)\n _list_CP = data.ix[stockname][data.ix[stockname].index <\n TP_matrix[0]]['close'].tolist()\n list_CP = _list_CP[len(_list_CP) - 230: len(_list_CP)]\n close = data.ix[stockname]['close'][dataindex + 230]\n label = (close - old) / old\n old = close\n # col[0, 8] for Upward TP Matrix\n # col[9, 17] for Downward TP Matrix\n for col in range(0, 18):\n D = columns[col][0] - 1\n for row in range(0, 18):\n # For each element of TP Matrix\n for TP in range(D, columns[col][1]):\n # Change ratio of stock on day D with repect to the price\n # at TP\n C_TPD = (list_CP[TP] - list_CP[D]) / list_CP[D]\n if C_TPD * 100 >= rows[row][0] and C_TPD * 100 < rows[row][1]:\n TP_matrix[1][row][col] = True\n tp_features[row][col] = True\n break\n\n sample = DataSet()\n sample.tp_features = tp_features\n sample.labels = label\n dataindex += 1\n dataset.append(sample)\n\n filename = 'data/TP_matrix_' + stockname + '.pkl'\n output = open(filename, 'wb')\n # # Pickle dictionary using protocol 0.\n pickle.dump(TP_matrixs, output)\n output.close()\n return dataset",
"def curves(self):\n return self._curve_reg",
"def read_curve_Ye(self, fname='./Curve_Ye.lst'):\n\t\ttry:\n\t\t\tself.create_group(name='Curve_Ye')\n\t\texcept:\n\t\t\tpass\n\t\tgroup = self['Curve_Ye']\n\t\tf = open(fname, 'r')\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\ttry:\n\t\t\t\tage, vel = line.strip().split()\n\t\t\t\tarr = np.append(arr, np.array([[float(age.replace(',','')), float(vel)]]), axis=0)\n\t\t\texcept ValueError:\n\t\t\t\tperiod = line.strip().replace('# ','').replace(' sec phase','')\n\t\t\t\tif bool(period):\n\t\t\t\t\tarr = np.array([[],[]]).T\n\t\t\t\t\tname = period+'_sec_phase'\n\t\t\t\telse:\n\t\t\t\t\tgroup.create_dataset(name=name, data=arr)\n\t\ttry:\n\t\t\tgroup[name]\n\t\texcept:\n\t\t\tgroup.create_dataset(name=name, data=arr)\n\t\tpass",
"def __init__(self, x, y, data):\n super().__init__(x=x, y=y, data=data, has_analytic_ft=False)\n self._ee = {}\n self._mtf = None\n self._nu_p = None\n self._dnx = None\n self._dny = None",
"def GetCurve(self, *args):\n return _Adaptor3d.Adaptor3d_HCurve_GetCurve(self, *args)",
"def create_curve(data_tab, state):\n global width, prev_index, min_temp, max_temp, max_humid, min_humid\n\n def min_max(arr, arr_size):\n \"\"\"\n Helper to get the min and max of the tab\n \"\"\"\n max_t = arr[0]\n min_t = arr[0]\n for i in range(arr_size):\n if arr[i] > max_t:\n max_t = arr[i]\n if arr[i] < min_t:\n min_t = arr[i]\n return min_t, max_t\n\n # The max difference between two temp; if greater than 8, then we need to move vertically\n min_data, max_data = min_max(data_tab, len(data_tab))\n min_max_diff = max(8, max_data - min_data)\n\n # Update min/max values of each curve\n if state == \"temp\":\n min_temp = min(min_data, min_temp)\n max_temp = max(max_data, max_temp)\n elif state == \"humid\":\n min_humid = min(min_data, min_humid)\n max_humid = max(max_data, max_humid)\n\n width = len(data_tab)\n\n normalized_data = data_tab.copy()\n\n for i in range(len(data_tab)):\n normalized_data[i] = ((data_tab[i] - min_data)*7) / min_max_diff\n\n full_data_tab = [[0 for x in range(8)] for y in range(width)]\n\n # The first data that we collected is gonna be centered on the y-axis\n base_data = normalized_data[0]\n\n # Change the base_index depending on max variation of temp\n base_index = 7 - round(base_data)\n\n # Records value for when we change displayed_data\n prev_index = -1\n for i in range(width):\n diff = round(normalized_data[i] - base_data)\n curr_index = base_index - diff\n full_data_tab[i][curr_index] = 1\n\n # COMMENT NEXT FULL BLOCK TO REMOVE VERTICAL PIXELS\n if i > 0:\n delta_index = curr_index - prev_index\n if delta_index > 1:\n for j in range(prev_index + 1, curr_index):\n full_data_tab[i][j] = 1\n if delta_index < -1:\n for j in range(curr_index + 1, prev_index):\n full_data_tab[i][j] = 1\n prev_index = curr_index\n # END OF BLOCK TO COMMENT\n\n\n return full_data_tab",
"def add_datum(self, x, fields):\n\t\n\t\tfor name, value in fields.iteritems():\n\t\t\tif name not in self.curves:\n\t\t\t\tcurve = QwtPlotCurve()\n\t\t\t\tcurve.attach(self)\n\t\t\t\tself.curves[name] = [curve, [], []]\n\t\t\t\n\t\t\tstuff = self.curves[name]\n\t\t\tstuff[1].append(x)\n\t\t\tstuff[2].append(value)",
"def preprocess(self, data):\n if self.mode == 'image':\n data = self.transpose(data)\n data = self.dilate(data)\n data = self.mask(data)\n\n if self.mode == 'histogram':\n data = self.flatten(data)\n data = self.mask(data)\n\n if self.mode == 'curve':\n if isinstance(data, np.ndarray) or (isinstance(data, list) and contains_numbers(data)):\n if hasattr(self, 'objects'):\n xdata = self.main_object.get_xdata()\n else:\n xdata = range(len(data))\n\n data = [xdata, data]\n\n smoothed = self.smooth(data[1].squeeze() if data[1].ndim > 1 else data[1])\n data = [*data, smoothed]\n\n if self.mode == 'loss':\n if isinstance(data, tuple):\n loss, lr = data\n else:\n loss, lr = data, None\n\n if loss is None:\n smoothed = None\n else:\n smoothed = self.smooth(loss)\n\n data = [loss, smoothed, lr]\n\n return data"
] | [
"0.6097595",
"0.59153706",
"0.5720998",
"0.56816626",
"0.55583704",
"0.5506386",
"0.5458484",
"0.54564375",
"0.5437237",
"0.5421108",
"0.54172045",
"0.54148316",
"0.54103583",
"0.53691983",
"0.5329527",
"0.52916807",
"0.5291498",
"0.52747667",
"0.5228793",
"0.5213298",
"0.5205867",
"0.52008516",
"0.51841956",
"0.51400673",
"0.51394284",
"0.513886",
"0.5135976",
"0.51357704",
"0.51286477",
"0.51274884"
] | 0.6852174 | 0 |
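The `_extract_curves` row above classifies measured data points into fit series by matching each circuit's metadata against per-series filters, marking unmatched points with -1. A standalone sketch of that classification step follows; the sample `data` list and `series_filters` are invented for the example and are not the library's data model.

```python
# Minimal sketch, not the library implementation: assigning data points to
# series by metadata filters, mirroring the data_index logic in _extract_curves.
import numpy as np

data = [
    {"metadata": {"xval": 0.1, "series": "X"}, "yval": 0.2},
    {"metadata": {"xval": 0.2, "series": "Y"}, "yval": 0.5},
    {"metadata": {"xval": 0.3, "series": "X"}, "yval": 0.8},
]
series_filters = [{"series": "X"}, {"series": "Y"}]

def matches(datum, **filters):
    try:
        return all(datum["metadata"][k] == v for k, v in filters.items())
    except KeyError:
        return False

# -1 marks points that belong to no known series, following the convention above
data_index = np.full(len(data), -1, dtype=int)
for idx, filt in enumerate(series_filters):
    for i, datum in enumerate(data):
        if matches(datum, **filt):
            data_index[i] = idx

print(data_index)  # -> [0 1 0]
```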
Return type of experiment. | def _experiment_type(self) -> str:
try:
return self.__experiment_metadata["experiment_type"]
except (TypeError, KeyError):
# Ignore experiment metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def experiment_type(filename):\n assert(isinstance(filename, str))\n exp_type = filename.split('/')[-1].split('.')[-2].split('_')[1:-1]\n exp_type = '_'.join(exp_type)\n logger.debug('{} is of type {}'.format(filename, exp_type))\n return exp_type",
"def get_test_type(self):\n return self.test_type",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"type\")",
"def type(self):\n\t\treturn self.type_",
"def test_type(self):\n return self._test_type",
"def type(self):\n return self._instrument_type"
] | [
"0.701162",
"0.69926196",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6970925",
"0.6876343",
"0.6837675",
"0.68322605"
] | 0.8181823 | 0 |
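The `_experiment_type` row, like the `_physical_qubits`, `_backend`, and `_experiment_options` rows that follow, relies on the same defensive pattern: read a key from possibly-missing experiment metadata and fall back to None instead of raising. A small generic sketch of that pattern is given below; the metadata values are hypothetical.

```python
# Hedged sketch of the defensive metadata-getter pattern shared by these rows:
# return None when the metadata dict is unset or the key is absent.
def get_meta(metadata, key):
    try:
        return metadata[key]
    except (TypeError, KeyError):
        # metadata may be None (not set) or simply lack the key
        return None

print(get_meta({"experiment_type": "T1"}, "experiment_type"))  # -> T1
print(get_meta(None, "experiment_type"))                       # -> None (metadata unset)
print(get_meta({}, "physical_qubits"))                         # -> None (key missing)
```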
Getter for physical qubit indices. | def _physical_qubits(self) -> List[int]:
try:
return list(self.__experiment_metadata["physical_qubits"])
except (TypeError, KeyError):
# Ignore experiment metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indices(self):\n return self._kbounded_partitions",
"def get_indices(self):\r\n return self._indices",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def jw_number_indices(n_electrons, n_qubits):\n occupations = itertools.combinations(range(n_qubits), n_electrons)\n indices = [sum([2**n for n in occupation]) for occupation in occupations]\n return indices",
"def get_index(self, qubit_name):\n if isinstance(qubit_name, int):\n return qubit_name\n try:\n return self.qubitDict[qubit_name]\n except KeyError:\n return self.readoutDict[qubit_name]",
"def npix(self):\n return self._npix",
"def getIndices(self):\r\n return self._indices",
"def indices(self):\n return self.index.indices",
"def get_index(self):\n return self.disk.partitions.index(self)",
"def indices(self):\n return range(len(self))",
"def _generate_qubits(self) -> Sequence[cirq.Qid]:\n return cirq.LineQubit.range(openfermion.count_qubits(self.hamiltonian))",
"def get_multi_index(self):\n return self.basis.elements",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def get_index(self):\n return (np.sqrt(self.dielectric))",
"def getPidx(self):\n return int(bytes(self.keeper.getGbl(b\"pidx\")), 16)",
"def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)",
"def get_data_idx(self)->list:\n return self.__data_idx",
"def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices",
"def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check",
"def indices(self, position=None):\n \n raise NotImplementedError()",
"def get_index_array(self):\n return self.region_pairs",
"def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )",
"def get_ind(self,*q):\n try:\n if( len(q) == 1 ):\n x = q[0][:,0]\n y = q[0][:,1]\n z = q[0][:,2]\n else:\n x = q[0]\n y = q[1]\n z = q[2]\n try:\n cx = (x+0.5).astype(na.int32)\n cy = (y+0.5).astype(na.int32)\n cz = (z+0.5).astype(na.int32)\n except:\n cx = int(x+0.5)\n cy = int(y+0.5)\n cz = int(z+0.5)\n ind = cx + cy*self.dim[0]+cz*self.dim[0]*self.dim[1]\n return ind\n except Exception as error:\n print(error)\n return None",
"def childWellIndices(self):\n return self._wellIndices",
"def idx(self):\n if self._idx is None:\n self._loads()\n return self._idx",
"def q(self) -> List[Qubit]:\n return self._qubits",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def getLandmarkindices(self):\n return self.subsetindices",
"def getLandmarkindices(self):\n return self.subsetnodes_indices"
] | [
"0.66136235",
"0.63095975",
"0.6267602",
"0.62219816",
"0.6181241",
"0.6174731",
"0.6135461",
"0.5949904",
"0.5915764",
"0.58870023",
"0.58162713",
"0.58161235",
"0.5783942",
"0.5758474",
"0.57396424",
"0.56639326",
"0.5659677",
"0.5655574",
"0.56555057",
"0.5643877",
"0.5641117",
"0.5616011",
"0.5614245",
"0.56123275",
"0.56088084",
"0.55849946",
"0.55779344",
"0.557766",
"0.55542743",
"0.553536"
] | 0.6819982 | 0 |
Getter for backend object. | def _backend(self) -> Backend:
return self.__backend | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backend(self):\n # This never changes (so no read locking needed).\n return self._backend",
"def get_backend():\n return _BACKEND",
"def get_backend():\n return Connection()",
"def get_backend():\n return __SETTINGS__._BACKEND",
"def backend_object(self, id):\n return self.model.Suite.everything.get(id=id)",
"def get_backend(self):\n return self.analyze_db_task(constants.TRAIN_DB).backend",
"def get_profile_backend(self, profile):\n return self._get_attribute(profile, 'backend')",
"def getBackend(self):\n return self.header['BACKEND']",
"def backend(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"backend\")",
"def backend(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"backend\")",
"def backend(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"backend\")",
"def get_storage_backend(self):\n return self.client.info()['Driver']",
"def get_backend():\n return sys.modules[__name__]",
"def get_backend():\n return sys.modules[__name__]",
"def get_backend(\n self,\n backend_id: str,\n ) -> Optional[Type[BaseCertificateStorageBackend]]:\n return self.get('backend_id', backend_id)",
"def get_backend():\n\n return sys.modules[__name__]",
"def get_backend():\n\n return sys.modules[__name__]",
"def get_backend() -> BiasCorrectionAlgoBackend:",
"def backend(self) -> str:\n return self.__class__.BACKEND_NAME",
"def which_backend(self, backend_name, type_name, conf):\n print(\"backend_name is : <{}>\".format(backend_name))\n if backend_name not in self.records.keys():\n print(\"first get object\")\n self.port_obj = PortFactory.backends.get(backend_name)(type_name, conf)\n print(\"get object from factory : {}\".format(self.port_obj))\n self.records[backend_name] = [type_name]\n else:\n print(\"re-init get object\")\n self.port_obj.reinit(type_name,conf)\n self.records[backend_name].append(type_name)\n print(\"factory records: {}\".format(self.records))\n return self.port_obj",
"def get_backend(cls, backend=None):\n return backend if backend else aws.S3Backend(\n category=cls.default_category, bucket_name=cls.default_bucket)",
"def get_backend():\n backend_path = settings.CALENDAR_BACKEND\n\n try:\n backend_modulename, backend_classname = backend_path.rsplit('.', 1)\n except ValueError:\n raise ImproperlyConfigured('{0} isn\\'t a backend module'.format(backend_path))\n\n # See if the module has already been imported.\n try:\n backend_module = sys.modules[backend_modulename]\n except KeyError:\n # ok, then import it.\n try:\n backend_module = import_module(backend_modulename)\n except ImportError as e:\n raise ImproperlyConfigured('Error importing backend {0}: \"{1}\"'.format(backend_modulename, e))\n\n try:\n backend_class = getattr(backend_module, backend_classname)\n except AttributeError:\n raise ImproperlyConfigured(\n 'Backend module \"{0}\" does not define a \"{1}\" class'.format(backend_modulename, backend_classname)\n )\n\n backend_instance = backend_class()\n\n if not isinstance(backend_instance, BaseBackend):\n raise ImproperlyConfigured(\n 'Backend class \"{0}\" is not a subclass of \"django_calendar.backends.BaseBackend\"'.format(backend_classname)\n )\n\n return backend_instance",
"def find_backend(cls) -> IBackend:\n cls.Lock.acquire()\n try:\n return cls._load_backend()\n finally:\n cls.Lock.release()",
"def backend_info_get(context, host):\n result = _backend_info_query(context, host)\n return result",
"def get_backend(name):\n return _DEFAULT_PROVIDER.get_backend(name)",
"def backend_info(self):\n\t\treturn {'valid': False}",
"def backend_plugin(self):\n return None",
"def get_service_from_backend(backend):\n return ExperimentData.get_service_from_provider(backend.provider)",
"def get_backend_class(backend):\n # NOTE(sirp): avoiding circular import\n from glance.store.http import HTTPBackend\n from glance.store.s3 import S3Backend\n from glance.store.swift import SwiftBackend\n from glance.store.filesystem import FilesystemBackend\n\n BACKENDS = {\n \"file\": FilesystemBackend,\n \"http\": HTTPBackend,\n \"https\": HTTPBackend,\n \"swift\": SwiftBackend,\n \"s3\": S3Backend}\n\n try:\n return BACKENDS[backend]\n except KeyError:\n raise UnsupportedBackend(\"No backend found for '%s'\" % backend)",
"def backend(self) -> Dict[str, Any]:\n # Terraform can only have one backend configured; this formats the\n # data to make it easier to work with\n return [\n {\"type\": k, \"config\": v}\n for k, v in self.terraform_block.get(\n \"backend\", {None: cast(Dict[str, str], {})}\n ).items()\n ][0]"
] | [
"0.79623115",
"0.76062316",
"0.7487151",
"0.7388517",
"0.72666264",
"0.7238288",
"0.71368957",
"0.7134815",
"0.7086919",
"0.7014857",
"0.6954881",
"0.6920183",
"0.6918006",
"0.6918006",
"0.6909595",
"0.690837",
"0.690837",
"0.67804307",
"0.6756487",
"0.6732792",
"0.66931",
"0.6665388",
"0.6654722",
"0.6649953",
"0.65956706",
"0.6552942",
"0.65468407",
"0.65332264",
"0.6532747",
"0.65171516"
] | 0.8183749 | 0 |
Return the experiment options of given job index. | def _experiment_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["experiment_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _run_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"run_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _transpile_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"transpile_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _analysis_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"analysis_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def index(self):\n return self._quote_get('option/index')",
"def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option",
"def get_job_options(self):\n argument = [string.Template(self.queue.template[key]).substitute(\n {key : value}) for key, value in self.options.items()]\n\n if len(self.custom_options) > 0:\n argument += self.custom_options\n\n return argument",
"def get_step_settings_at_index(self, index):\n return self.routine_template.get_step_settings_at_index(index)",
"def get_multitask_egs_opts(egs_dir, egs_prefix=\"\",\n archive_index=-1,\n use_multitask_egs=False):\n multitask_egs_opts = \"\"\n egs_suffix = \".{0}\".format(archive_index) if archive_index != -1 else \"\"\n\n if use_multitask_egs:\n output_file_name = (\"{egs_dir}/{egs_prefix}output{egs_suffix}.ark\"\n \"\".format(egs_dir=egs_dir,\n egs_prefix=egs_prefix,\n egs_suffix=egs_suffix))\n output_rename_opt = \"\"\n if os.path.isfile(output_file_name):\n output_rename_opt = (\"--outputs=ark:{output_file_name}\".format(\n output_file_name=output_file_name))\n\n weight_file_name = (\"{egs_dir}/{egs_prefix}weight{egs_suffix}.ark\"\n \"\".format(egs_dir=egs_dir,\n egs_prefix=egs_prefix,\n egs_suffix=egs_suffix))\n weight_opt = \"\"\n if os.path.isfile(weight_file_name):\n weight_opt = (\"--weights=ark:{weight_file_name}\"\n \"\".format(weight_file_name=weight_file_name))\n\n multitask_egs_opts = (\n \"{output_rename_opt} {weight_opt}\".format(\n output_rename_opt=output_rename_opt,\n weight_opt=weight_opt))\n\n return multitask_egs_opts",
"def get_options(self):\n option_list = []\n if self.can_analyze():\n option_list.append((EpOp.TASK_ANALYZE, None))\n\n option_tup = self.predict_option()\n if option_tup:\n option_list.append(option_tup)\n\n option_tup = self.check_option()\n if option_tup:\n option_list.append(option_tup)\n\n return option_list",
"def get_options(self):\r\n return self._option_values",
"def options(self, component, workflow, index):\n\n # pylint: disable=R0912, R0915\n options = {\"type\": component}\n\n st.markdown(\"---\")\n\n # Lookup component configuration\n # - Runtime components have config defined within tasks\n # - Pipeline components have config defined at workflow root\n config = None\n if workflow:\n if component in [\"service\", \"translation\"]:\n # Service config is found in tasks section\n tasks = list(workflow[\"workflow\"].values())[0][\"tasks\"]\n tasks = [task for task in tasks if task.get(\"task\") == component or task.get(\"action\") == component]\n if tasks:\n config = tasks[0]\n else:\n config = workflow.get(component)\n\n if component == \"embeddings\":\n st.markdown(f\"**{index + 1}.) Embeddings Index** \\n*Index workflow output*\")\n options[\"index\"] = self.text(\"Embeddings storage path\", component, config, \"index\")\n options[\"path\"] = self.text(\"Embeddings model path\", component, config, \"path\", \"sentence-transformers/nli-mpnet-base-v2\")\n options[\"upsert\"] = self.boolean(\"Upsert\", component, config, \"upsert\")\n options[\"content\"] = self.boolean(\"Content\", component, config, \"content\")\n\n elif component in (\"segmentation\", \"textractor\"):\n if component == \"segmentation\":\n st.markdown(f\"**{index + 1}.) Segment** \\n*Split text into semantic units*\")\n else:\n st.markdown(f\"**{index + 1}.) Textract** \\n*Extract text from documents*\")\n\n options[\"sentences\"] = self.boolean(\"Split sentences\", component, config, \"sentences\")\n options[\"lines\"] = self.boolean(\"Split lines\", component, config, \"lines\")\n options[\"paragraphs\"] = self.boolean(\"Split paragraphs\", component, config, \"paragraphs\")\n options[\"join\"] = self.boolean(\"Join tokenized\", component, config, \"join\")\n options[\"minlength\"] = self.number(\"Min section length\", component, config, \"minlength\")\n\n elif component == \"service\":\n st.markdown(f\"**{index + 1}.) Service** \\n*Extract data from an API*\")\n options[\"url\"] = self.text(\"URL\", component, config, \"url\")\n options[\"method\"] = self.select(\"Method\", component, config, \"method\", [\"get\", \"post\"], 0)\n options[\"params\"] = self.text(\"URL parameters\", component, config, \"params\")\n options[\"batch\"] = self.boolean(\"Run as batch\", component, config, \"batch\", True)\n options[\"extract\"] = self.text(\"Subsection(s) to extract\", component, config, \"extract\")\n\n if options[\"params\"]:\n options[\"params\"] = {key: None for key in self.split(options[\"params\"])}\n if options[\"extract\"]:\n options[\"extract\"] = self.split(options[\"extract\"])\n\n elif component == \"summary\":\n st.markdown(f\"**{index + 1}.) Summary** \\n*Abstractive text summarization*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"sshleifer/distilbart-cnn-12-6\")\n options[\"minlength\"] = self.number(\"Min length\", component, config, \"minlength\")\n options[\"maxlength\"] = self.number(\"Max length\", component, config, \"maxlength\")\n\n elif component == \"tabular\":\n st.markdown(f\"**{index + 1}.) 
Tabular** \\n*Split tabular data into rows and columns*\")\n options[\"idcolumn\"] = self.text(\"Id columns\", component, config, \"idcolumn\")\n options[\"textcolumns\"] = self.text(\"Text columns\", component, config, \"textcolumns\")\n options[\"content\"] = self.text(\"Content\", component, config, \"content\")\n\n if options[\"textcolumns\"]:\n options[\"textcolumns\"] = self.split(options[\"textcolumns\"])\n\n if options[\"content\"]:\n options[\"content\"] = self.split(options[\"content\"])\n if len(options[\"content\"]) == 1 and options[\"content\"][0] == \"1\":\n options[\"content\"] = options[\"content\"][0]\n\n elif component == \"transcription\":\n st.markdown(f\"**{index + 1}.) Transcribe** \\n*Transcribe audio to text*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"facebook/wav2vec2-base-960h\")\n\n elif component == \"translation\":\n st.markdown(f\"**{index + 1}.) Translate** \\n*Machine translation*\")\n options[\"target\"] = self.text(\"Target language code\", component, config, \"args\", \"en\")\n\n return options",
"def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out",
"def AcceleratorExperiments(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('accelerator_experiments', default)\n return [HEP.AcceleratorExperimentObject(i) for i in tmp]",
"def get_options(self):\n return []",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n options.update_options(\n circuit_order=\"RIRIRI\",\n )\n return options",
"def get_simulation_options(self):\n return self.opts",
"def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]",
"def get_index(self, _quals):\n return self._options['index']",
"def get(ctx, job):\n\n def get_experiment():\n try:\n response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)\n cache.cache(config_manager=ExperimentManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n get_experiment_details(response)\n\n def get_experiment_job():\n try:\n response = PolyaxonClient().experiment_job.get_job(user,\n project_name,\n _experiment,\n _job)\n cache.cache(config_manager=ExperimentJobManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.resources:\n get_resources(response.resources.to_dict(), header=\"Job resources:\")\n\n response = Printer.add_status_color(response.to_light_dict(\n humanize_values=True,\n exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']\n ))\n Printer.print_header(\"Job info:\")\n dict_tabulate(response)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job()\n else:\n get_experiment()",
"def get_options(self):\n\t\treturn self.options",
"def get_all_options(self): \n return self._options.items()",
"def data(\n self,\n index: Optional[Union[int, slice, str]] = None,\n ) -> Union[Dict, List[Dict]]:\n self._retrieve_data()\n if index is None:\n return self._result_data.copy()\n if isinstance(index, (int, slice)):\n return self._result_data[index]\n if isinstance(index, str):\n return [data for data in self._result_data if data.get(\"job_id\") == index]\n raise TypeError(f\"Invalid index type {type(index)}.\")",
"def get_testing_options(cls):\n return [{'error': [(1, NotImplementedError)],\n 'kwargs': {'initial_state': {'a': int(1)}}}]",
"def experiments(ctx, **kw):\n if not ctx.invoked_subcommand:\n ctx.invoke(list_experiments, **kw)\n else:\n if _params_specified(kw):\n print(\n \"options cannot be listed before command ('%s')\"\n % ctx.invoked_subcommand)",
"def _get_options(self):\n return self.options",
"def parse_options():\n parent_parser = common.base_parser()\n\n # We create top level parser\n parser = ArgumentParser(\n description=help_msgs.EXPERIMENT_PARSER,\n parents=[parent_parser],\n epilog=help_msgs.PARSER_EPILOG %\n {'cli': 'experiment', 'option': 'submit'},\n formatter_class=RawTextHelpFormatter)\n\n subparsers = parser.add_subparsers(dest='command')\n\n submit_parser = subparsers.add_parser(\n 'submit', help='submit user experiment',\n epilog=help_msgs.SUBMIT_EPILOG, formatter_class=RawTextHelpFormatter)\n\n submit_parser.add_argument('-l', '--list', action='append',\n dest='nodes_list', required=True,\n type=exp_resources_from_str,\n help=\"experiment list\")\n\n submit_parser.add_argument('-n', '--name', help='experiment name')\n\n submit_parser.add_argument('-d', '--duration', required=True, type=int,\n help='experiment duration in minutes')\n\n submit_parser.add_argument('-r', '--reservation', type=int,\n help=('experiment schedule starting : seconds '\n 'since 1970-01-01 00:00:00 UTC'))\n\n submit_parser.add_argument('-p', '--print',\n dest='print_json', action='store_true',\n help='print experiment submission')\n\n # ####### STOP PARSER ###############\n stop_parser = subparsers.add_parser('stop', help='stop user experiment')\n stop_parser.add_argument('-i', '--id', dest='experiment_id', type=int,\n help='experiment id submission')\n\n # ####### GET PARSER ###############\n get_parser = subparsers.add_parser(\n 'get',\n epilog=help_msgs.GET_EPILOG,\n help='get user\\'s experiment',\n formatter_class=RawTextHelpFormatter)\n\n get_parser.add_argument('-i', '--id', dest='experiment_id', type=int,\n help='experiment id')\n\n get_group = get_parser.add_mutually_exclusive_group(required=True)\n get_group.add_argument(\n '-r', '--resources', dest='get_cmd', action='store_const',\n const='resources', help='get an experiment resources list')\n get_group.add_argument(\n '-ri', '--resources-id', dest='get_cmd', action='store_const',\n const='id', help=('get an experiment resources id list '\n '(EXP_LIST format : 1-34+72)'))\n\n get_group.add_argument(\n '-s', '--exp-state', dest='get_cmd', action='store_const',\n const='state', help='get an experiment state')\n get_group.add_argument(\n '-p', '--print', dest='get_cmd', action='store_const',\n const='', help='get an experiment submission')\n get_group.add_argument(\n '-a', '--archive', dest='get_cmd', action='store_const',\n const='data', help='get an experiment archive (tar.gz)')\n\n # --list with its options\n get_group.add_argument(\n '-l', '--list', dest='get_cmd', action='store_const',\n const='experiment_list', help='get user\\'s experiment list')\n\n get_parser.add_argument('--offset', default=0, type=int,\n help='experiment list start index')\n\n get_parser.add_argument('--limit', default=0, type=int,\n help='experiment list lenght')\n\n get_parser.add_argument('--state', help='experiment list state filter')\n\n # ####### LOAD PARSER ###############\n load_parser = subparsers.add_parser('load', epilog=help_msgs.LOAD_EPILOG,\n help='load and submit user experiment',\n formatter_class=RawTextHelpFormatter)\n\n load_parser.add_argument('-f', '--file', dest='path_file',\n required=True, help='experiment path file')\n\n load_parser.add_argument('-l', '--list', dest='firmware_list', default=[],\n type=(lambda s: s.split(',')),\n help='comma separated firmware(s) path list')\n\n # ####### INFO PARSER ###############\n info_parser = subparsers.add_parser('info', epilog=help_msgs.INFO_EPILOG,\n help='resources description list',\n 
formatter_class=RawTextHelpFormatter)\n\n info_parser.add_argument('--site', help='resources list filter by site')\n # subcommand\n info_group = info_parser.add_mutually_exclusive_group(required=True)\n info_group.add_argument('-l', '--list', dest='list_id',\n action='store_false', help='list resources')\n info_group.add_argument('-li', '--list-id', dest='list_id',\n action='store_true',\n help=('resources id list by archi and state '\n '(EXP_LIST format : 1-34+72)'))\n\n # ####### WAIT PARSER ###############\n wait_parser = subparsers.add_parser(\n 'wait', help='wait user experiment started',\n epilog=help_msgs.WAIT_EPILOG, formatter_class=RawTextHelpFormatter)\n\n wait_parser.add_argument('-i', '--id', dest='experiment_id', type=int,\n help='experiment id submission')\n\n wait_parser.add_argument(\n '--state', default='Running',\n help=\"wait states `State1,State2` or Finished, default 'Running'\")\n wait_parser.add_argument(\n '--step', default=5, type=int,\n help=\"Wait time in seconds between each check\")\n wait_parser.add_argument(\n '--timeout', default=float('+inf'), type=float,\n help=\"Max time to wait in seconds\")\n\n return parser",
"async def get_options(self):",
"def get_step_settings_at_index(self, index):\n settings = {}\n settings.update(copy.copy(self.global_settings))\n settings.update(copy.copy(self[index][2]))\n return settings",
"def get_options_lookup():\r\n qiime_config = load_qiime_config()\r\n result = {}\r\n result['fasta_as_primary_input'] =\\\r\n make_option('-i', '--input_fasta_fp', type=\"existing_filepath\",\r\n help='path to the input fasta file')\r\n result['otu_table_as_primary_input'] =\\\r\n make_option('-i', '--otu_table_fp', type=\"existing_filepath\",\r\n help='path to the input OTU table (i.e., the output from make_otu_table.py)')\r\n result['otu_map_as_primary_input'] =\\\r\n make_option('-i', '--otu_map_fp', type=\"existing_filepath\",\r\n help='path to the input OTU map (i.e., the output from pick_otus.py)')\r\n result['log_fp'] =\\\r\n make_option('-l', '--log_fp', type=\"new_filepath\",\r\n help='path to write the log file')\r\n result['input_fasta'] =\\\r\n make_option('-f', '--input_fasta_fp', type=\"existing_filepath\",\r\n help='path to the input fasta file')\r\n result['output_dir'] =\\\r\n make_option('-o', '--output_dir', type=\"new_dirpath\",\r\n help='path to the output directory')\r\n result['output_fp'] =\\\r\n make_option('-o', '--output_fp', type=\"new_filepath\",\r\n help='the output filepath')\r\n result['output_biom_fp'] =\\\r\n make_option('-o', '--output_biom_fp', type=\"new_filepath\",\r\n help='the output otu table in biom format (recommended extension: .biom)')\r\n result['mapping_fp'] =\\\r\n make_option('-m', '--mapping_fp', type=\"existing_filepath\",\r\n help='the mapping filepath')\r\n\r\n # Define options used by the workflow scripts\r\n result['jobs_to_start_workflow'] =\\\r\n make_option('-O', '--jobs_to_start', type='int',\r\n help='Number of jobs to start. NOTE: you must also'\r\n ' pass -a to run in parallel, this defines the number of'\r\n ' jobs to be started if and only if -a is passed'\r\n ' [default: %default]',\r\n default=qiime_config['jobs_to_start'])\r\n\r\n # Define options used by the parallel scripts\r\n result['jobs_to_start'] =\\\r\n make_option('-O', '--jobs_to_start', type='int',\r\n help='Number of jobs to start [default: %default]',\r\n default=qiime_config['jobs_to_start'])\r\n result['retain_temp_files'] =\\\r\n make_option('-R', '--retain_temp_files', action='store_true',\r\n help='retain temporary files after runs complete ' +\r\n '(useful for debugging) [default: %default]',\r\n default=False)\r\n result['suppress_submit_jobs'] =\\\r\n make_option('-S', '--suppress_submit_jobs', action='store_true',\r\n help='Only split input and write commands file - don\\'t submit ' +\r\n 'jobs [default: %default]', default=False)\r\n result['poll_directly'] =\\\r\n make_option('-T', '--poll_directly', action='store_true',\r\n help='Poll directly for job completion rather than running ' +\r\n 'poller as a separate job. If -T is specified this script will ' +\r\n 'not return until all jobs have completed. 
[default: %default]',\r\n default=False)\r\n result['cluster_jobs_fp'] =\\\r\n make_option('-U', '--cluster_jobs_fp',\r\n help='path to cluster jobs script (defined in qiime_config) ' +\r\n ' [default: %default]',\r\n default=qiime_config['cluster_jobs_fp'] or\r\n 'start_parallel_jobs.py')\r\n result['suppress_polling'] =\\\r\n make_option('-W', '--suppress_polling', action='store_true',\r\n help='suppress polling of jobs and merging of results ' +\r\n 'upon completion [default: %default]',\r\n default=False)\r\n result['job_prefix'] =\\\r\n make_option('-X', '--job_prefix', help='job prefix ' +\r\n '[default: descriptive prefix + random chars]')\r\n result['seconds_to_sleep'] =\\\r\n make_option('-Z', '--seconds_to_sleep', type='int',\r\n help='Number of seconds to sleep between checks for run ' +\r\n ' completion when polling runs [default: %default]',\r\n default=qiime_config['seconds_to_sleep'] or 60)\r\n\r\n return result"
] | [
"0.71390533",
"0.6975858",
"0.6919278",
"0.621385",
"0.59916735",
"0.580255",
"0.5618967",
"0.549171",
"0.54512733",
"0.5414998",
"0.53518206",
"0.5308031",
"0.5284357",
"0.5283639",
"0.5231703",
"0.5185954",
"0.5171701",
"0.51661193",
"0.50663817",
"0.50663465",
"0.50529015",
"0.5050409",
"0.5048999",
"0.5032158",
"0.499205",
"0.49903998",
"0.49892807",
"0.49842077",
"0.49800178",
"0.49769393"
] | 0.80677307 | 0 |
Returns the analysis options of given job index. | def _analysis_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["analysis_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _run_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"run_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _transpile_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"transpile_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option",
"def index(self):\n return self._quote_get('option/index')",
"def get_options(self):\n option_list = []\n if self.can_analyze():\n option_list.append((EpOp.TASK_ANALYZE, None))\n\n option_tup = self.predict_option()\n if option_tup:\n option_list.append(option_tup)\n\n option_tup = self.check_option()\n if option_tup:\n option_list.append(option_tup)\n\n return option_list",
"def index_parse_args(parser):\n \n parser.add_argument(\"--gcsa_index_cores\", type=int,\n help=\"number of threads during the gcsa indexing step\")\n parser.add_argument(\"--xg_index_cores\", type=int,\n help=\"number of threads during the xg indexing step\")\n parser.add_argument(\"--gbwt_index_cores\", type=int,\n help=\"number of threads during the gbwt indexing step\") \n\n parser.add_argument(\"--index_name\", type=str, default='index',\n help=\"name of index files. <name>.xg, <name>.gcsa etc.\")\n\n parser.add_argument(\"--gcsa_opts\", type=str,\n help=\"Options to pass to gcsa indexing.\")\n \n parser.add_argument(\"--minimizer_opts\", type=str,\n help=\"Options to pass to minimizer indexing.\")\n\n parser.add_argument(\"--vcf_phasing\", nargs='+', type=make_url, default=[],\n help=\"Import phasing information from VCF(s) into xg (or GBWT with --gbwt_index)\")\n parser.add_argument(\"--vcf_phasing_regions\", nargs='+', default=[],\n help=\"Hint the relevant chrom:start-end regions to the GBWT indexer, for subregion graphs\")\n parser.add_argument(\"--gbwt_input\", type=make_url,\n help=\"Use given GBWT for GCSA2 pruning\")\n parser.add_argument(\"--gbwt_prune\", action='store_true',\n help=\"Use gbwt for gcsa pruning\")\n parser.add_argument(\"--force_phasing\", type=lambda x:bool(util.strtobool(x)), default=None,\n help=\"If 'True', randomly phase unphased variants and discard unresolveable overlaps for GBWT\")",
"def get_settings(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout)",
"def get_index(self, _quals):\n return self._options['index']",
"def metric_options(self):\n return Optimizer.list_method_options(self.metric_creator.method_dict)",
"def get_step_settings_at_index(self, index):\n return self.routine_template.get_step_settings_at_index(index)",
"def get_options(self):\r\n return self._option_values",
"def index_subparser(parser):\n\n # Add the Toil options so the job store is the first argument\n Job.Runner.addToilOptions(parser)\n \n # Options specific to the toil-vg index driver\n parser.add_argument(\"out_store\",\n help=\"output store. All output written here. Path specified using same syntax as toil jobStore\")\n\n parser.add_argument(\"--graphs\", nargs='+', default=[], type=make_url,\n help=\"input graph(s). one per chromosome (separated by space)\")\n\n parser.add_argument(\"--chroms\", nargs='+',\n help=\"name(s) of reference path in graph(s) (separated by space). If --graphs \"\n \" has multiple elements, must be same length/order as --chroms (not needed for xg_index)\")\n\n parser.add_argument(\"--node_mapping\", type=make_url,\n help=\"node mapping file required for gbwt pruning. Created by toil-vg construct\"\n \" (or vg ids -j)\")\n \n parser.add_argument(\"--bwa_index_fasta\", type=make_url,\n help=\"index the given FASTA for BWA MEM alignment\")\n\n # Add common options shared with everybody\n add_common_vg_parse_args(parser)\n\n # Add indexing options\n index_toggle_parse_args(parser)\n index_parse_args(parser)\n \n # Add common docker options\n add_container_tool_parse_args(parser)",
"def options(self, component, workflow, index):\n\n # pylint: disable=R0912, R0915\n options = {\"type\": component}\n\n st.markdown(\"---\")\n\n # Lookup component configuration\n # - Runtime components have config defined within tasks\n # - Pipeline components have config defined at workflow root\n config = None\n if workflow:\n if component in [\"service\", \"translation\"]:\n # Service config is found in tasks section\n tasks = list(workflow[\"workflow\"].values())[0][\"tasks\"]\n tasks = [task for task in tasks if task.get(\"task\") == component or task.get(\"action\") == component]\n if tasks:\n config = tasks[0]\n else:\n config = workflow.get(component)\n\n if component == \"embeddings\":\n st.markdown(f\"**{index + 1}.) Embeddings Index** \\n*Index workflow output*\")\n options[\"index\"] = self.text(\"Embeddings storage path\", component, config, \"index\")\n options[\"path\"] = self.text(\"Embeddings model path\", component, config, \"path\", \"sentence-transformers/nli-mpnet-base-v2\")\n options[\"upsert\"] = self.boolean(\"Upsert\", component, config, \"upsert\")\n options[\"content\"] = self.boolean(\"Content\", component, config, \"content\")\n\n elif component in (\"segmentation\", \"textractor\"):\n if component == \"segmentation\":\n st.markdown(f\"**{index + 1}.) Segment** \\n*Split text into semantic units*\")\n else:\n st.markdown(f\"**{index + 1}.) Textract** \\n*Extract text from documents*\")\n\n options[\"sentences\"] = self.boolean(\"Split sentences\", component, config, \"sentences\")\n options[\"lines\"] = self.boolean(\"Split lines\", component, config, \"lines\")\n options[\"paragraphs\"] = self.boolean(\"Split paragraphs\", component, config, \"paragraphs\")\n options[\"join\"] = self.boolean(\"Join tokenized\", component, config, \"join\")\n options[\"minlength\"] = self.number(\"Min section length\", component, config, \"minlength\")\n\n elif component == \"service\":\n st.markdown(f\"**{index + 1}.) Service** \\n*Extract data from an API*\")\n options[\"url\"] = self.text(\"URL\", component, config, \"url\")\n options[\"method\"] = self.select(\"Method\", component, config, \"method\", [\"get\", \"post\"], 0)\n options[\"params\"] = self.text(\"URL parameters\", component, config, \"params\")\n options[\"batch\"] = self.boolean(\"Run as batch\", component, config, \"batch\", True)\n options[\"extract\"] = self.text(\"Subsection(s) to extract\", component, config, \"extract\")\n\n if options[\"params\"]:\n options[\"params\"] = {key: None for key in self.split(options[\"params\"])}\n if options[\"extract\"]:\n options[\"extract\"] = self.split(options[\"extract\"])\n\n elif component == \"summary\":\n st.markdown(f\"**{index + 1}.) Summary** \\n*Abstractive text summarization*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"sshleifer/distilbart-cnn-12-6\")\n options[\"minlength\"] = self.number(\"Min length\", component, config, \"minlength\")\n options[\"maxlength\"] = self.number(\"Max length\", component, config, \"maxlength\")\n\n elif component == \"tabular\":\n st.markdown(f\"**{index + 1}.) 
Tabular** \\n*Split tabular data into rows and columns*\")\n options[\"idcolumn\"] = self.text(\"Id columns\", component, config, \"idcolumn\")\n options[\"textcolumns\"] = self.text(\"Text columns\", component, config, \"textcolumns\")\n options[\"content\"] = self.text(\"Content\", component, config, \"content\")\n\n if options[\"textcolumns\"]:\n options[\"textcolumns\"] = self.split(options[\"textcolumns\"])\n\n if options[\"content\"]:\n options[\"content\"] = self.split(options[\"content\"])\n if len(options[\"content\"]) == 1 and options[\"content\"][0] == \"1\":\n options[\"content\"] = options[\"content\"][0]\n\n elif component == \"transcription\":\n st.markdown(f\"**{index + 1}.) Transcribe** \\n*Transcribe audio to text*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"facebook/wav2vec2-base-960h\")\n\n elif component == \"translation\":\n st.markdown(f\"**{index + 1}.) Translate** \\n*Machine translation*\")\n options[\"target\"] = self.text(\"Target language code\", component, config, \"args\", \"en\")\n\n return options",
"def data(\n self,\n index: Optional[Union[int, slice, str]] = None,\n ) -> Union[Dict, List[Dict]]:\n self._retrieve_data()\n if index is None:\n return self._result_data.copy()\n if isinstance(index, (int, slice)):\n return self._result_data[index]\n if isinstance(index, str):\n return [data for data in self._result_data if data.get(\"job_id\") == index]\n raise TypeError(f\"Invalid index type {type(index)}.\")",
"def get_options(self):\n return []",
"def getOptions():\n parser = argparse.ArgumentParser(description='Tool to identify a FASTQ files quality score.')\n parser.add_argument('-i','--input',dest='fq', action='store', required=True, help='A FASTQ file [Required]')\n args = parser.parse_args()\n return(args)",
"def get_job_options(self):\n argument = [string.Template(self.queue.template[key]).substitute(\n {key : value}) for key, value in self.options.items()]\n\n if len(self.custom_options) > 0:\n argument += self.custom_options\n\n return argument",
"def app_options(self):\n return [\n self.mgi_strain_report_path,\n self.output().path,\n ]",
"def get_options(self):\n\t\treturn self.options",
"def _get_options(self):\n return self.options",
"def getOptions() :\n usage = ('usage: python submit_all.py -c CONFIG -d DIR ')\n\n parser = OptionParser(usage=usage) \n parser.add_option(\"-c\", \"--config\", dest=\"config\",\n help=(\"The crab script you want to submit \"),\n metavar=\"CONFIG\")\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\",\n help=(\"The crab directory you want to use \"),\n metavar=\"DIR\")\n parser.add_option(\"-f\", \"--datasets\", dest=\"datasets\",\n help=(\"File listing datasets to run over\"),\n metavar=\"FILE\")\n (options, args) = parser.parse_args()\n\n\n if options.config == None or options.dir == None:\n parser.error(usage)\n \n return options",
"def get_all_options(self): \n return self._options.items()",
"def analysis_results(\n self,\n index: Optional[Union[int, slice, str]] = None,\n refresh: bool = False,\n block: bool = True,\n timeout: Optional[float] = None,\n ) -> Union[AnalysisResult, List[AnalysisResult]]:\n if block:\n self._wait_for_futures(\n self._analysis_futures.values(), name=\"analysis\", timeout=timeout\n )\n self._retrieve_analysis_results(refresh=refresh)\n if index is None:\n return self._analysis_results.values()\n\n def _make_not_found_message(index: Union[int, slice, str]) -> str:\n \"\"\"Helper to make error message for index not found\"\"\"\n msg = [f\"Analysis result {index} not found.\"]\n errors = self.errors()\n if errors:\n msg.append(f\"Errors: {errors}\")\n return \"\\n\".join(msg)\n\n if isinstance(index, int):\n if index >= len(self._analysis_results.values()):\n raise ExperimentEntryNotFound(_make_not_found_message(index))\n return self._analysis_results.values()[index]\n if isinstance(index, slice):\n results = self._analysis_results.values()[index]\n if not results:\n raise ExperimentEntryNotFound(_make_not_found_message(index))\n return results\n if isinstance(index, str):\n # Check by result ID\n if index in self._analysis_results:\n return self._analysis_results[index]\n # Check by name\n filtered = [\n result for result in self._analysis_results.values() if result.name == index\n ]\n if not filtered:\n raise ExperimentEntryNotFound(_make_not_found_message(index))\n if len(filtered) == 1:\n return filtered[0]\n else:\n return filtered\n\n raise TypeError(f\"Invalid index type {type(index)}.\")",
"def index_toggle_parse_args(parser):\n parser.add_argument(\"--gcsa_index\", dest=\"indexes\", default=[], action=\"append_const\", const=\"gcsa\",\n help=\"Make a gcsa index for each output graph\")\n parser.add_argument(\"--xg_index\", dest=\"indexes\", action=\"append_const\", const=\"xg\",\n help=\"Make an xg index for each output graph\")\n parser.add_argument(\"--gbwt_index\", dest=\"indexes\", action=\"append_const\", const=\"gbwt\",\n help=\"Make a GBWT index alongside the xg index for each output graph\")\n parser.add_argument(\"--snarls_index\", dest=\"indexes\", action=\"append_const\", const=\"snarls\",\n help=\"Make an snarls file for each output graph\")\n parser.add_argument(\"--trivial_snarls_index\", dest=\"indexes\", action=\"append_const\", const=\"trivial_snarls\",\n help=\"Make a trivial-inclusive snarls file for each output graph\")\n parser.add_argument(\"--distance_index\", dest=\"indexes\", action=\"append_const\", const=\"distance\",\n help=\"Make a (minimum) distance index for each output graph\")\n parser.add_argument(\"--minimizer_index\", dest=\"indexes\", action=\"append_const\", const=\"minimizer\",\n help=\"Make a minimizer index for each output graph\")\n parser.add_argument(\"--id_ranges_index\", dest=\"indexes\", action=\"append_const\", const=\"id_ranges\",\n help=\"Make chromosome id ranges tables (so toil-vg map can optionally split output by chromosome)\")\n parser.add_argument(\"--alt_path_gam_index\", dest=\"indexes\", action=\"append_const\", const=\"alt-gam\",\n help=\"Save alt paths from vg into an indexed GAM\")\n parser.add_argument(\"--xg_alts\", dest=\"indexes\", action=\"append_const\", const=\"xg_alts\",\n help=\"Include alt paths in xg index\")\n parser.add_argument(\"--all_index\", dest=\"indexes\", action=\"store_const\",\n const=[\"gcsa\", \"xg\", \"gbwt\", \"snarls\", \"trivial_snarls\", \"distance\", \"minimizer\", \"id_ranges\"],\n help=\"Equivalent to --gcsa_index --xg_index --gbwt_index --snarls_index --trivial_snarls_index \"\n \"--distance_index --minimizer_index --id_ranges_index\")",
"async def get_options(self):",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"def getOptions():\n\tdescription=\"\"\"This script takes an input fasta file of fusions and identifies all of the identical fusions.\"\"\"\n\tparser = argparse.ArgumentParser(description=description)\n\tparser.add_argument(\"-bowtie\", \"--bowtie_log_names\", dest=\"bowtie\", action='store', required=False, nargs = '*', help=\"bowtie log file names [Optional]\")\n\tparser.add_argument(\"-last\", \"--last_log_names\", dest=\"last\", action='store', required=False, help=\"LAST log file names [Optional]\")\n\tparser.add_argument(\"-treatment\",\"--treatment_name\",dest=\"treatment\",action='store',required=True,nargs= '*', help=\"Treatment variables [Required]\")\n\tparser.add_argument(\"-o\",\"--output_file\",dest=\"output\",action='store',required=True,help=\"Output file name [Required]\")\n\targs = parser.parse_args()\n\tif not args.bowtie and not args.last: #The user should give at least one bowtie or last log argument; otherwise the program does nothing\n\t parser.error('No input logs given; add -bowtie or -last')\n\treturn(args)",
"def extract_index_urls(self, index: int) -> ListLike:\n cmd_pieces = self[index].split()\n index_urls = []\n for i, piece in enumerate(cmd_pieces):\n if piece in [\"--index-url\", \"--extra-index-url\"]:\n index_urls.append(cmd_pieces[i + 1])\n return index_urls",
"def objective_options(self):\n return Optimizer.list_method_options(self.obj_creator.method_dict)"
] | [
"0.6743674",
"0.6663001",
"0.6280733",
"0.6069232",
"0.60599047",
"0.565759",
"0.54964",
"0.5447708",
"0.54197335",
"0.53915113",
"0.53473103",
"0.53200793",
"0.52881956",
"0.52273625",
"0.51928836",
"0.5185036",
"0.5124009",
"0.51195866",
"0.5102956",
"0.5085236",
"0.5047777",
"0.50314057",
"0.5023196",
"0.5021632",
"0.50168717",
"0.500404",
"0.4899992",
"0.4890973",
"0.4884024",
"0.48799425"
] | 0.7922862 | 0 |
Returns the run options of given job index. | def _run_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["run_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _analysis_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"analysis_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def index(self):\n return self._quote_get('option/index')",
"def runoptions(self):\n # outstanding = self.missing_required()\n # if outstanding:\n # raise TypeError('Module missing required parameter: %s' % ', '.join(outstanding))\n return self._runopts",
"def _transpile_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"transpile_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def get_job_options(self):\n argument = [string.Template(self.queue.template[key]).substitute(\n {key : value}) for key, value in self.options.items()]\n\n if len(self.custom_options) > 0:\n argument += self.custom_options\n\n return argument",
"def get_step_settings_at_index(self, index):\n return self.routine_template.get_step_settings_at_index(index)",
"def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option",
"def get_options(self):\n option_list = []\n if self.can_analyze():\n option_list.append((EpOp.TASK_ANALYZE, None))\n\n option_tup = self.predict_option()\n if option_tup:\n option_list.append(option_tup)\n\n option_tup = self.check_option()\n if option_tup:\n option_list.append(option_tup)\n\n return option_list",
"def getOptions() :\n usage = ('usage: python submit_all.py -c CONFIG -d DIR ')\n\n parser = OptionParser(usage=usage) \n parser.add_option(\"-c\", \"--config\", dest=\"config\",\n help=(\"The crab script you want to submit \"),\n metavar=\"CONFIG\")\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\",\n help=(\"The crab directory you want to use \"),\n metavar=\"DIR\")\n parser.add_option(\"-f\", \"--datasets\", dest=\"datasets\",\n help=(\"File listing datasets to run over\"),\n metavar=\"FILE\")\n (options, args) = parser.parse_args()\n\n\n if options.config == None or options.dir == None:\n parser.error(usage)\n \n return options",
"def getOptions():\n usage = ('usage: %prog -c CMD -d DIR [-o OPT]\\nThe multicrab command'\n ' executes \"crab CMD OPT\" for each task contained in DIR\\nUse'\n ' multicrab -h for help\"')\n\n parser = OptionParser(usage=usage)\n parser.add_option(\"-c\", \"--crabCmd\", dest=\"crabCmd\",\n help=(\"The crab command you want to execute for each task in \"\n \"the DIR\"), metavar=\"CMD\")\n parser.add_option(\"-d\", \"--projDir\", dest=\"projDir\",\n help=\"The directory where the tasks are located\", metavar=\"DIR\")\n parser.add_option(\"-o\", \"--crabCmdOptions\", dest=\"crabCmdOptions\",\n help=(\"The options you want to pass to the crab command CMD\"\n \"tasklistFile\"), metavar=\"OPT\", default=\"\")\n parser.add_option(\"-r\", \"--noAutoResubmit\", dest=\"noAutoResubmit\",\n help=(\"don't automatically run the resub commands\"),\n metavar=\"noAutoResub\",default=False,action=\"store_true\")\n parser.add_option(\"-i\", \"--ignoreCache\", dest=\"ignoreMulticrabCache\",\n help=(\"don't use cache file to skip checking status of jobs already done\"),\n metavar=\"ignoreCache\",default=False,action=\"store_true\")\n\n (options, args) = parser.parse_args()\n\n if args:\n parser.error(\"Found positional argument(s) %s.\" % args)\n if not options.crabCmd:\n parser.error(\"(-c CMD, --crabCmd=CMD) option not provided\")\n if not options.projDir:\n parser.error(\"(-d DIR, --projDir=DIR) option not provided\")\n if not os.path.isdir(options.projDir):\n parser.error(\"Directory %s does not exist\" % options.projDir)\n\n return options",
"def get_options(self):\r\n return self._option_values",
"def get_opt(self):\n return self.parser.parse_args()",
"def get_opt(self):\n return self.parser.parse_args()",
"def get_opt(self):\n return self.parser.parse_args()",
"def execution_options(self) -> pulumi.Output[Optional['outputs.JobStepExecutionOptionsResponse']]:\n return pulumi.get(self, \"execution_options\")",
"def index_parse_args(parser):\n \n parser.add_argument(\"--gcsa_index_cores\", type=int,\n help=\"number of threads during the gcsa indexing step\")\n parser.add_argument(\"--xg_index_cores\", type=int,\n help=\"number of threads during the xg indexing step\")\n parser.add_argument(\"--gbwt_index_cores\", type=int,\n help=\"number of threads during the gbwt indexing step\") \n\n parser.add_argument(\"--index_name\", type=str, default='index',\n help=\"name of index files. <name>.xg, <name>.gcsa etc.\")\n\n parser.add_argument(\"--gcsa_opts\", type=str,\n help=\"Options to pass to gcsa indexing.\")\n \n parser.add_argument(\"--minimizer_opts\", type=str,\n help=\"Options to pass to minimizer indexing.\")\n\n parser.add_argument(\"--vcf_phasing\", nargs='+', type=make_url, default=[],\n help=\"Import phasing information from VCF(s) into xg (or GBWT with --gbwt_index)\")\n parser.add_argument(\"--vcf_phasing_regions\", nargs='+', default=[],\n help=\"Hint the relevant chrom:start-end regions to the GBWT indexer, for subregion graphs\")\n parser.add_argument(\"--gbwt_input\", type=make_url,\n help=\"Use given GBWT for GCSA2 pruning\")\n parser.add_argument(\"--gbwt_prune\", action='store_true',\n help=\"Use gbwt for gcsa pruning\")\n parser.add_argument(\"--force_phasing\", type=lambda x:bool(util.strtobool(x)), default=None,\n help=\"If 'True', randomly phase unphased variants and discard unresolveable overlaps for GBWT\")",
"def extract_index_urls(self, index: int) -> ListLike:\n cmd_pieces = self[index].split()\n index_urls = []\n for i, piece in enumerate(cmd_pieces):\n if piece in [\"--index-url\", \"--extra-index-url\"]:\n index_urls.append(cmd_pieces[i + 1])\n return index_urls",
"def args_to_add(cls, index=None) -> [Argument]:\n return super().args_to_add(index) + [\n Argument('load', default=\"False\", type=str, help='load the cached weights or continue', is_bool=True),\n Argument('batches_forward', default=0, type=int, help='num batches to forward the network, to adapt bn'),\n Argument('batches_train', default=0, type=int, help='num batches to train the network, -1 for an epoch'),\n Argument('batches_eval', default=-1, type=int, help='num batches to train the network, -1 for an epoch'),\n Argument('value', default='val/accuracy/1', type=str, help='which top k value to optimize'),\n ]",
"def get_multitask_egs_opts(egs_dir, egs_prefix=\"\",\n archive_index=-1,\n use_multitask_egs=False):\n multitask_egs_opts = \"\"\n egs_suffix = \".{0}\".format(archive_index) if archive_index != -1 else \"\"\n\n if use_multitask_egs:\n output_file_name = (\"{egs_dir}/{egs_prefix}output{egs_suffix}.ark\"\n \"\".format(egs_dir=egs_dir,\n egs_prefix=egs_prefix,\n egs_suffix=egs_suffix))\n output_rename_opt = \"\"\n if os.path.isfile(output_file_name):\n output_rename_opt = (\"--outputs=ark:{output_file_name}\".format(\n output_file_name=output_file_name))\n\n weight_file_name = (\"{egs_dir}/{egs_prefix}weight{egs_suffix}.ark\"\n \"\".format(egs_dir=egs_dir,\n egs_prefix=egs_prefix,\n egs_suffix=egs_suffix))\n weight_opt = \"\"\n if os.path.isfile(weight_file_name):\n weight_opt = (\"--weights=ark:{weight_file_name}\"\n \"\".format(weight_file_name=weight_file_name))\n\n multitask_egs_opts = (\n \"{output_rename_opt} {weight_opt}\".format(\n output_rename_opt=output_rename_opt,\n weight_opt=weight_opt))\n\n return multitask_egs_opts",
"def get_jobs(dumpruninfo):\n if \"jobs\" not in dumpruninfo:\n return []\n return dumpruninfo[\"jobs\"].keys()",
"def __getitem__(self, item):\n if item not in self._moptions:\n raise KeyError(\"Invalid option '%s'.\" % item)\n return self._runopts.get(item)",
"def execution_options(self) -> Optional[pulumi.Input['JobStepExecutionOptionsArgs']]:\n return pulumi.get(self, \"execution_options\")",
"def get_run_options(assays):\n pipelines = {'macs14':None,\n 'macs2':None,\n 'rose':None,\n 'homer':None,\n 'crc':None,\n 'drose':None\n }\n \n options = {a:pipelines for a in assays}\n\n return {}",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"async def get_options(self):",
"def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]",
"def options(self, component, workflow, index):\n\n # pylint: disable=R0912, R0915\n options = {\"type\": component}\n\n st.markdown(\"---\")\n\n # Lookup component configuration\n # - Runtime components have config defined within tasks\n # - Pipeline components have config defined at workflow root\n config = None\n if workflow:\n if component in [\"service\", \"translation\"]:\n # Service config is found in tasks section\n tasks = list(workflow[\"workflow\"].values())[0][\"tasks\"]\n tasks = [task for task in tasks if task.get(\"task\") == component or task.get(\"action\") == component]\n if tasks:\n config = tasks[0]\n else:\n config = workflow.get(component)\n\n if component == \"embeddings\":\n st.markdown(f\"**{index + 1}.) Embeddings Index** \\n*Index workflow output*\")\n options[\"index\"] = self.text(\"Embeddings storage path\", component, config, \"index\")\n options[\"path\"] = self.text(\"Embeddings model path\", component, config, \"path\", \"sentence-transformers/nli-mpnet-base-v2\")\n options[\"upsert\"] = self.boolean(\"Upsert\", component, config, \"upsert\")\n options[\"content\"] = self.boolean(\"Content\", component, config, \"content\")\n\n elif component in (\"segmentation\", \"textractor\"):\n if component == \"segmentation\":\n st.markdown(f\"**{index + 1}.) Segment** \\n*Split text into semantic units*\")\n else:\n st.markdown(f\"**{index + 1}.) Textract** \\n*Extract text from documents*\")\n\n options[\"sentences\"] = self.boolean(\"Split sentences\", component, config, \"sentences\")\n options[\"lines\"] = self.boolean(\"Split lines\", component, config, \"lines\")\n options[\"paragraphs\"] = self.boolean(\"Split paragraphs\", component, config, \"paragraphs\")\n options[\"join\"] = self.boolean(\"Join tokenized\", component, config, \"join\")\n options[\"minlength\"] = self.number(\"Min section length\", component, config, \"minlength\")\n\n elif component == \"service\":\n st.markdown(f\"**{index + 1}.) Service** \\n*Extract data from an API*\")\n options[\"url\"] = self.text(\"URL\", component, config, \"url\")\n options[\"method\"] = self.select(\"Method\", component, config, \"method\", [\"get\", \"post\"], 0)\n options[\"params\"] = self.text(\"URL parameters\", component, config, \"params\")\n options[\"batch\"] = self.boolean(\"Run as batch\", component, config, \"batch\", True)\n options[\"extract\"] = self.text(\"Subsection(s) to extract\", component, config, \"extract\")\n\n if options[\"params\"]:\n options[\"params\"] = {key: None for key in self.split(options[\"params\"])}\n if options[\"extract\"]:\n options[\"extract\"] = self.split(options[\"extract\"])\n\n elif component == \"summary\":\n st.markdown(f\"**{index + 1}.) Summary** \\n*Abstractive text summarization*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"sshleifer/distilbart-cnn-12-6\")\n options[\"minlength\"] = self.number(\"Min length\", component, config, \"minlength\")\n options[\"maxlength\"] = self.number(\"Max length\", component, config, \"maxlength\")\n\n elif component == \"tabular\":\n st.markdown(f\"**{index + 1}.) 
Tabular** \\n*Split tabular data into rows and columns*\")\n options[\"idcolumn\"] = self.text(\"Id columns\", component, config, \"idcolumn\")\n options[\"textcolumns\"] = self.text(\"Text columns\", component, config, \"textcolumns\")\n options[\"content\"] = self.text(\"Content\", component, config, \"content\")\n\n if options[\"textcolumns\"]:\n options[\"textcolumns\"] = self.split(options[\"textcolumns\"])\n\n if options[\"content\"]:\n options[\"content\"] = self.split(options[\"content\"])\n if len(options[\"content\"]) == 1 and options[\"content\"][0] == \"1\":\n options[\"content\"] = options[\"content\"][0]\n\n elif component == \"transcription\":\n st.markdown(f\"**{index + 1}.) Transcribe** \\n*Transcribe audio to text*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"facebook/wav2vec2-base-960h\")\n\n elif component == \"translation\":\n st.markdown(f\"**{index + 1}.) Translate** \\n*Machine translation*\")\n options[\"target\"] = self.text(\"Target language code\", component, config, \"args\", \"en\")\n\n return options",
"def get_options(self):\n return []",
"def get_options():\n parser = argparse.ArgumentParser(\n description=\"view the aria2 queue on localhost:6800\",\n )\n # parser.add_argument() calls here\n options = parser.parse_args()\n # extra processing of options here\n return options"
] | [
"0.64497",
"0.6384364",
"0.62348664",
"0.61879486",
"0.6184877",
"0.59024817",
"0.5853503",
"0.56605256",
"0.5530498",
"0.5475739",
"0.54434043",
"0.53275234",
"0.5277181",
"0.5277181",
"0.5277181",
"0.5276527",
"0.5255759",
"0.5234088",
"0.523196",
"0.522801",
"0.52041173",
"0.51954997",
"0.51877433",
"0.5168787",
"0.5167043",
"0.515369",
"0.5133574",
"0.5101633",
"0.5079529",
"0.50739384"
] | 0.79151005 | 0 |
Returns the transpile options of given job index. | def _transpile_options(self, index: int = -1) -> Dict[str, Any]:
try:
return self.__experiment_metadata["job_metadata"][index]["transpile_options"]
except (TypeError, KeyError, IndexError):
# Ignore experiment metadata or job metadata is not set or key is not found
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_job_options(self):\n argument = [string.Template(self.queue.template[key]).substitute(\n {key : value}) for key, value in self.options.items()]\n\n if len(self.custom_options) > 0:\n argument += self.custom_options\n\n return argument",
"def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _run_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"run_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _analysis_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"analysis_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None",
"def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option",
"def index(self):\n return self._quote_get('option/index')",
"def get_multitask_egs_opts(egs_dir, egs_prefix=\"\",\n archive_index=-1,\n use_multitask_egs=False):\n multitask_egs_opts = \"\"\n egs_suffix = \".{0}\".format(archive_index) if archive_index != -1 else \"\"\n\n if use_multitask_egs:\n output_file_name = (\"{egs_dir}/{egs_prefix}output{egs_suffix}.ark\"\n \"\".format(egs_dir=egs_dir,\n egs_prefix=egs_prefix,\n egs_suffix=egs_suffix))\n output_rename_opt = \"\"\n if os.path.isfile(output_file_name):\n output_rename_opt = (\"--outputs=ark:{output_file_name}\".format(\n output_file_name=output_file_name))\n\n weight_file_name = (\"{egs_dir}/{egs_prefix}weight{egs_suffix}.ark\"\n \"\".format(egs_dir=egs_dir,\n egs_prefix=egs_prefix,\n egs_suffix=egs_suffix))\n weight_opt = \"\"\n if os.path.isfile(weight_file_name):\n weight_opt = (\"--weights=ark:{weight_file_name}\"\n \"\".format(weight_file_name=weight_file_name))\n\n multitask_egs_opts = (\n \"{output_rename_opt} {weight_opt}\".format(\n output_rename_opt=output_rename_opt,\n weight_opt=weight_opt))\n\n return multitask_egs_opts",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"def get_step_settings_at_index(self, index):\n return self.routine_template.get_step_settings_at_index(index)",
"def options(self, component, workflow, index):\n\n # pylint: disable=R0912, R0915\n options = {\"type\": component}\n\n st.markdown(\"---\")\n\n # Lookup component configuration\n # - Runtime components have config defined within tasks\n # - Pipeline components have config defined at workflow root\n config = None\n if workflow:\n if component in [\"service\", \"translation\"]:\n # Service config is found in tasks section\n tasks = list(workflow[\"workflow\"].values())[0][\"tasks\"]\n tasks = [task for task in tasks if task.get(\"task\") == component or task.get(\"action\") == component]\n if tasks:\n config = tasks[0]\n else:\n config = workflow.get(component)\n\n if component == \"embeddings\":\n st.markdown(f\"**{index + 1}.) Embeddings Index** \\n*Index workflow output*\")\n options[\"index\"] = self.text(\"Embeddings storage path\", component, config, \"index\")\n options[\"path\"] = self.text(\"Embeddings model path\", component, config, \"path\", \"sentence-transformers/nli-mpnet-base-v2\")\n options[\"upsert\"] = self.boolean(\"Upsert\", component, config, \"upsert\")\n options[\"content\"] = self.boolean(\"Content\", component, config, \"content\")\n\n elif component in (\"segmentation\", \"textractor\"):\n if component == \"segmentation\":\n st.markdown(f\"**{index + 1}.) Segment** \\n*Split text into semantic units*\")\n else:\n st.markdown(f\"**{index + 1}.) Textract** \\n*Extract text from documents*\")\n\n options[\"sentences\"] = self.boolean(\"Split sentences\", component, config, \"sentences\")\n options[\"lines\"] = self.boolean(\"Split lines\", component, config, \"lines\")\n options[\"paragraphs\"] = self.boolean(\"Split paragraphs\", component, config, \"paragraphs\")\n options[\"join\"] = self.boolean(\"Join tokenized\", component, config, \"join\")\n options[\"minlength\"] = self.number(\"Min section length\", component, config, \"minlength\")\n\n elif component == \"service\":\n st.markdown(f\"**{index + 1}.) Service** \\n*Extract data from an API*\")\n options[\"url\"] = self.text(\"URL\", component, config, \"url\")\n options[\"method\"] = self.select(\"Method\", component, config, \"method\", [\"get\", \"post\"], 0)\n options[\"params\"] = self.text(\"URL parameters\", component, config, \"params\")\n options[\"batch\"] = self.boolean(\"Run as batch\", component, config, \"batch\", True)\n options[\"extract\"] = self.text(\"Subsection(s) to extract\", component, config, \"extract\")\n\n if options[\"params\"]:\n options[\"params\"] = {key: None for key in self.split(options[\"params\"])}\n if options[\"extract\"]:\n options[\"extract\"] = self.split(options[\"extract\"])\n\n elif component == \"summary\":\n st.markdown(f\"**{index + 1}.) Summary** \\n*Abstractive text summarization*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"sshleifer/distilbart-cnn-12-6\")\n options[\"minlength\"] = self.number(\"Min length\", component, config, \"minlength\")\n options[\"maxlength\"] = self.number(\"Max length\", component, config, \"maxlength\")\n\n elif component == \"tabular\":\n st.markdown(f\"**{index + 1}.) 
Tabular** \\n*Split tabular data into rows and columns*\")\n options[\"idcolumn\"] = self.text(\"Id columns\", component, config, \"idcolumn\")\n options[\"textcolumns\"] = self.text(\"Text columns\", component, config, \"textcolumns\")\n options[\"content\"] = self.text(\"Content\", component, config, \"content\")\n\n if options[\"textcolumns\"]:\n options[\"textcolumns\"] = self.split(options[\"textcolumns\"])\n\n if options[\"content\"]:\n options[\"content\"] = self.split(options[\"content\"])\n if len(options[\"content\"]) == 1 and options[\"content\"][0] == \"1\":\n options[\"content\"] = options[\"content\"][0]\n\n elif component == \"transcription\":\n st.markdown(f\"**{index + 1}.) Transcribe** \\n*Transcribe audio to text*\")\n options[\"path\"] = self.text(\"Model\", component, config, \"path\", \"facebook/wav2vec2-base-960h\")\n\n elif component == \"translation\":\n st.markdown(f\"**{index + 1}.) Translate** \\n*Machine translation*\")\n options[\"target\"] = self.text(\"Target language code\", component, config, \"args\", \"en\")\n\n return options",
"def describe_compilation_job(CompilationJobName=None):\n pass",
"def build(self, context):\r\n return ['-Z', context.config.preset]",
"def index_toggle_parse_args(parser):\n parser.add_argument(\"--gcsa_index\", dest=\"indexes\", default=[], action=\"append_const\", const=\"gcsa\",\n help=\"Make a gcsa index for each output graph\")\n parser.add_argument(\"--xg_index\", dest=\"indexes\", action=\"append_const\", const=\"xg\",\n help=\"Make an xg index for each output graph\")\n parser.add_argument(\"--gbwt_index\", dest=\"indexes\", action=\"append_const\", const=\"gbwt\",\n help=\"Make a GBWT index alongside the xg index for each output graph\")\n parser.add_argument(\"--snarls_index\", dest=\"indexes\", action=\"append_const\", const=\"snarls\",\n help=\"Make an snarls file for each output graph\")\n parser.add_argument(\"--trivial_snarls_index\", dest=\"indexes\", action=\"append_const\", const=\"trivial_snarls\",\n help=\"Make a trivial-inclusive snarls file for each output graph\")\n parser.add_argument(\"--distance_index\", dest=\"indexes\", action=\"append_const\", const=\"distance\",\n help=\"Make a (minimum) distance index for each output graph\")\n parser.add_argument(\"--minimizer_index\", dest=\"indexes\", action=\"append_const\", const=\"minimizer\",\n help=\"Make a minimizer index for each output graph\")\n parser.add_argument(\"--id_ranges_index\", dest=\"indexes\", action=\"append_const\", const=\"id_ranges\",\n help=\"Make chromosome id ranges tables (so toil-vg map can optionally split output by chromosome)\")\n parser.add_argument(\"--alt_path_gam_index\", dest=\"indexes\", action=\"append_const\", const=\"alt-gam\",\n help=\"Save alt paths from vg into an indexed GAM\")\n parser.add_argument(\"--xg_alts\", dest=\"indexes\", action=\"append_const\", const=\"xg_alts\",\n help=\"Include alt paths in xg index\")\n parser.add_argument(\"--all_index\", dest=\"indexes\", action=\"store_const\",\n const=[\"gcsa\", \"xg\", \"gbwt\", \"snarls\", \"trivial_snarls\", \"distance\", \"minimizer\", \"id_ranges\"],\n help=\"Equivalent to --gcsa_index --xg_index --gbwt_index --snarls_index --trivial_snarls_index \"\n \"--distance_index --minimizer_index --id_ranges_index\")",
"async def get_options(self) -> List[Tuple[str, str]]:\n options = [\n (\"TRUE\", \"true\"),\n (\"FALSE\", \"false\"),\n ]\n if self.context.get(\"null\"):\n options.insert(0, (\"\", \"\"))\n\n return options",
"def generateEnvList( self, index ):\n EnvList = [ \n (\"GLOBUS_DUROC_SUBJOB_INDEX\", \"%d\" % index),\n (\"LD_LIBRARY_PATH\", \"/usr/local/globus/globus-3.2/lib/\") \n ]\n return EnvList",
"def extract_index_urls(self, index: int) -> ListLike:\n cmd_pieces = self[index].split()\n index_urls = []\n for i, piece in enumerate(cmd_pieces):\n if piece in [\"--index-url\", \"--extra-index-url\"]:\n index_urls.append(cmd_pieces[i + 1])\n return index_urls",
"def index_parse_args(parser):\n \n parser.add_argument(\"--gcsa_index_cores\", type=int,\n help=\"number of threads during the gcsa indexing step\")\n parser.add_argument(\"--xg_index_cores\", type=int,\n help=\"number of threads during the xg indexing step\")\n parser.add_argument(\"--gbwt_index_cores\", type=int,\n help=\"number of threads during the gbwt indexing step\") \n\n parser.add_argument(\"--index_name\", type=str, default='index',\n help=\"name of index files. <name>.xg, <name>.gcsa etc.\")\n\n parser.add_argument(\"--gcsa_opts\", type=str,\n help=\"Options to pass to gcsa indexing.\")\n \n parser.add_argument(\"--minimizer_opts\", type=str,\n help=\"Options to pass to minimizer indexing.\")\n\n parser.add_argument(\"--vcf_phasing\", nargs='+', type=make_url, default=[],\n help=\"Import phasing information from VCF(s) into xg (or GBWT with --gbwt_index)\")\n parser.add_argument(\"--vcf_phasing_regions\", nargs='+', default=[],\n help=\"Hint the relevant chrom:start-end regions to the GBWT indexer, for subregion graphs\")\n parser.add_argument(\"--gbwt_input\", type=make_url,\n help=\"Use given GBWT for GCSA2 pruning\")\n parser.add_argument(\"--gbwt_prune\", action='store_true',\n help=\"Use gbwt for gcsa pruning\")\n parser.add_argument(\"--force_phasing\", type=lambda x:bool(util.strtobool(x)), default=None,\n help=\"If 'True', randomly phase unphased variants and discard unresolveable overlaps for GBWT\")",
"def _get_charm_pack_args(self, base_indeces: List[str], destructive_mode: bool) -> List[str]:\n args = [\"charmcraft\", \"pack\", \"--verbose\"]\n if destructive_mode:\n args.append(\"--destructive-mode\")\n for base in base_indeces:\n args.append(f\"--bases-index={base}\")\n if self.force_packing:\n args.append(\"--force\")\n return args",
"def getOptions():\n usage = ('usage: %prog -c CMD -d DIR [-o OPT]\\nThe multicrab command'\n ' executes \"crab CMD OPT\" for each task contained in DIR\\nUse'\n ' multicrab -h for help\"')\n\n parser = OptionParser(usage=usage)\n parser.add_option(\"-c\", \"--crabCmd\", dest=\"crabCmd\",\n help=(\"The crab command you want to execute for each task in \"\n \"the DIR\"), metavar=\"CMD\")\n parser.add_option(\"-d\", \"--projDir\", dest=\"projDir\",\n help=\"The directory where the tasks are located\", metavar=\"DIR\")\n parser.add_option(\"-o\", \"--crabCmdOptions\", dest=\"crabCmdOptions\",\n help=(\"The options you want to pass to the crab command CMD\"\n \"tasklistFile\"), metavar=\"OPT\", default=\"\")\n parser.add_option(\"-r\", \"--noAutoResubmit\", dest=\"noAutoResubmit\",\n help=(\"don't automatically run the resub commands\"),\n metavar=\"noAutoResub\",default=False,action=\"store_true\")\n parser.add_option(\"-i\", \"--ignoreCache\", dest=\"ignoreMulticrabCache\",\n help=(\"don't use cache file to skip checking status of jobs already done\"),\n metavar=\"ignoreCache\",default=False,action=\"store_true\")\n\n (options, args) = parser.parse_args()\n\n if args:\n parser.error(\"Found positional argument(s) %s.\" % args)\n if not options.crabCmd:\n parser.error(\"(-c CMD, --crabCmd=CMD) option not provided\")\n if not options.projDir:\n parser.error(\"(-d DIR, --projDir=DIR) option not provided\")\n if not os.path.isdir(options.projDir):\n parser.error(\"Directory %s does not exist\" % options.projDir)\n\n return options",
"def describe_transform_job(TransformJobName=None):\n pass",
"def getOptions() :\n usage = ('usage: python submit_all.py -c CONFIG -d DIR ')\n\n parser = OptionParser(usage=usage) \n parser.add_option(\"-c\", \"--config\", dest=\"config\",\n help=(\"The crab script you want to submit \"),\n metavar=\"CONFIG\")\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\",\n help=(\"The crab directory you want to use \"),\n metavar=\"DIR\")\n parser.add_option(\"-f\", \"--datasets\", dest=\"datasets\",\n help=(\"File listing datasets to run over\"),\n metavar=\"FILE\")\n (options, args) = parser.parse_args()\n\n\n if options.config == None or options.dir == None:\n parser.error(usage)\n \n return options",
"def _get_job_defaults():\n\n lines = []\n lines += '[Job]\\n'\n j = Job()\n for cj in j._config_names:\n v = getattr(j, cj)\n lines += '%s = %s\\n' % (cj, v)\n lines += '\\n'\n return lines",
"def args_to_add(cls, index=None) -> [Argument]:\n return super().args_to_add(index) + [\n Argument('load', default=\"False\", type=str, help='load the cached weights or continue', is_bool=True),\n Argument('batches_forward', default=0, type=int, help='num batches to forward the network, to adapt bn'),\n Argument('batches_train', default=0, type=int, help='num batches to train the network, -1 for an epoch'),\n Argument('batches_eval', default=-1, type=int, help='num batches to train the network, -1 for an epoch'),\n Argument('value', default='val/accuracy/1', type=str, help='which top k value to optimize'),\n ]",
"def options() -> List:\n return list(c.value for c in Plugin)",
"def get_extra_options(self):\n # Options change depending on the pdf generator..\n try:\n transform_module = getattr(transforms, self.pdf_generator)\n except AttributeError:\n return []\n\n options = []\n tool_options = self.pdf_tool.make_options()\n adapter_options, adapter_overrides = self._get_adapter_options()\n\n opts_order = [self.request, tool_options]\n if adapter_overrides:\n opts_order.insert(0, adapter_options)\n else:\n opts_order.append(adapter_options)\n\n # First we check the options for which no value is\n # needed.\n # For each one, it is possible to define a --no-xxx\n # option.\n for opt_name in transform_module.simple_options:\n for opts in opts_order:\n if opts.get('--no-%s' % opt_name):\n break\n\n if opts.get(opt_name, None):\n options.append('--%s' % opt_name)\n break\n # Then we check values that expect a value.\n for opt_name in transform_module.valued_options:\n for opts in opts_order:\n opt_val = opts.get(opt_name, None)\n\n if opt_val is None:\n continue\n\n # Value is put before the option name as we\n # insert them after in another list using l.insert(2, opt)\n if isinstance(opt_val, list):\n for x in reversed(opt_val):\n options.append(str(x))\n else:\n options.append(str(opt_val))\n\n options.append('--%s' % opt_name)\n break\n\n return options",
"def get_options(self):\n option_list = []\n if self.can_analyze():\n option_list.append((EpOp.TASK_ANALYZE, None))\n\n option_tup = self.predict_option()\n if option_tup:\n option_list.append(option_tup)\n\n option_tup = self.check_option()\n if option_tup:\n option_list.append(option_tup)\n\n return option_list",
"def options(argv=[]):\r\n parser = HendrixOptionParser\r\n return vars(parser.parse_args(argv)[0])",
"def the_option_at_index(index: Union[int, str]) -> \"SelectByIndex\":\n return SelectByIndex(index)",
"def retrieve_options(env):\n\n options = []\n if env.core != -1:\n options.extend([\"--core {}\".format(env.core)])\n if env.mtor != 4:\n options.extend([\"--mtor {}\".format(env.mtor)])\n if env.n != 1000:\n options.extend([\"--n {}\".format(env.n)])\n if env.forcefield != \"OPLS2005\":\n options.extend([\"--force {}\".format(env.forcefield)])\n if env.mae_lig:\n options.extend([\"--mae_charges\"])\n if env.gridres != 10:\n options.extend([\"--gridres {}\".format(env.gridres)])\n return \" \".join(options)",
"def to_list(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append((k, v))\n opts_list = sorted(opts_list)\n return opts_list"
] | [
"0.58368",
"0.56201595",
"0.54462826",
"0.5405374",
"0.5268604",
"0.51483375",
"0.514388",
"0.5095438",
"0.48473778",
"0.48377272",
"0.476263",
"0.47249606",
"0.47134674",
"0.46772844",
"0.46634972",
"0.465322",
"0.46220458",
"0.46066916",
"0.45873234",
"0.4573063",
"0.45589238",
"0.45586374",
"0.45489714",
"0.45314068",
"0.45233485",
"0.4515343",
"0.45122516",
"0.44938493",
"0.44866276",
"0.44775257"
] | 0.7755575 | 0 |
Parse input kwargs with predicted input. Class attributes will be updated according to the ``options``. For example, if ``options`` has a key ``p0``, and the class has an attribute named ``__p0``, then the attribute ``__p0`` will be updated to ``options["p0"]``. Options that don't have matching attributes will be included in the returned dictionary. | def _arg_parse(self, **options) -> Dict[str, Any]:
extra_options = dict()
for key, value in options.items():
private_key = f"__{key}"
if hasattr(self, private_key):
setattr(self, private_key, value)
else:
extra_options[key] = value
return extra_options | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts",
"def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,\n ('self',), options)",
"def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)",
"def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(\n dqpsk_demod.__init__, ('self',), options)",
"def _process_kwargs(name, d, definition, nodes):\n # get node class\n module_root = d.get(\"plugin\", \"podpac\")\n node_string = \"%s.%s\" % (module_root, d[\"node\"])\n module_name, node_name = node_string.rsplit(\".\", 1)\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n raise ValueError(\"Invalid definition for node '%s': no module found '%s'\" % (name, module_name))\n try:\n node_class = getattr(module, node_name)\n except AttributeError:\n raise ValueError(\n \"Invalid definition for node '%s': class '%s' not found in module '%s'\" % (name, node_name, module_name)\n )\n\n kwargs = {}\n for k, v in d.get(\"attrs\", {}).items():\n kwargs[k] = v\n\n for k, v in d.get(\"inputs\", {}).items():\n kwargs[k] = _lookup_input(nodes, name, v, definition)\n\n for k, v in d.get(\"lookup_attrs\", {}).items():\n kwargs[k] = _lookup_attr(nodes, name, v)\n\n if \"style\" in d:\n style_class = getattr(node_class, \"style\", Style)\n if isinstance(style_class, tl.TraitType):\n # Now we actually have to look through the class to see\n # if there is a custom initializer for style\n for attr in dir(node_class):\n atr = getattr(node_class, attr)\n if not isinstance(atr, tl.traitlets.DefaultHandler) or atr.trait_name != \"style\":\n continue\n try:\n style_class = atr(node_class)\n except Exception as e:\n # print (\"couldn't make style from class\", e)\n try:\n style_class = atr(node_class())\n except:\n # print (\"couldn't make style from class instance\", e)\n style_class = style_class.klass\n try:\n kwargs[\"style\"] = style_class.from_definition(d[\"style\"])\n except Exception as e:\n kwargs[\"style\"] = Style.from_definition(d[\"style\"])\n # print (\"couldn't make style from inferred style class\", e)\n\n for k in d:\n if k not in [\"node\", \"inputs\", \"attrs\", \"lookup_attrs\", \"plugin\", \"style\"]:\n raise ValueError(\"Invalid definition for node '%s': unexpected property '%s'\" % (name, k))\n\n nodes[name] = node_class(**kwargs)",
"def _run_kwargs(cls, kwargs: Dict[str, Any]):\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)",
"def find_options(class_attributes):\r\n new_attributes = {}\r\n options = collections.OrderedDict()\r\n for name in sorted(class_attributes.keys()):\r\n attr = class_attributes[name]\r\n if name.startswith('_') or not isinstance(attr, Option):\r\n new_attributes[name] = attr\r\n else:\r\n options[name] = attr\r\n return new_attributes, options",
"def _parse_kwargs(kwargs):\n layout_kwargs = {}\n # For the layout object\n if \"dim\" in kwargs:\n layout_kwargs[\"dim\"] = kwargs.pop(\"dim\")\n if \"center\" in kwargs:\n layout_kwargs[\"center\"] = kwargs.pop(\"center\")\n if \"scale\" in kwargs:\n layout_kwargs[\"scale\"] = kwargs.pop(\"scale\")\n\n placement_kwargs = {}\n # For the placement object\n if \"scale_ratio\" in kwargs:\n placement_kwargs[\"scale_ratio\"] = kwargs.pop(\"scale_ratio\")\n # For closest strategy\n if \"subset_size\" in kwargs:\n placement_kwargs[\"subset_size\"] = kwargs.pop(\"subset_size\")\n if \"num_neighbors\" in kwargs:\n placement_kwargs[\"num_neighbors\"] = kwargs.pop(\"num_neighbors\")\n\n return layout_kwargs, placement_kwargs",
"def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]",
"def get_args_dict(class_, options: Options) -> dict:\n\n argspec = getfullargspec(class_.__init__)\n init_args = argspec.args\n init_args.pop(0) # self\n result = {k: v for k, v in options.items() if k in init_args}\n\n positional_args = init_args[:-len(argspec.defaults)]\n\n missing_args = [a for a in positional_args if a not in options]\n if missing_args:\n raise BadConfigError(\n f'Some required parameters are missing in \"{options[\"name\"]}\" config: ' +\n ', '.join(missing_args)\n )\n return result",
"def _parse(self, options):\n\n '''Start by considering all registered options, and validating them\n if they are in the incoming options dict'''\n self.results = {}\n wanted = self.wanted.copy()\n for opt in wanted.keys():\n if opt in options:\n self.results[opt] = self._access(wanted, opt, options[opt])\n\n '''As all registered options, in trac.ini, have composite names,\n consisting of a prefix and the option name separated by a dot,\n now find the starting list of prefixes to consider. Either use\n the value of incoming option of the name found in self.config,\n or use the fixed default prefix from self.prefix'''\n if self.config in options:\n parents = self._parents_to_list(options[self.config])\n del options[self.config]\n else:\n parents = [ self.prefix ]\n\n '''Look up these composite options'''\n if len(wanted) > 0:\n self._inherit(options, parents, wanted, {})\n\n '''Set all still unresolved registered options, to their defaults'''\n for opt in wanted.keys():\n self.results[opt] = (\n wanted[opt].default,\n self._is_default,\n wanted[opt]\n )\n\n '''Move over all UNregistered options as they were passed in.'''\n for opt in options.keys():\n if not opt in self.results:\n self.results[opt] = (\n options[opt],\n self._is_extra,\n None\n )",
"def parse(self, kwargs):\n for k, v in kwargs.items():\n if not hasattr(self, k):\n warnings.warn(\"Waning: opt has no attribute %s\" % k)\n setattr(self, k, v)\n\n print('Configuration:')\n for k, v in self.__class__.__dict__.items():\n if not k.startswith('__') and str(k) != 'parse':\n print('\\t{0}: {1}'.format(k, getattr(self, k)))",
"def _process_options(self, options):\n name = options.name.strip()\n package_name = options.package_name\n class_name = options.class_name\n\n if not package_name:\n package_name = self._normalize_package_name(name)\n console.print('Using \"%s\" as the package name.' % package_name)\n else:\n package_name = package_name.strip()\n\n if not re.match(r'[A-Za-z][A-Za-z0-9._-]*', package_name):\n self.error(\n '\"%s\" is not a valid package name. Try --package-name=\"%s\"'\n % (package_name,\n self._normalize_package_name(package_name)))\n\n if not class_name:\n class_name = self._normalize_class_name(name)\n console.print('Using \"%s\" as the extension class name.'\n % class_name)\n else:\n class_name = class_name.strip()\n\n if not re.match(r'[A-Za-z][A-Za-z0-9_]+Extension$', class_name):\n self.error(\n '\"%s\" is not a valid class name. Try --class-name=\"%s\"'\n % (package_name,\n self._normalize_class_name(class_name)))\n\n options.name = name\n options.package_name = package_name\n options.class_name = class_name",
"def _map_args_kwargs_to_input(self, *args, **kwargs) -> Dict[str, Any]:\n input_dict = {k: v for k, v in zip(self.inputs, args)}\n input_dict.update(kwargs)\n\n return input_dict",
"def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:\n if self._constructed_manually:\n raise UnsupportedError(\n \"Surrogates constructed manually (ie Surrogate.from_botorch) may not \"\n \"be serialized. If serialization is necessary please initialize from \"\n \"the constructor.\"\n )\n\n return {\n \"botorch_model_class\": self.botorch_model_class,\n \"model_options\": self.model_options,\n \"mll_class\": self.mll_class,\n \"mll_options\": self.mll_options,\n \"outcome_transform\": self.outcome_transform,\n \"input_transform\": self.input_transform,\n \"covar_module_class\": self.covar_module_class,\n \"covar_module_options\": self.covar_module_options,\n \"likelihood_class\": self.likelihood_class,\n \"likelihood_options\": self.likelihood_options,\n \"allow_batched_models\": self.allow_batched_models,\n }",
"def parse_args(self):\n parsed, _ = self.parser.parse_args()\n final = {}\n append = getattr(parsed, self.append_option)\n subtract = getattr(parsed, self.subtract_option)\n for option in self.all_options():\n name = option.dest\n if name is not None:\n value = getattr(parsed, name)\n default = self.defaults.get(name)\n if append and option.get_opt_string() in self.appendable:\n value = self.append(option, value)\n elif subtract and option.get_opt_string() in self.appendable:\n value = self.subtract(option, value)\n if value is None:\n value = default\n if value is None:\n value = raw_input(\"Please enter '%s': \" % option.help)\n self[name] = value\n return self",
"def ParseOptions(cls, options, config_object):",
"def parse(self):\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n opt.f_map = [opt.crop_size, opt.crop_size * 2, opt.crop_size * 4, opt.crop_size * 8]\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt",
"def _extract_options(config, options, *args):\n extract = {}\n for key in args:\n if key not in args:\n continue\n extract[key] = config[key]\n option = getattr(options, key, None)\n if option is not None:\n extract[key] = option\n return extract",
"def parse_options(self, options):\n pass",
"def processOptions_(self, opts):\n\n for opt in opts.keys():\n val = opts[opt]\n\n # Skip actions, they are processed later in initializeActions_()\n if opt in self.main_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n if opt in self.aux_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n \n\n elif ( opt == '-cfg' ):\n pass\n\n elif ( opt in ('-continue', '-c') ):\n # Already processed in processContinueOption_()\n pass\n\n elif ( opt == '-Q' ):\n self.flag_quiet = 1\n pass\n\n elif ( opt == '-debug' ):\n if val: self.debug_level = int(val)\n else: self.debug_level = 1\n pass\n\n elif string.find(opt,'.') == -1:\n print common.prog_name+'. Unrecognized option '+opt\n usage()\n pass\n\n # Override config parameters from INI-file with cmd-line params\n if string.find(opt,'.') == -1 :\n self.cfg_params['SKIM.'+opt[1:]] = val\n pass\n else:\n # Command line parameters in the form -SECTION.ENTRY=VALUE\n self.cfg_params[opt[1:]] = val\n pass\n pass\n return",
"def get_run_method_kwargs(self, **kwargs) -> dict:\n return {\n key: value\n for key, value in kwargs.items()\n if self.input_definitions.get(key=key).run_method_input\n }",
"def parseKwargs(acceptable,kwargs):\n \n output = {}\n\n if kwargs:\n for key in kwargs.keys():\n \n if key in acceptable:\n output[key] = kwargs[key]\n\n return output",
"def process_module(module):\n options_dict = {}\n flags_dict = {}\n \n for klass in [getattr(module, cname) for cname in dir(module) if hasattr(getattr(module, cname), 'option_names')]:\n if klass.option_names[0] is not None:\n flags_dict[klass.option_names[0]] = klass()\n options_dict[klass.option_names[1]] = klass()\n \n module.options_dict = options_dict\n module.flags_dict = flags_dict",
"def __init__(self, **options):\n self.__dict__.update(\n (k, v) for (k, v) in options.items() if not k.startswith('__'))",
"def _collect_kwargs(step):\n dicts = {}\n for s in _expand_inputs(step):\n name = s.name if s.name is not None else s.__class__.__name__\n if name in dicts.keys():\n raise ValueError(\"Duplicate step names: %s\" % name)\n\n d = dict(s._kwargs)\n d.pop('inputs', None)\n dicts[name] = d\n\n return dicts",
"def postprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n return inputs",
"def opt_to_dict(opts):\n if isinstance(opts, dict):\n return\n args = list(itertools.chain.from_iterable([x.split(\"=\") for x in opts]))\n opt_d = {k: True if v.startswith('-') else v\n for k,v in zip(args, args[1:]+[\"--\"]) if k.startswith('-')}\n return opt_d",
"def _dispatch_kwargs(self, **kwargs) -> Tuple[Dict, Dict, Dict, Dict]:\n # Ensure each argument only matches one function\n method_kwargs = self.preprocess_kwargs | self.forward_kwargs | \\\n self.visualize_kwargs | self.postprocess_kwargs\n\n union_kwargs = method_kwargs | set(kwargs.keys())\n if union_kwargs != method_kwargs:\n unknown_kwargs = union_kwargs - method_kwargs\n raise ValueError(\n f'unknown argument {unknown_kwargs} for `preprocess`, '\n '`forward`, `visualize` and `postprocess`')\n\n preprocess_kwargs = {}\n forward_kwargs = {}\n visualize_kwargs = {}\n postprocess_kwargs = {}\n\n for key, value in kwargs.items():\n if key in self.preprocess_kwargs:\n preprocess_kwargs[key] = value\n elif key in self.forward_kwargs:\n forward_kwargs[key] = value\n elif key in self.visualize_kwargs:\n visualize_kwargs[key] = value\n else:\n postprocess_kwargs[key] = value\n\n return (\n preprocess_kwargs,\n forward_kwargs,\n visualize_kwargs,\n postprocess_kwargs,\n )",
"def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict"
] | [
"0.6199073",
"0.6011524",
"0.6008918",
"0.5943669",
"0.59151167",
"0.5756908",
"0.5742022",
"0.56702465",
"0.5639711",
"0.5626554",
"0.55926645",
"0.5560618",
"0.55327994",
"0.54897285",
"0.54338837",
"0.54048324",
"0.5363454",
"0.52499473",
"0.5185636",
"0.5124794",
"0.50433224",
"0.5038611",
"0.5018278",
"0.4971491",
"0.49661326",
"0.4963481",
"0.49585027",
"0.49573106",
"0.49443832",
"0.4944361"
] | 0.63792646 | 0 |
Key generator that allows switching between keys provided in the `secret_key.txt` file. | def switch_key():
with open("secret_key.txt", 'r') as key_file:
api_keys = key_file.read().splitlines()
for api_key in api_keys:
yield api_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)",
"def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})",
"def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)",
"def grab_or_generate_secret_key(secret_file_path):\n try:\n secret_key = open(secret_file_path).read().strip()\n except IOError:\n try:\n from random import SystemRandom\n valid_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n secret_key_as_list = [SystemRandom().choice(valid_chars) for i in range(50)]\n secret_key = ''.join(secret_key_as_list)\n secret = file(secret_file_path, 'w')\n secret.write(secret_key)\n secret.close()\n except IOError:\n Exception('Please create a %s file with random characters \\\n to generate your secret key!' % secret_file_path)\n\n return secret_key",
"def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))",
"def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"",
"def randomKeyFile(file_name):\n\twith open(file_name, \"w\") as kfile:\n\t\tkey = stringGen(256)\n\t\tkfile.write(key)\n\t\tkfile.close()",
"def generate_secret_key(self, server_name: str) -> str:\n if self.config_in_use():\n raise BaseConfigInUseError()\n\n signing_key_path = join(self.config_dir, server_name + \".signing.key\")\n subprocess.run([\"generate_signing_key.py\", \"-o\", signing_key_path])\n with open(signing_key_path, \"r\") as f:\n return f.read()",
"def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)",
"def load_key():\n return open(\"Secret.key\",\"rb\").read()",
"def generate_key():\r\n\t\treturn ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(123))",
"def load_key():\n return open(\"secret.key\", \"rb\").read()",
"def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding",
"def gen_key():\n key = []\n chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n for i in xrange(20):\n key.append(random.choice(chars))\n return ''.join(key)",
"def get_random_secret_key():\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n return get_random_string(50, chars)",
"def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))",
"def generate_key():\n return get_token_generator().generate_token()",
"def generate_key():\n key = list(Fleissner.default)\n random.shuffle(key)\n done = False\n while not done:\n try:\n Fleissner(key=\"\".join(key))\n done = True\n except:\n random.shuffle(key)\n return \"\".join(key)",
"def gen_secret_key(n: int) -> int:\n while True:\n key = int.from_bytes(os.urandom(32), 'big')\n if 1 <= key < n:\n break # the key is valid, break out\n return key",
"def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)",
"def getKey(filename):\n try:\n fh = open(filename, 'rb')\n except IOError:\n logging.debug(\"getKey(): Creating new secret key.\")\n key = OpenSSL.rand.bytes(32)\n writeKeyToFile(key, filename)\n else:\n logging.debug(\"getKey(): Secret key file found. Loading...\")\n key = fh.read()\n fh.close()\n return key",
"def generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))",
"def util_generate_key(conf_file=None):\n keyname = DebRepo(**config(conf_file=conf_file)).generate_key()\n print(keyname)",
"def generate_random_key():\n return '%030x' % (random.randrange(256**15),)",
"def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )",
"def install_secret_key(app, filename='secret_key'):\n filename = os.path.join(app.instance_path, filename)\n\n try:\n app.config['SECRET_KEY'] = open(filename, 'rb').read()\n except IOError:\n print('Error: No secret key. Create it with:')\n full_path = os.path.dirname(filename)\n if not os.path.isdir(full_path):\n print('mkdir -p {filename}'.format(filename=full_path))\n print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))\n sys.exit(1)",
"def randkey():\n return binascii.b2a_hex(os.urandom(15))",
"def generate_key():\n return get_random_bytes(KEY_SIZE)"
] | [
"0.7030336",
"0.6970633",
"0.69157135",
"0.6851234",
"0.665555",
"0.6652344",
"0.6556344",
"0.64819336",
"0.64733076",
"0.64401174",
"0.6436973",
"0.64132476",
"0.64103454",
"0.63922274",
"0.6378862",
"0.6355134",
"0.63407135",
"0.6338451",
"0.6336893",
"0.6334349",
"0.6275037",
"0.62655115",
"0.6250005",
"0.620265",
"0.6168512",
"0.6167249",
"0.61644286",
"0.6151609",
"0.61109304",
"0.61006874"
] | 0.75049704 | 0 |
High-level hook called when a SIP has been deposited in a landing zone. | def ingestPostProcSipDepositInLandingZone(dataObjectPath, user, zone):
logger.info("ingestPostProcSipDepositInLandingZone()")
logger.info("dataObjectPath: %s" % dataObjectPath)
logger.info("user:%s" % user)
logger.info("zone:%s" % zone) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def place_call_offhold(self) -> None:",
"def place_call_onhold(self) -> None:",
"def _extract_kiss_destination(self):\n self.destination = aprs.Callsign(self.frame)",
"def ring_zone(self, tissue):\n print(\"controller - ring_zone!\")\n self.view.processing_gui.ask_ring_out(tissue)",
"def handle_response(self, lvap):\n\n lvaps = RUNTIME.tenants[self.tenant_id].lvaps\n\n if lvap.addr not in lvaps:\n return\n\n self.handle_callback(lvap)",
"def execute_closure_methods():\n user_account = get_user_account_data()\n # PORT FORWARDING\n if user_account and user_account.upnp():\n # Account exists and UPnP was approved\n delete_port_mapping()",
"def show_landing(self):\n print(\"Hooray, the Eagle has landed!\")",
"def land(self):\n\t\tcallTime = time.time()\n\t\tif callTime - self._last_call > 1:\n\t\t\tself._altHoldController.setTarget(max(0, self._actual_alt - 1))\n\t\t\tself._last_call = callTime",
"def call(self, callee: \"SIPPhoneTemplate\") -> None:",
"def SecurityZone(self) -> _n_6_t_7:",
"def SecurityZone(self) -> _n_6_t_7:",
"def unsuccessful_landing(self):\n self.lander_lives -= 1\n self.reset_lander('Unsuccessful landing!')",
"def event11512000():\n header(11512000)\n end_if_this_event_on()\n if_player_owns_good(0, GOOD.Lordvessel)\n flag.enable(11512000)",
"def processRouteDestReleased(self, call, dn):\n distr_parties = call.findDistributionDeviceParties()\n dn_and_trunk_parties = call.findDNandTrunkParties()\n if len(distr_parties) == 1 and len(dn_and_trunk_parties) == 1:\n if dn_and_trunk_parties[0].Role == PartyRole.Origination:\n Address.eventError(self, addPrm={\"ReferenceID\": self.routeRequestRefID})\n elif dn_and_trunk_parties[0].Role != PartyRole.ConferenceMember:\n distr_parties[0].removeFromCall()\n call.pendingQueue = None\n dn_and_trunk_parties[0].DN.leaveCall(dn_and_trunk_parties[0], abandPermited=1)",
"def __before__(self, action, environ):\n host = request.headers.get('Host')\n if not (host and host in app_globals.merchants.domain_map):\n prot, host, path, params, query, fragment = urlparse.urlparse(request.url)\n return redirect(urlparse.urlunparse((prot, app_globals.default_host, path, params, query, fragment)))\n else:\n protocol = request.headers.get('X-Forwarded-Proto', 'http')\n request.merchant = app_globals.merchants.domain_map[host]\n request.qualified_host = '%s://%s'%(protocol, host)\n request.is_secured = protocol == 'https'\n log.info('%s, %s, %s', '-'*80, protocol , protocol == 'https')\n if not websession.get('region'):\n region = request.headers.get(\"X-COUNTRY\", app_globals.country_choices.fallback.code).lower()\n region = app_globals.country_choices.map.get(region, app_globals.country_choices.fallback).code\n websession['region'] = region\n c.messages = websession.get('messages', [])\n c.user = websession.get('user', ANONUSER)\n c.user._statics = app_globals.statics_service\n c.furl = str(request.params.get(\"furl\") or request.url)\n log.info('[%s] [%s] [%s] Incoming Request at %s', c.user.u_id, websession['region'], request.headers.get('Host'), url.current())\n\n if 'lang' not in websession or websession['lang'] not in app_globals.LANGUAGES:\n websession['lang'] = negotiate_locale(request.accept_language, app_globals.LANGUAGES)\n set_lang(websession['lang'])",
"def on_station_member_lz_added(\n self, func,\n ):\n self._set_event_handler(\"stations\")\n self._events.on_station_member_lz_added(func)",
"def do_zone_event(client, args):\n args.type = 'zone'\n do_event_show(client, args)",
"def on_start(self):\n self.deposit(1000000)",
"def after_server_lookup(self, arg):\n if arg.cancelled():\n self.log.debug('server dns lookup cancelled')\n return\n if self.server_ip != self.server_reverse_ip.result():\n self.log.warn('reverse lookup: {sip} != {rsip} ({arg})'.format(\n cip=self.server_ip, rcip=self.server_reverse_ip,\n arg=arg.result()))",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def call(self, addr, *args, **kwargs):\n prepare_cb = kwargs.pop('prepare_cb', self.jitter.func_prepare_systemv)\n super(self.__class__, self).call(prepare_cb, addr, *args)",
"def handle_departure(event):\n\n aircraft = event.sender\n global allocator\n \n # Register which hub this aircraft will fly to\n aircraft.hub = aircraft.route.waypoints[0]\n \n #assert aircraft.time_to_waypoint() > config.lock_time\n\n # If the origin lies within the hub lock area, the aircraft cannot\n # reach cruise before reaching the hub, so instead we ignore it altogether\n # and tell it to fly directly to its destination instead of via the hub.\n if(aircraft.time_to_waypoint() < config.lock_time):\n \n # Reset the aircraft route\n aircraft.route.waypoints = [\n aircraft.position,\n aircraft.destination\n ]\n aircraft.route.init_segments()\n aircraft.controller.calibrate()\n aircraft.is_excluded = True\n p('warning', (\n 'Excluded from flying to the hub: %s' % (\n aircraft\n )\n ))\n return\n\n allocator.add_aircraft(aircraft) \n sim.events.append(sim.Event(\n 'enter-lock-area',\n aircraft,\n # If aircraft departs from within lock area, set lock time to now\n sim.time + max(aircraft.time_to_waypoint() - config.lock_time, 0)\n ))",
"def purchased_callback(self):\r\n raise NotImplementedError",
"def zone(self, zone):\n if self._bundle:\n self._bundle.check_zone(zone)\n self._zone = zone",
"def on_deactivate(self):"
] | [
"0.5777385",
"0.5624411",
"0.5509368",
"0.54877645",
"0.5309101",
"0.52809733",
"0.5173685",
"0.514805",
"0.50955397",
"0.50581396",
"0.50581396",
"0.5057988",
"0.5046991",
"0.50424355",
"0.4994274",
"0.49913985",
"0.49630877",
"0.49369216",
"0.49329975",
"0.49219167",
"0.49219167",
"0.49219167",
"0.49219167",
"0.49219167",
"0.49219167",
"0.49219167",
"0.49178064",
"0.4916618",
"0.49115804",
"0.48882562"
] | 0.5720877 | 1 |
Do API calls, and save data in cache files. | def do_api_calls_update_cache(self):
self.get_nodes()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, *args, **kw):\n cachepath = self.cachepath(*args, **kw)\n try:\n # try returning from cache first\n return self.loadcache(cachepath)\n except IOError:\n # not found, so run api query\n self._sleep()\n self.lastcall = time.time()\n ret = self.apifunc(*args, **kw)\n self.savecache(ret, cachepath)\n return ret",
"def cached():\n ##from pprint import pprint\n # let's restrict this to the api server, to avoid shenanigans\n root_relative_url = request.env.request_uri.split('/cached/')[-1]\n ##pprint('ROOT-RELATIVE URL: ')\n ##pprint(root_relative_url)\n fetch_url = '%s://%s/%s' % (request.env.wsgi_url_scheme, request.env.http_host, root_relative_url)\n ##pprint('PROXYING TO SIMPLE URL: ')\n ##pprint(fetch_url)\n\n # permissive CORS handling of requests from another domain (e.g. tree.opentreeoflife.org)\n if request.env.request_method == 'OPTIONS':\n if request.env.http_access_control_request_method:\n response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method\n if request.env.http_access_control_request_headers:\n response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers\n ##pprint('RESPONDING TO OPTIONS')\n raise HTTP(200, **(response.headers))\n\n # N.B. This try/except block means we'll cache errors. For now, the fix is to clear the entire cache.\n try:\n # fetch the latest IDs as JSON from remote site\n import simplejson\n\n if fetch_url.startswith('//'):\n # Prepend scheme to a scheme-relative URL\n fetch_url = \"http:%s\" % fetch_url\n\n fetch_args = request.vars # {'startingTaxonOTTId': \"\"}\n\n # TODO: For more flexibility, we should examine and mimic the original request (HTTP verb, headers, etc)\n\n # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API\n # N.B. that gluon.tools.fetch() can't be used here, since it won't send \"raw\" JSON data as treemachine expects\n req = urllib2.Request(url=fetch_url, data=simplejson.dumps(fetch_args), headers={\"Content-Type\": \"application/json\"}) \n the_response = urllib2.urlopen(req).read()\n ##pprint('RESPONSE:')\n ##pprint(the_response)\n return the_response\n\n except Exception, e:\n # throw 403 or 500 or just leave it\n return ('ERROR', e.message)",
"def _retrieveCachedData(self):",
"def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)",
"def test_cache(self):\n response = self.make_call().json[0]\n self.assertFalse(response['cached']) # a call has ben made to Google API\n # each step is saved\n self.assertEqual(len(r.keys(pattern=r'step*')), int(r.get('counter')))\n self.assertEqual(int(r.get('counter')), len(response['steps']))\n pairs = set((i, j) for (i, o), (j, d) in combinations_with_replacement(list(enumerate(response['steps'])), 2) if i <= j)\n self.assertEqual(len(r.keys(pattern=r'origin*')), len(pairs)) # each combination is cached\n for i, j in pairs:\n origin, destination = response['steps'][i], response['steps'][j]\n resp = self.make_call(origin=f\"{origin['start_lat']},{origin['start_lng']}\",\n destination=f\"{destination['end_lat']},{destination['end_lng']}\").json[0]\n # No new API calls are made, cached results are returned for each possible combination of origin/dest\n self.assertEqual(origin['start_lat'], resp['start_lat']) # all coordinates should match\n self.assertEqual(origin['start_lng'], resp['start_lng'])\n self.assertEqual(destination['end_lat'], resp['end_lat'])\n self.assertEqual(destination['end_lng'], resp['end_lng'])\n self.assertTrue(resp['cached'])\n # New API call is made for transit directions. We can't recycle driving directions for this one.\n response = self.make_call(mode='transit').json\n self.assertFalse(response[0]['cached'])\n self.assertTrue(len(response) > 1) # when asking for transit directions it should yield multiple alternatives\n # driving directions should be cached already\n response = self.make_call().json[0]\n self.assertTrue(response['cached'])\n # Walking directions should not be cached\n walking = self.make_call(mode='walking').json[0]\n self.assertFalse(walking['cached'])\n # Bicycling should be treated as walking but 3 times as fast\n bicycling = self.make_call(mode='bicycling').json[0]\n self.assertTrue(bicycling['cached'])\n self.assertEqual(walking['duration'], 3 * bicycling['duration'])",
"def _request(self, method, url,\n params=None, data=None, headers=None,\n files=None, save=False, savedir='', timeout=None, cache=None,\n stream=False, auth=None, continuation=True, verify=True,\n allow_redirects=True,\n json=None, return_response_on_save=False):\n\n if cache is None: # Global caching not overridden\n cache = cache_conf.cache_active\n\n if save:\n local_filename = url.split('/')[-1]\n if os.name == 'nt':\n # Windows doesn't allow special characters in filenames like\n # \":\" so replace them with an underscore\n local_filename = local_filename.replace(':', '_')\n\n local_filepath = os.path.join(savedir or self.cache_location or '.', local_filename)\n\n response = self._download_file(url, local_filepath, cache=cache, timeout=timeout,\n continuation=continuation, method=method,\n allow_redirects=allow_redirects,\n auth=auth, params=params, data=data, headers=headers,\n files=files, json=json)\n if return_response_on_save:\n return local_filepath, response\n else:\n return local_filepath\n else:\n query = AstroQuery(method, url, params=params, data=data, headers=headers,\n files=files, timeout=timeout, json=json)\n if not cache:\n with cache_conf.set_temp(\"cache_active\", False):\n response = query.request(self._session, stream=stream,\n auth=auth, verify=verify,\n allow_redirects=allow_redirects,\n json=json)\n else:\n response = query.from_cache(self.cache_location, cache_conf.cache_timeout)\n if not response:\n response = query.request(self._session,\n self.cache_location,\n stream=stream,\n auth=auth,\n allow_redirects=allow_redirects,\n verify=verify,\n json=json)\n to_cache(response, query.request_file(self.cache_location))\n\n self._last_query = query\n return response",
"def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n # self.outfile.write(issue['url']+\"\\n\")\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(str(e.response.json()))\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))",
"def __update_data(self):\r\n # loop = asyncio.get_event_loop()\r\n api_base_info_req = self.loop.run_in_executor(None, self.__get_base_info_api)\r\n api_status_req = self.loop.run_in_executor(None, self.__get_status_api)\r\n api_status_res = yield from api_status_req\r\n api_base_info_res = yield from api_base_info_req\r\n\r\n self.__set_base_info_api(api_base_info_res)\r\n self.__set_status_api(api_status_res)",
"def apicall(self, dasquery, url, api, args, dformat, expire):\n # NOTE: I use helper function since it is 2 step process\n # therefore the expire time stamp will not be changed, since\n # helper function will yield results\n time0 = time.time()\n if api == 'dataset4site_release' or api == 'site4block' or \\\n api == 'site4dataset' or 'files4dataset_runs_site':\n genrows = self.helper(api, args, expire)\n # here I use directly the call to the service which returns\n # proper expire timestamp. Moreover I use HTTP header to look\n # at expires and adjust my expire parameter accordingly\n# NOTE: disable dataset4site, lumi4site since they take too much load\n# see combined.yml\n# if api == 'dataset4site':\n# headers = {'Accept': 'application/json;text/json'}\n# datastream, expire = \\\n# getdata(url, args, headers, expire, system='combined')\n# genrows = parse_data(datastream)\n# if api == 'lumi4dataset':\n# headers = {'Accept': 'application/json;text/json'}\n# data, expire = \\\n# getdata(url, args, headers, expire, system='combined')\n# genrows = json_parser(data, None)\n\n # proceed with standard workflow\n ctime = time.time() - time0\n try:\n if isinstance(url, dict):\n url = \"combined: %s\" % url.values()\n self.write_to_cache(dasquery, expire, url, api, \\\n args, genrows, ctime)\n except Exception as exc:\n print_exc(exc)",
"def get(self):\n CACHE_KEY = 'sources'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n feeds = Feed.all().order('name')\n feed_list = []\n for feed in feeds:\n feed_list.append(feed.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(feed_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))",
"def main(url, from_time, to_time, stepsize, reset, debug, silent, batchsize = 10, threads=-1, outputdir=None):\n # Override output directory if required\n global DATADIR\n global CACHEFILE\n if outputdir and outputdir != DATADIR:\n logger.info(\"Changing output directory to {outputdir}\".format(outputdir=outputdir))\n DATADIR = outputdir\n CACHEFILE = os.path.join(DATADIR,'.cache')\n logger.debug(\"Set output to {DATADIR}\".format(DATADIR=DATADIR))\n\n # Set appropriate logging levels\n if debug:\n logger.setLevel(\"DEBUG\")\n logger.debug(\"Debugmode ENGAGED\")\n elif not silent:\n logger.setLevel(\"INFO\")\n\n # Prepare output location\n target_file = clean_filename(url)\n\n os.makedirs(DATADIR, exist_ok=True)\n\n if target_file in os.listdir(DATADIR) and reset:\n logger.info(\"Resetting file {filename}\".format(filename=os.path.join(DATADIR,target_file)))\n os.remove(os.path.join(DATADIR,target_file))\n if os.path.exists(CACHEFILE) and reset:\n logger.debug(\"Resetting cache {CACHEFILE}\".format(CACHEFILE=CACHEFILE))\n os.remove(CACHEFILE)\n \n # Check resume state\n status = cache_load(url)\n if status:\n logger.info(\"Resuming previous collection:\\n {status}\".format(status=status))\n from_time = status['from']\n to_time = status['to']\n stepsize = status['stepsize']\n current = status['current']\n else:\n current = 0\n status['from' ] = from_time\n status['to' ] = to_time\n status['stepsize' ] = stepsize\n status['current' ] = current\n status['direction'] = 'unknown'\n\n # Do data collection \n with open(os.path.join(DATADIR,target_file), 'a+') as f:\n batch = []\n for start, _, step, total, direction in walk_times(from_time, to_time, stepsize):\n if status['from'] == 'now':\n status['from'] = start.isoformat()\n status['direction'] = direction\n if not step%10: logger.debug(\"now at {step} of {total}\".format(step=step, total=total))\n if step < current:\n continue \n batch.append({'url':url, 'timestamp':start, 'step':step})\n if len(batch)==batchsize:\n perc=(step/total)*100\n logger.info(\"Processing {batchsize} pages for {url} at step {step:6.0f} of {total:6.0f} {perc:3.2f}%\".format(\n batchsize=batchsize, url=url, step=step, total=total, perc=perc))\n retrieved = Parallel(threads)(delayed(get_page)(**args) for args in batch)\n for hit in retrieved:\n f.write(json.dumps(hit)+\"\\n\")\n status['current'] = hit['step']\n logger.info(\"Wrote batch to disk\")\n cache_save(url,status)\n batch=[]\n retrieved = Parallel(threads)(delayed(get_page)(**args) for args in batch)\n for hit in retrieved:\n f.write(json.dumps(hit)+\"\\n\")\n status['current'] = hit['step']\n cache_save(url,status)\n logger.info(\"wrote last batch to disk\")\n batch=[] \n logger.info(\"Succesfully stopped retrieval\")",
"def run(self) -> None:\n self.urls_list = self._create_api_ulr_list()\n self.results = self._sort_results(\n AsyncGetAPI(\n self.urls_list, self.threads, max_requests=self.max_requests\n ).results\n )",
"def download_json(self):\n # create directories for threads and images if they don't exist\n if not self.path.is_dir():\n self.path.mkdir(parents=True)\n if not self.images_path.is_dir():\n self.images_path.mkdir(parents=True)\n\n # open file, send request and write data to a file\n with self.file.open('w') as json_file:\n try:\n json_data = json.dumps(requests.get(self.endpoint).json())\n json_file.write(json_data)\n except json.JSONDecodeError as error:\n print(\"Error fetching json: \", error)",
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path",
"def save(self):\n if self._cache is not None:\n with open(self.cache_path, 'w') as cache_file:\n json.dump(self._cache, cache_file)",
"def run(self):\n results = self.fetch()\n return results",
"def write_to_cache(self, data, filename):\n json_data = self.json_format_dict(data, True)\n cache = open(filename, 'w')\n cache.write(json_data)\n cache.close()",
"def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])",
"def cache(self, irc, msg, args, channel, apicall):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n call = self._sql(\"\"\"SELECT * FROM universe_apicall\n WHERE name ILIKE %s AND type='Corporation'\"\"\", [apicall])\n if not call:\n irc.error('Unknown APICall')\n return\n else:\n update = self._sql(\"\"\"\n SELECT * FROM accounting_apiupdate\n WHERE apicall_id=%s AND owner = %s\"\"\", [call['id'], self.corporationID])\n\n if not update['last_update']:\n updated = 'never'\n else:\n updated = update['last_update']\n irc.reply('{0} last updated: {1}'.format(\n call['name'],\n updated\n ), prefixNick=False)",
"def home():\n logger.info(\"In API3 home function\")\n\n with tracer.span(\"API3_Task1\"):\n ret_val_1 = task1()\n\n with tracer.span(\"API3_Task2\"):\n ret_val_2 = task2()\n\n logger.info(\"Calling API 2\")\n response = requests.get(url='http://localhost:8100/')\n print(f\"response = {response.content}\")\n\n return jsonify({'data': 'Success API3'})",
"def cache(self):\n\n api = ('https://api.darksky.net/forecast/98cdd61d77bab4d8d739f78b33'\n 'e06c30/53.3498,-6.2603?units=si')\n\n current_weather_data = requests.get(api)\n\n if current_weather_data.status_code == 200:\n current_weather_data = json.loads(current_weather_data.text)\n\n self.weather_forecast_json = current_weather_data\n self.current_temperature = self.weather_forecast_json \\\n ['currently']['temperature']\n self.current_rainfall = self.weather_forecast_json \\\n ['currently']['precipIntensity']\n\n else:\n self.logger.error('Darksky API call failed.')\n\n threading.Timer(1200.0, self.cache).start()",
"def set_cached_response(self) -> None:\n if self.get_caching_duration() > 0: # if caching is enabled for this request\n json_response = self._request_result.json()\n with open(self.cache_file_name, 'w') as json_file:\n json.dump(json_response, json_file, indent=4)",
"def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')",
"def saveCacheFile(self):\n with open(self.cachePath, 'w', encoding='utf-8') as outfile:\n json.dump(self.cacheData, outfile)",
"def _invalidate_http_cache(self):\n self._requests_cache = {}",
"def use_cached_files(self, cache_key):\r\n pass",
"def getData(self, local_cache):",
"def dispatch(self, *args, **kwargs):\n cache_allowed = self.is_cache_allowed()\n logging.debug('%s: caching is %s', self.request.path, 'allowed' if cache_allowed else 'NOT allowed', )\n\n response = None\n cache_hit = False\n if cache_allowed: # get from cache\n response = yield self.get_cached()\n cache_hit = True if response is not None else False\n logging.debug('%s: cache %s', self.request.uri, 'HIT' if cache_hit else 'MISS')\n\n if response is None: # get actual\n response = yield self.proxy_async_request()\n\n if cache_allowed:\n if 200 <= response.code <= 299: # store into cache\n yield self.set_cache(response)\n logging.debug('%s: status %d - stored in cache', self.request.uri, response.code)\n else:\n logging.debug('%s: error status %d', self.request.uri, response.code)\n\n # output proxied response\n self.process_response(response)\n self.finish()\n\n if cache_allowed:\n if cache_hit: # renew cache if cache hit\n yield self.renew_cache(self.proxy_async_request)\n logging.debug('%s: slow endpoint, cache %s', self.request.path, 'updated' if cache_hit else 'NOT updated')",
"def _run(self):\n data = None\n\n now = datetime.now().timestamp()\n\n if not os.path.exists(self.cache_file) or now - os.path.getmtime(self.cache_file) > MAX_FILE_AGE:\n data = self._download()\n self._write_cache_file(data)\n elif not self._dma_facilities_map:\n self.log.info(f\"Using cached file: {self.cache_file}\")\n data = self._read_cache_file()\n\n if data:\n self._process(self._unzip(data))\n self.log.info(\"Done loading..\")\n else:\n self.log.debug(\"Facilities are still fresh..\")\n\n threading.Timer(CHECK_INTERVAL, self._run).start()",
"def test_cached(self):\n # Setup the mocked response, refrain from matching the query string\n responses.add(responses.GET, self.api_url, json=self.valid_response,\n status=200, match_querystring=False)\n\n acme = ACMEAccount(client=self.client)\n acme.all(self.org_id)\n data = acme.all(self.org_id)\n\n # Verify all the query information\n # There should only be one call the first time \"all\" is called.\n # Due to pagination, this is only guaranteed as long as the number of\n # entries returned is less than the page size\n self.assertEqual(len(responses.calls), 1)\n self.match_url_with_qs(responses.calls[0].request.url)\n self.assertEqual(data, self.valid_response)"
] | [
"0.6925335",
"0.6491691",
"0.6327244",
"0.6154643",
"0.60999835",
"0.60896784",
"0.60562545",
"0.6047197",
"0.5878853",
"0.5847318",
"0.57860565",
"0.5767712",
"0.5724594",
"0.57162315",
"0.57134306",
"0.56965476",
"0.565406",
"0.56492305",
"0.5622184",
"0.56044537",
"0.56002927",
"0.55953264",
"0.5578971",
"0.5562443",
"0.556203",
"0.55602944",
"0.5543635",
"0.55382574",
"0.5531715",
"0.55309546"
] | 0.71164197 | 0 |
Makes a Linode API call to get the list of nodes. | def get_nodes(self):
try:
for node in Linode.search(status=Linode.STATUS_RUNNING):
self.add_node(node)
except chube_api.linode_api.ApiError, e:
print "Looks like Linode's API is down:"
print
print e
sys.exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()",
"def get_nodes(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/nodes\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Create the Nodes array but cleanup cache if there is one\n if self.nodes:\n self.nodes = []\n for _node in _response.json():\n _n = Node(connector=self.connector, **_node)\n _n.project_id = self.project_id\n self.nodes.append(_n)",
"def node_list(ctx):\n nodes = ctx.obj['controller'].get_node_list()\n nodes = [[x] for x in nodes]\n click.echo(generate_table(['NODE'], nodes, sort='NODE', plain=ctx.obj['plain']))",
"def list_nodes(self):\n return self.ironic_client.node.list()",
"def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )",
"async def list_nodes(self, *, option: ListApiOptions) -> ListApiResponse:\n try:\n reply = await self._client.get_all_node_info(timeout=option.timeout)\n except DataSourceUnavailable:\n raise DataSourceUnavailable(GCS_QUERY_FAILURE_WARNING)\n\n result = []\n for message in reply.node_info_list:\n data = protobuf_message_to_dict(\n message=message, fields_to_decode=[\"node_id\"]\n )\n data[\"node_ip\"] = data[\"node_manager_address\"]\n data[\"start_time_ms\"] = int(data[\"start_time_ms\"])\n data[\"end_time_ms\"] = int(data[\"end_time_ms\"])\n\n result.append(data)\n\n total_nodes = len(result)\n # No reason to truncate node because they are usually small.\n num_after_truncation = len(result)\n\n result = self._filter(result, option.filters, NodeState, option.detail)\n num_filtered = len(result)\n\n # Sort to make the output deterministic.\n result.sort(key=lambda entry: entry[\"node_id\"])\n result = list(islice(result, option.limit))\n return ListApiResponse(\n result=result,\n total=total_nodes,\n num_after_truncation=num_after_truncation,\n num_filtered=num_filtered,\n )",
"def get_nodes(self, type, query_args={}):\n endpoint = '/v3/educator/%ss' % (Node.TYPE_MAP[type])\n result = self.request(endpoint, query_args)\n\n nodes = []\n for data in result.response:\n node = Node.instance(type, data)\n nodes.append(node)\n\n return nodes",
"def GetNodes(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n nodes = self._SendRequest(HTTP_GET, \"/%s/nodes\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return nodes\n else:\n return [n[\"id\"] for n in nodes]",
"def GET(self):\n user_data = web.input(cluster_id=None)\n if user_data.cluster_id == '':\n nodes = self.get_object_or_404(Node, cluster_id=None)\n elif user_data.cluster_id:\n nodes = self.get_object_or_404(\n Node,\n cluster_id=user_data.cluster_id\n )\n else:\n nodes = self.get_object_or_404(Node)\n def_net_nodes = []\n for node in nodes:\n rendered_node = self.get_default(self.render(node))\n def_net_nodes.append(rendered_node)\n return map(self.render, nodes)",
"def _rosnode_cmd_list(argv):\n args = argv[2:]\n parser = OptionParser(usage=\"usage: %prog list\", prog=NAME)\n parser.add_option(\"-u\",\n dest=\"list_uri\", default=False,\n action=\"store_true\",\n help=\"list XML-RPC URIs (NOT IMPLEMENTED)\")\n parser.add_option(\"-a\",\"--all\",\n dest=\"list_all\", default=False,\n action=\"store_true\",\n help=\"list all information (NOT IMPLEMENTED)\")\n (options, args) = parser.parse_args(args)\n namespace = None\n if len(args) > 1:\n parser.error(\"invalid args: you may only specify one namespace\")\n elif len(args) == 1:\n #namespace = rosgraph.names.script_resolve_name('rostopic', args[0])\n pass\n\n # In ROS 1, the rosnode list invocation was performed using:\n # rosnode_listnodes(namespace=namespace, list_uri=options.list_uri, list_all=options.list_all)\n\n result = rclpy.get_node_names()\n for node in result:\n print(node)",
"def GET(self):\n user_data = web.input(cluster_id=None)\n nodes = db().query(Node).options(\n joinedload('cluster'),\n joinedload('interfaces'),\n joinedload('interfaces.assigned_networks'),\n joinedload('role_list'),\n joinedload('pending_role_list'))\n if user_data.cluster_id == '':\n nodes = nodes.filter_by(\n cluster_id=None).all()\n elif user_data.cluster_id:\n nodes = nodes.filter_by(\n cluster_id=user_data.cluster_id).all()\n else:\n nodes = nodes.all()\n return self.render(nodes)",
"def get_nodes(self):\n self.get_status()\n old_api = self.version[0] <= '3'\n if old_api:\n certs_path = \"%s/certificate_statuses/*\" % (self.environment)\n nodeinfo_path_tpl = \"{env}/node/{node}\"\n else:\n certs_path = \"puppet-ca/v1/certificate_statuses/no_key?environment=%s\" % (self.environment)\n nodeinfo_path_tpl = \"puppet/v3/node/{node}?environment={env}\"\n\n csts = self._send('GET', certs_path)\n nodes_names = []\n for cst in csts:\n nodes_names.append(cst['name'])\n\n all_nodes = []\n for nname in nodes_names:\n path = nodeinfo_path_tpl.format(node=nname, env=self.environment)\n nodeinfo = self._send('GET', path)\n if old_api:\n nodeinfo = self._from_pson(nodeinfo['data'])\n else:\n nodeinfo = self._from_pson(nodeinfo)\n if 'parameters' in nodeinfo:\n node = nodeinfo['parameters']\n if self.onlynodes:\n if not (node.get('hostname') in self.onlynodes or\n node.get('ipaddress') in self.onlynodes or\n node.get('fqdn') in self.onlynodes or\n node.get('uuid') in self.onlynodes):\n continue\n all_nodes.append(node)\n\n return all_nodes",
"def list(self, filter, *args, timeout=None):\n req = NodeListRequest()\n req.meta.CopyFrom(ListRequestMetadata())\n page_size_option = self.parent._test_options.get('PageSize')\n if isinstance(page_size_option, int):\n req.meta.limit = page_size_option\n\n req.filter = plumbing.quote_filter_args(filter, *args)\n\n def generator(svc, req):\n tries = 0\n while True:\n try:\n plumbing_response = svc.stub.List(\n req,\n metadata=svc.parent.get_metadata('Nodes.List', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n tries = 0\n for plumbing_item in plumbing_response.nodes:\n yield plumbing.convert_node_to_porcelain(plumbing_item)\n if plumbing_response.meta.next_cursor == '':\n break\n req.meta.cursor = plumbing_response.meta.next_cursor\n\n return generator(self, req)",
"def getNodes(self):\n data = self.connect('get','nodes',None)\n return data",
"def list_nodes(conn=None, call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The list_nodes function must be called with -f or --function.\"\n )\n\n if not conn:\n conn = get_conn()\n\n ret = {}\n datacenter_id = get_datacenter_id()\n\n try:\n nodes = conn.list_servers(datacenter_id=datacenter_id)\n except PBNotFoundError:\n log.error(\"Failed to get nodes list from datacenter: %s\", datacenter_id)\n raise\n\n for item in nodes[\"items\"]:\n node = {\"id\": item[\"id\"]}\n node.update(item[\"properties\"])\n node[\"state\"] = node.pop(\"vmState\")\n ret[node[\"name\"]] = node\n\n return ret",
"def list_nodes_full(conn=None, call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The list_nodes_full function must be called with -f or --function.\"\n )\n\n if not conn:\n conn = get_conn() # pylint: disable=E0602\n\n ret = {}\n datacenter_id = get_datacenter_id()\n nodes = conn.list_servers(datacenter_id=datacenter_id, depth=3)\n\n for item in nodes[\"items\"]:\n node = {\"id\": item[\"id\"]}\n node.update(item[\"properties\"])\n node[\"state\"] = node.pop(\"vmState\")\n node[\"public_ips\"] = []\n node[\"private_ips\"] = []\n if item[\"entities\"][\"nics\"][\"items\"] > 0:\n for nic in item[\"entities\"][\"nics\"][\"items\"]:\n if nic[\"properties\"][\"ips\"]:\n pass\n ip_address = nic[\"properties\"][\"ips\"][0]\n if salt.utils.cloud.is_public_ip(ip_address):\n node[\"public_ips\"].append(ip_address)\n else:\n node[\"private_ips\"].append(ip_address)\n\n ret[node[\"name\"]] = node\n\n __utils__[\"cloud.cache_node_list\"](\n ret, _get_active_provider_name().split(\":\")[0], __opts__\n )\n\n return ret",
"def get_nodes(self, project_id):\n return self.http_call(\n \"get\", url=f\"{self.base_url}/projects/{project_id}/nodes\"\n ).json()",
"def fusion_api_get_ha_nodes(self, uri=None, param='', api=None, headers=None):\n return self.ha_nodes.get(uri=uri, api=api, headers=headers, param=param)",
"def get_all_nodes(self, partition: str, select: List[str] = None) -> Response:\n uri = build_uri(uri=url_node, partition=partition, select=select)\n return self._client.get(uri)",
"def get_node_list(self):\n logger.debug('Updating node list')\n self.subscribe_mqtt('/nodes/+/responses/ping')\n self.node_ids = []\n\n def on_response(payload, data):\n if data and data.get('node', None):\n node_id = data['node']\n logger.debug('Found node with ID \"%s\"' % node_id)\n\n if node_id not in self.node_ids:\n self.node_ids.append(node_id)\n\n return False\n\n self.publish_mqtt('/ping', on_response=on_response)\n time.sleep(self.timeout / 1000)\n\n return self.node_ids",
"def list_nodes(self, type_):\n raise NotImplementedError()",
"def List(ctx):\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListTestsResult = ctx.element.list_tests()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListTestsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)",
"def get(self, _include=None, filters=None, pagination=None, sort=None,\n **kwargs):\n return self._respond_list(\n self.model.node_template.list(\n include=_include,\n filters=filters,\n pagination=pagination,\n sort=sort,\n **kwargs\n )\n )",
"def list_nodes_select(call=None):\n return salt.utils.cloud.list_nodes_select(\n list_nodes_full(),\n __opts__[\"query.selection\"],\n call,\n )",
"def test_get_hyperflex_node_list(self):\n pass",
"def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1",
"def get_list_node(showOnly=False, full=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=VPNodes')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # server=0\n slist = []\n for trr in bs.tbody.find_all('tr'):\n try:\n if full:\n tsrv = {\n 'name': trr.find_all('td')[0].text.strip(),\n 'ip': trr.find_all('td')[2].text.strip(),\n 'lan': trr.find_all('td')[3].text.strip(),\n 'cpus': trr.find_all('td')[4].text.strip(),\n 'memory': trr.find_all('td')[5].text.strip(),\n 'os': trr.find_all('td')[6].text.strip(),\n 'vz': trr.find_all('td')[7].text.strip(),\n 'license': trr.find_all('td')[8].text.strip(),\n 'cap': int(trr.find_all('td')[9].text.strip()),\n 'on': int(trr.find_all('td')[10].text.strip()),\n 'off': int(trr.find_all('td')[11].text.strip()),\n 'disk_used': trr.find_all('td')[12].text.strip(),\n 'disk_free': trr.find_all('td')[13].text.strip(),\n 'psc1': trr.find_all('td')[14].text.strip(),\n 'psc2': trr.find_all('td')[15].text.strip(),\n 'loc': trr.find_all('td')[16].text.strip(),\n 'ra': trr.find_all('td')[17].text.strip(),\n 'ba': trr.find_all('td')[18].text.strip(),\n 'model': trr.find_all('td')[19].text.strip()\n }\n else:\n tsrv = trr.find_all('td')[0].text.strip()\n except:\n continue\n slist.append(tsrv)\n if not showOnly:\n if full:\n json.dumps(tsrv)\n else:\n print(tsrv)\n\n return slist",
"def get_nodes(self):\n pass",
"def list(options=None):\n if options is None:\n return requests.get('/')\n else:\n return requests.get('/', options)",
"def get_nodes():\n nodes_config_file = Settings.CONF_NODES_FILE\n current_nodes = load_node_names(nodes_config_file)\n\n return current_nodes"
] | [
"0.7121502",
"0.6617446",
"0.65728307",
"0.6527529",
"0.64565825",
"0.6433634",
"0.6416782",
"0.6389691",
"0.6355934",
"0.6353988",
"0.6350259",
"0.6307714",
"0.62806284",
"0.62762433",
"0.6274298",
"0.6197774",
"0.61561686",
"0.60973465",
"0.6084577",
"0.60462004",
"0.6044041",
"0.5966871",
"0.5924678",
"0.5921093",
"0.5920538",
"0.5891368",
"0.58419937",
"0.5833265",
"0.5821114",
"0.5796986"
] | 0.71513474 | 0 |
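The get_nodes document above wraps the provider call in a try/except and exits with a message when the API is unreachable. Below is a minimal, self-contained sketch of that pattern; ApiError and list_running_nodes are hypothetical stand-ins, not the real chube/Linode client API.

```python
import sys

class ApiError(Exception):
    """Hypothetical stand-in for a provider-specific API error."""

def list_running_nodes():
    # Hypothetical stand-in for a call like Linode.search(status=RUNNING).
    return [{"label": "web-1"}, {"label": "db-1"}]

def get_nodes(add_node):
    try:
        for node in list_running_nodes():
            add_node(node)
    except ApiError as e:
        # Fail loudly but cleanly when the provider API is unavailable.
        print("Looks like the provider's API is down:")
        print(e)
        sys.exit(1)

get_nodes(lambda node: print("adding", node["label"]))
```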
Creates self._datacenter_cache, containing all Datacenters indexed by ID. | def populate_datacenter_cache(self):
self._datacenter_cache = {}
dcs = Datacenter.search()
for dc in dcs:
self._datacenter_cache[dc.api_id] = dc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Datacenters(self):\n if not self._datacenters:\n dcs = self._get_objects(vim.Datacenter)\n for dc in dcs:\n self._datacenters[dc.name] = dc\n return self._datacenters",
"def get_datacenters_by(self, datacenter=None, tenant=None, **kwargs):\n if tenant:\n kwargs.update(datacenter=datacenter, tenant=tenant)\n return self.query(_DATACENTER_JOIN, **kwargs)\n else:\n return [self.get_by_name_or_uuid('datacenters',\n datacenter, **kwargs)]",
"def list_datacenters(conn=None, call=None):\n if call != \"function\":\n raise SaltCloudSystemExit(\n \"The list_datacenters function must be called with -f or --function.\"\n )\n\n datacenters = []\n\n if not conn:\n conn = get_conn()\n\n for item in conn.list_datacenters()[\"items\"]:\n datacenter = {\"id\": item[\"id\"]}\n datacenter.update(item[\"properties\"])\n datacenters.append({item[\"properties\"][\"name\"]: datacenter})\n\n return {\"Datacenters\": datacenters}",
"def get_ceph_clusters_by_pcc(conn: dict, id: str) -> dict:\n return get(conn, f\"{S3PCCS}/{id}/storage/clusters\")",
"def main():\n\n data = get_data(URL)\n\n if not data:\n raise ValueError('No data to process')\n\n datacenters = [\n Datacenter(key, value)\n for key, value in data.items()\n ]\n\n pass # the rest of your logic here",
"def get_all_clusters(self) -> Dict[str, List[str]]:\n result = {}\n for c_id in set(self._clusters.values()):\n result[c_id] = self.get_cluster_by_id(c_id)\n return result",
"def get_active_cache(reactor, connection, tenant_id, group_id):\n eff = CassScalingGroupServersCache(tenant_id, group_id).get_servers(True)\n disp = get_working_cql_dispatcher(reactor, connection)\n d = perform(disp, eff)\n return d.addCallback(lambda (servers, _): {s['id']: s for s in servers})",
"def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}",
"def get_distribution_centers():\n dcs = DistributionCenter.query # no need to order\n dcs_data = [dc.to_dict() for dc in dcs.all()]\n return jsonify(distribution_centers=dcs_data)",
"def Datastores(self):\n if not self._datastores:\n ds = self._get_objects(vim.Datastore)\n for d in ds:\n self._datastores[d.name] = Datastore(d)\n return self._datastores",
"def find_cluster(self, id):\n raise NotImplementedError",
"def get_all_post_in_category(id):\n key_cache = str(KEY_CACHE_API_CATEGORY_POST_IN_CATEGORY) + str(id)\n cached_data = cache.get(key_cache)\n if not cached_data:\n # Get post in DB\n post_list = CategoryPostDao.get_all_post_by_category_id(id)\n # Have post to return\n if post_list.count() > 0:\n # Set list post into cache\n cache.set(key_cache, post_list, settings.CACHE_TIME)\n cached_data = post_list\n else:\n cached_data = {}\n return cached_data",
"def get_derived(self, id_):\n if not isinstance(id_, UUID):\n id_ = UUID(id_)\n with self._db_connection() as connection:\n return [\n self._make(result, full_info=True)\n for result in connection.get_derived_datasets(id_)\n ]",
"def cache_dc(self, end_user, input_data, output_data0, c_d, Name_offloaded_data):\n print(\"Caching at Data center is done at %d%%\" % (random.randint(50, 99)))\n c_kd = end_user * (input_data + output_data0)\n cache_capacity_allocation_dc.append(c_kd)\n cached_content.insert(Name_offloaded_data, output_data0)\n DC_caching_decision_variable.append(1)\n return cached_content, DC_caching_decision_variable, cache_capacity_allocation_dc",
"def createSectorsData(self):\n for sectorId, settings in self.arenaTypeData.sectors.sectors.iteritems():\n entity = next((sector for sector in ACSector.entities if sector.ident == sectorId), None)\n if entity:\n self._sectors[sectorId] = sector = ACSectorClient.ACSectorClient(settings, entity)\n sector.eStateChanged += self.onSectorStateChanged\n sector.eRocketV2TargetSectorIDChanged += self.onRocketV2TargetSectorChanged\n\n self._checkIsReady()\n return",
"def load_all_services(self, update_cache=False):\n\n all_data = []\n for domain in self.api_key_instance.get_api_keys():\n for service in SER_TYPES:\n if domain in SER_TYPES_SKIP:\n if service in SER_TYPES_SKIP[domain]:\n continue\n # set service_data obj e.g self.linz_wms=service_data obj\n data_feed = \"{0}_{1}\".format(domain, service) # eg linz_wms\n setattr(\n self,\n data_feed,\n ServiceData(\n domain,\n service,\n self.service_versions,\n self.api_key_instance,\n update_cache,\n ),\n )\n service_data_instance = getattr(self, data_feed)\n self.data_feeds[\n data_feed\n ] = service_data_instance # keep record of ser data insts\n service_data_instance.process_service_data()\n if service_data_instance.disabled:\n continue\n if service_data_instance.err:\n return service_data_instance.err\n all_data.extend(service_data_instance.info)\n self.table_model.setData(all_data)\n self.set_section_size()\n self.services_loaded = True\n\n if update_cache:\n self.purge_cache()\n return None",
"def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))",
"def cache(cls):\n return Cache(cls, cls.cache_regions, cls.cache_label)",
"def _ensureCached(self, id):\n if id not in self._objects:\n self._fetchObjectsByID([id])\n misc.cdblogv(misc.kLogMsg, 0,\n \"WsObjectCache: object with id '%s' unexpectedly not cached.\" % id)",
"def get_cluster_by_id(self, c_id: str) -> List[str]:\n return [k for k, v in self._clusters.items() if v == c_id]",
"def update_service_data_cache(self):\n\n self.services_loaded = False\n thread = threading.Thread(target=self.load_all_services, args=(True,))\n thread.start()\n self.cache_updated = True",
"async def set_all_cache(self) -> dict:\n all_data = await self.storage.load_all()\n await self.cache.set_all(all_data)\n self.all_cached = True\n return all_data",
"def xyzcellcenters(self):\n cache_index = 'cellcenters'\n if cache_index not in self._cache_dict or \\\n self._cache_dict[cache_index].out_of_date:\n # get x centers\n x = np.add.accumulate(self.__delr) - 0.5 * self.delr\n # get y centers\n Ly = np.add.reduce(self.__delc)\n y = Ly - (np.add.accumulate(self.__delc) - 0.5 *\n self.__delc)\n x_mesh, y_mesh = np.meshgrid(x, y)\n if self.__nlay is not None:\n # get z centers\n z = np.empty((self.__nlay, self.__nrow, self.__ncol))\n z[0, :, :] = (self._top[:, :] + self._botm[0, :, :]) / 2.\n for l in range(1, self.__nlay):\n z[l, :, :] = (self._botm[l - 1, :, :] +\n self._botm[l, :, :]) / 2.\n else:\n z = None\n if self._has_ref_coordinates:\n # transform x and y\n x_mesh, y_mesh = self.get_coords(x_mesh, y_mesh)\n # store in cache\n self._cache_dict[cache_index] = CachedData([x_mesh, y_mesh, z])\n if self._copy_cache:\n return self._cache_dict[cache_index].data\n else:\n return self._cache_dict[cache_index].data_nocopy",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")",
"def list(self, **params):\n\n _, _, cost_centers = self.http_client.get(\"/costcenters\", params=params)\n return cost_centers",
"def fusion_api_add_datacenter(self, body, api=None, headers=None):\n return self.dc.create(body, api, headers)",
"def c_centers(self):\n self.compute_c_centers(self)\n return self._c_centers",
"def _cache(self):\n return self._class(self.client_servers, **self._options)",
"def clear_cache(self):\n\n for dataset in self._datasets:\n dataset.clear_cache()"
] | [
"0.6505794",
"0.53940344",
"0.5058107",
"0.49381578",
"0.49252507",
"0.48534706",
"0.4819115",
"0.48064002",
"0.47601",
"0.47519144",
"0.4742259",
"0.4740057",
"0.4727511",
"0.47176874",
"0.47031915",
"0.4700527",
"0.4698371",
"0.46855637",
"0.4634698",
"0.46332663",
"0.46316257",
"0.4601359",
"0.4597648",
"0.45925087",
"0.45925087",
"0.45643067",
"0.4552425",
"0.45421347",
"0.45381355",
"0.4515014"
] | 0.80962306 | 0 |
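populate_datacenter_cache above builds a dict-by-ID cache once and reuses it. A sketch under the assumption that each datacenter object exposes api_id and location attributes (the values here are invented):

```python
from collections import namedtuple

Datacenter = namedtuple("Datacenter", ["api_id", "location"])

def populate_datacenter_cache(datacenters):
    # Index datacenter objects by their API id so later lookups are O(1).
    return {dc.api_id: dc for dc in datacenters}

cache = populate_datacenter_cache([
    Datacenter(2, "Dallas, TX, USA"),
    Datacenter(3, "Fremont, CA, USA"),
])
print(cache[3].location)  # Fremont, CA, USA
```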
Returns the lowercase city name of the node's data center. | def get_datacenter_city(self, node):
if self._datacenter_cache is None:
self.populate_datacenter_cache()
location = self._datacenter_cache[node.datacenter_id].location
location = location.lower()
location = location.split(",")[0]
return location | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")",
"def data_center_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"data_center_name\")",
"def data_center_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_center_name\")",
"def data_center_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_center_name\")",
"def city(self) -> str:\n return pulumi.get(self, \"city\")",
"def city(self):\r\n try:\r\n return str(self.connect()['name'])\r\n except:\r\n return '@weather_city'",
"def city(self):\n # type: () -> string_types\n return self._city",
"def datacenter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"datacenter\")",
"def get_CityName():\n return str(root.find('provincia').text) # root.find('province') returns the direct child 'province' of root. ...\n # ... An equivalent way to get the same result is ( root[3].text ), where ...\n # ... root[2] represents 'province' tag and it's the 4th direct child of root.",
"def cluster_name(self):\n return self._data['cluster_name']",
"def city(self):\n return self._city",
"def city(self):\n return self._city",
"def city(self):\n return self._city",
"def city(self):\n return self._city",
"def city(self):\n return self._city",
"def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")",
"def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")",
"def cluster_name(self):\n return self.name",
"def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None",
"def city(self):\n if \"city\" in self._prop_dict:\n return self._prop_dict[\"city\"]\n else:\n return None",
"def city(self) -> Optional[str]:\n return pulumi.get(self, \"city\")",
"def city(self):\n\n try:\n city = self.status.place[\"full_name\"].strip(r\",[A-Z ]\")\n except TypeError:\n city = None\n if not city:\n try:\n city = self.metadata.as_dict.get(\"user_city\").get(\"google_geocoding\")\n except (TypeError, AttributeError):\n city = None\n return city",
"def account_name(self):\n return self.civic_no_city()",
"def get_cluster_name(cls):\n\n mid = Machineid()\n if mid.is_sps_cluster:\n return cls.SPS\n if mid.is_spts_cluster:\n return cls.SPTS\n if mid.is_mdfl_cluster:\n return cls.MDFL\n\n return cls.LOCAL",
"def cloud_name(self):\n return self._cloud_name",
"def get_mds_shortname(node):\n return str(node.getNodeName()).lower()",
"def get_coordinated_car_name(self):\n return self.coordinated_car_name",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")",
"def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")",
"def cluster_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_name\")"
] | [
"0.74766445",
"0.7180771",
"0.69038516",
"0.69038516",
"0.66939473",
"0.6678729",
"0.6621979",
"0.6542636",
"0.64575845",
"0.6278695",
"0.6257993",
"0.6257993",
"0.6257993",
"0.6257993",
"0.6257993",
"0.6226151",
"0.6226151",
"0.61920005",
"0.614394",
"0.614394",
"0.6131929",
"0.6098843",
"0.6087667",
"0.6058889",
"0.60405505",
"0.60376596",
"0.60161334",
"0.59872997",
"0.59872997",
"0.5905353"
] | 0.7814764 | 0 |
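get_datacenter_city above lowercases the location string and keeps only the part before the first comma. A tiny illustration with made-up location strings:

```python
locations = {2: "Dallas, TX, USA", 3: "Fremont, CA, USA"}

def get_datacenter_city(datacenter_id):
    # "Dallas, TX, USA" -> "dallas"
    return locations[datacenter_id].lower().split(",")[0]

assert get_datacenter_city(2) == "dallas"
assert get_datacenter_city(3) == "fremont"
```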
Adds a node to the inventory and index. | def add_node(self, node):
public_ip = [addr.address for addr in node.ipaddresses if addr.is_public][0]
dest = public_ip
# Add to index
self.index[dest] = node.api_id
# Inventory: Group by node ID (always a group of 1)
self.inventory[node.label] = [dest]
# Inventory: Group by datacenter city
self.push(self.inventory, self.get_datacenter_city(node), dest)
# Inventory: Group by display group
self.push(self.inventory, node.display_group, dest) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)",
"def add_node(self, node):",
"def add_node(self, node):\n self.nodes.append(node)",
"def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True",
"def add_node(self, node):\n self.nodes.add(node)",
"def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node",
"def add_node(self,node):\n \n vertex = Vertex(node)\n \n self.nodes[node] = vertex\n self.numNodes += 1",
"def addNode (self, node):\n self.__nodes.add(node)",
"def add_node(self, node):\n self._nodes.add(node)",
"def add_node(self, node):\n self._nodes.add(node)",
"def addNode(self, node: Node):\n self.nodes.append(node)",
"def addNode(self, nodeItem):\n assert isinstance(nodeItem, NodeItem)\n self.addItem(nodeItem)",
"def add_node(self, node):\n self._nodes[node.id] = node\n self._clear_cache()",
"def add_node (self, node):\n raise NotImplementedError",
"def add_node(self, node):\n\n # Add node only if it does not exist yet\n if node.id() in self.__nodes:\n return\n\n labels = node.labels()\n for label in labels:\n break\n\n if label not in self.__labels:\n self.__labels[label] = len(self.__labels)\n\n js = \"nodes.push({index: \" + str(node.id()) + \", \" +\\\n \"name: \\\"\" + str(node.id()) + \"\\\", \" +\\\n \"group: \" + str(self.__labels[label]) + \\\n \" });\"\n\n d3_node_id = self.frame.evaluateJavaScript(js) - 1\n self.__nodes[node.id()] = str(d3_node_id)\n logger.info(\"node id %s - > d3 id: %s\", node.id(), d3_node_id)",
"def add_node(self, node):\n if node not in self.nodes:\n self.nodes.append(node)",
"def register_node(self, node):\n self.nodes.add(node)",
"def AddNode(self, node):\n self.nodes.append(node)\n return node",
"def add_node(self, metadata, pos):\n node = Node(metadata, pos)\n self.addItem(node)\n self.nodes[node.id] = node\n return node",
"def append_node(self, node):\n self.nodes.append(node)\n node.slot = len(self.nodes)",
"def add_node(self, node):\n if node not in self.nodes:\n self._nodes.append(node)",
"def add_node(self, node):\n\n node.number = len(self.nodes)\n node.id = len(self.nodes)\n\n if node.id not in self.nodes:\n self.nodes[node.id] = node\n\n return self",
"def add_node(self, node):\n index = self._node_index.setdefault(node.ntype, dict())\n if node.ext_id not in index:\n index.setdefault(node.ext_id, node)\n self._type_list.setdefault(node.ntype, list()).append(node)",
"def add_node(self, node: Node) -> None:\n assert len(\n self.network) <= 10, \"Too many nodes attempted to be placed in network\"\n self.network.append(node)",
"def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)",
"def add_node(self, new_node: 'GraphNode'):\n self.operator.add_node(new_node)",
"def add_node(self, name, node):\n self.nodes.setdefault(name, node)",
"def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)",
"def add_node(self, node):\n self.nodes.append(node)\n self.edges[node.identifier] = {}\n self._id2node[node.identifier] = node\n node.parent = None",
"def addNodeToIndex(self, node):\n # self.nodeidx.add(self.nodecounter, (node.getPoint()[0], node.getPoint()[1]), obj=node)\n self.nodeidx.add(self.nodecounter, (node.getPoint()[0], node.getPoint()[1], node.getPoint()[0], node.getPoint()[1]))\n\n self.node_counter__node[self.nodecounter] = node"
] | [
"0.7500117",
"0.73828864",
"0.7321843",
"0.7307978",
"0.72790575",
"0.72646934",
"0.72339076",
"0.7178008",
"0.71437955",
"0.71437955",
"0.7089215",
"0.7021807",
"0.6995974",
"0.69755816",
"0.69561344",
"0.69527453",
"0.69520944",
"0.6948566",
"0.69292915",
"0.68842506",
"0.6870106",
"0.67440575",
"0.67098546",
"0.6692845",
"0.6691302",
"0.6685865",
"0.6682865",
"0.66689175",
"0.66474736",
"0.6569361"
] | 0.7719779 | 0 |
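add_node above records each host three times: as its own one-host group, under its datacenter city, and under its display group. A standalone sketch of that grouping (hosts and IDs are invented):

```python
inventory = {}
index = {}

def push(groups, key, host):
    # Append to the group, creating the list on first use.
    groups.setdefault(key, []).append(host)

def add_node(label, api_id, public_ip, city, display_group):
    index[public_ip] = api_id                   # host address -> node id
    inventory[label] = [public_ip]              # group of one per node
    push(inventory, city, public_ip)            # group by datacenter city
    push(inventory, display_group, public_ip)   # group by display group

add_node("web-1", 1234, "203.0.113.10", "dallas", "production")
add_node("db-1", 5678, "203.0.113.11", "dallas", "production")
print(inventory["dallas"])  # ['203.0.113.10', '203.0.113.11']
```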
Pushes an element onto a list that may not have been defined in the dict. | def push(self, my_dict, key, element):
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def push(self, elem):\n pass",
"def push(self, new_element):\n self.array.append(new_element)",
"def push(self, new_element):\n self.arr.append(new_element)\n self.size += 1",
"def __setitem__(self, index, value):\n assert 0 <= index < len(self), \"Array subscript out of range\"\n self._elements[index] = value",
"def insert(self, key, value):\n # Resize array here if necessary.\n if key < 0: key = 0\n elif key > len(self): key = len(self)\n if key < len(self):\n for j in range(len(self), key, -1):\n self._items[j] = self._items[j - 1]\n self._items[key] = value\n self._size += 1\n self.incModCount()",
"def popElement(self, element):\n index = self.hashd.get(element, None)\n if index == None:\n return\n del self.hashd[element]\n\n size = len(self.arr)\n last = self.arr[size-1]\n\n self.arr[index], self.arr[size-1] = self.arr[size-1], self.arr[index]\n\n del self.arr[-1]\n self.hashd[last] = index",
"def __setitem__(self, idx, element):\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n if not 0 <= idx < self._length: # Ignore indices outside of bounds\n raise IndexError(f'index {idx} out of bounds')\n self._arr[idx] = element",
"def put(self, key, value):\n self.arr[key] = value",
"def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True",
"def push(self, value):\n self.last = self.current\n self.current = np.array(value)",
"def __setitem__(self, key, value):\n self.elements[key] = value",
"def remove(self, key):\n self.arr[key] = -1",
"def __setitem__(self, key, instance):\n # Check in range\n if key >= len(self) or key < -len(self):\n raise IndexError(\"array index {} out of range\".format(key))\n \n # Check the type\n if (not hasattr(instance, \"data_type\") or\n instance.data_type != self.data_type.base_type):\n raise TypeError(\"array is for type {} but got {}\".format(\n self.data_type.base_type, repr(instance)))\n \n # Check the instance isn't already in a container\n if instance._container is not None:\n raise ValueError(\"instance is already a member of a container\")\n \n # Set the element's address\n if self.address is None:\n instance.address = None\n else:\n instance.address = self.address + (instance.size * key)\n \n # If there was previously an instance here, remove this array as its\n # container.\n if self._instances[key] is not None:\n self._instances[key]._container = None\n \n self._instances[key] = instance\n \n # We are now the instance's parent, add it to the list\n instance._container = self\n \n # The array has now been changed, inform any parents\n self._value_changed()",
"def push(self, x):\n heapq.heappush(self.array, x)",
"def put(self, element):\n self.heap.append(element)\n # sift up the element append before\n self.sift_up(self.size() - 1)",
"def __setitem__(self, key, value):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)",
"def __setitem__(self, index, newItem):\r\n #if index < 0 or index >= self.size():\r\n # raise IndexError(\"Array index out of bounds\")\r\n self._items[index] = newItem",
"def push(self, key, value):\r\n if len(self.heap)<self.depth:\r\n heapq.heappush(self.heap, key)\r\n self.elements[key] = value\r\n else:\r\n oldkey = heapq.heappushpop(self.heap, key)\r\n self.elements[key] = value\r\n del self.elements[oldkey]",
"def __setitem__(self, index, newItem):\n #Check to see whether or not the index is within the array's element range.\n if index >= 0 and index < len(self):\n #If the element has nothing in it\n if self._items[index] == self._fillValue:\n self._logicalSize += 1\n \n #If we are going to replace an element with fillValue\n if self._items[index] != self._fillValue and newItem == self._fillValue:\n self._logicalSize -= 1\n \n self._items[index] = newItem",
"def __setitem__(self,key, value):\n cArray.cModule.set_element(self.arrayRef,ctypes.c_int(key),ctypes.c_int(value))",
"def insert(self, e): \r\n if not e in self.vals:\r\n self.vals.append(e)",
"def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n self.array.append(val)\n self.map[val] = len(self.array)-1\n return True",
"def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)",
"def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)",
"def insert(self, val: int) -> bool:\n if val not in self.arr:\n self.arr.append(val)\n index = len(self.arr) - 1\n self.map[val] = index\n return True\n return False",
"def __setitem__(self, key, value):\n if self.count == self.size: # For now, rehash only when full (better to keep it half empty always)\n self.rehash()\n\n hashed_idx = self._linear_probe(key, \"set\") # Get position to put. _probe() takes care of hashing the key\n if self.keys[hashed_idx] != key: # If _probe() returns a spot that doesn't have this key, then increment count\n self.count += 1\n self.keys[hashed_idx] = key # Store key in the keys array\n self.values[hashed_idx] = value # Store value in the values array",
"def array_pop(item):\n return item.pop()",
"def put_elem(self, elem):\n serialized_elem = self.serialize_elem(elem)\n self.redis_client.lpush(self.buffer_name, serialized_elem)",
"def add(self, value):\r\n # Does the underlying \"array\" have enough space? If not\r\n # increase its size. Note, we are really using a Python list\r\n # as our underlying data structure, so any \"doubling\" is\r\n # handled for us.\r\n if len(self) == len(self._data): self._data.append(None)\r\n self._data[self._size] = value # insert new item at end of arry\r\n self.up_heap(self._size) # \"bubble\" the new item up.\r\n self._size += 1 # bump the sze\r",
"def insert(self, e):\n if not e in self.vals:\n self.vals.append(e)"
] | [
"0.61763823",
"0.60454845",
"0.60098",
"0.5906978",
"0.5880794",
"0.5880467",
"0.5862668",
"0.58508754",
"0.5771033",
"0.5766721",
"0.5749718",
"0.5748016",
"0.5746526",
"0.57447404",
"0.5734783",
"0.5690246",
"0.5680334",
"0.5669741",
"0.56696004",
"0.5660492",
"0.56445444",
"0.5635928",
"0.5627039",
"0.5627039",
"0.5601947",
"0.55961424",
"0.55959505",
"0.5587796",
"0.5556714",
"0.55484104"
] | 0.6287549 | 0 |
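The push helper above is the classic append-or-create pattern; dict.setdefault or collections.defaultdict expresses the same idea without the explicit branch, as in this small sketch:

```python
from collections import defaultdict

groups = defaultdict(list)
groups["dallas"].append("203.0.113.10")  # the list is created on first access
groups["dallas"].append("203.0.113.11")
print(dict(groups))  # {'dallas': ['203.0.113.10', '203.0.113.11']}
```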
Reads the inventory from the cache file and returns it as a JSON string. | def get_inventory_from_cache(self):
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']",
"def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n pass",
"def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)",
"def read_cache():\n try:\n cache_file = open(CACHE_FILENAME, 'r', encoding=\"utf-8\")\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n return cache_dict\n except:\n cache_dict = {}\n return cache_dict",
"def parse_inventory(inventory_fn):\n try:\n if not os.path.exists(inventory_fn):\n log(\"INFO\", \"No inventory file found at {}. Creating an empty one.\".format(inventory_fn))\n return {}\n with open(inventory_fn, 'r') as inventory_file:\n # TODO: verify the contents??\n return json.load(inventory_file)\n except Exception as ex:\n log(\"WARN\", \"Error parsing the inventory file. Assuming an empty inventory: {}\".format(ex))\n return {}",
"def write_to_cache(self):\n data = {'data': self.data, 'inventory': self.inventory}\n json_data = json.dumps(data, indent=2)\n\n with open(self.cache_filename, 'w') as cache:\n cache.write(json_data)",
"def load_inventory(file_name, lst_Inventory):\r\n \r\n try:\r\n objFile = open(file_name, 'r')\r\n lst_Inventory.clear()\r\n for line in objFile:\r\n data = line.strip().split(',')\r\n inventory = CD(data[0],data[1],data[2])\r\n lst_Inventory.append(inventory)\r\n objFile.close()\r\n except FileNotFoundError:\r\n pass\r\n return lst_Inventory",
"def products_with_inventory():\n try:\n return jsonify(get_product_caching_service().jsonified_map)\n except Exception as exception:\n return jsonify({'Something went wrong ': exception})",
"def json_from_cache(file_name: str) -> Optional[Dict]:\n\n json_path = os.path.join(CACHE_DIR, file_name)\n\n try:\n with open(json_path, \"r\") as cache_file:\n return json.load(cache_file)\n except IOError:\n log.notice(f\"Could not read JSON from {json_path}\")\n return None",
"def cache(self):\n if self._cache is None:\n with open(self.cache_path, 'r') as cache_file:\n self._cache = json.load(cache_file)\n return self._cache",
"def _read_cache_file(self) -> bytes:\n with open(self.cache_file, 'rb') as file:\n return file.read()",
"def open_inventorybook(filepath):\n path_exists = os.path.exists(filepath)\n inventorybook = None\n if path_exists:\n try: # safest way to open or close file.\n with open(filepath, 'r') as infile:\n inventorybook = json.load(infile)\n finally:\n infile.close()\n return inventorybook",
"def get_json_from_cache(file_name):\n result = None\n path = clean_path(file_name)\n cached_file_name = get_cached_file_name(path)\n if os.path.exists(cached_file_name):\n time = os.path.getmtime(path)\n cached_time = os.path.getmtime(cached_file_name)\n if cached_time > time:\n try:\n source = open(cached_file_name, \"r\")\n try:\n result = json.load(source)\n except ValueError:\n pass\n source.close()\n except OSError:\n # Includes IOError\n pass\n return result",
"def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']",
"def __read_cache(self, fileName):\n if self.__log:\n self.__logger.info(f\"Cache hit - {fileName}\")\n # Cache hit\n with open(fileName, \"rb\") as f:\n content = self.__handle_decompression(f.read())\n variables = pickle.loads(content)\n\n # Move node to front\n node = os.path.relpath(fileName, \"cache\")\n self.__shift_node(node)\n\n return variables",
"def read_inventory(inventory_acronym, year, f, download_if_missing=False):\n file_name = inventory_acronym + '_' + str(year)\n meta = set_stewi_meta(file_name, str(f))\n inventory = load_preprocessed_output(meta, paths)\n method_path = paths.local_path / meta.category\n if inventory is None:\n log.info(f'{meta.name_data} not found in {method_path}')\n if download_if_missing:\n meta.tool = meta.tool.lower() # lower case for remote access\n download_from_remote(meta, paths)\n # download metadata file\n metadata_meta = copy.copy(meta)\n metadata_meta.category = ''\n metadata_meta.ext = 'json'\n download_from_remote(metadata_meta, paths)\n else:\n log.info('requested inventory does not exist in local directory, '\n 'it will be generated...')\n generate_inventory(inventory_acronym, year)\n inventory = load_preprocessed_output(meta, paths)\n if inventory is None:\n log.error('error generating inventory')\n if inventory is not None:\n log.info(f'loaded {meta.name_data} from {method_path}')\n # ensure dtypes\n fields = f.field_types()\n fields = {key: value for key, value in fields.items()\n if key in list(inventory)}\n inventory = inventory.astype(fields)\n return inventory",
"def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}",
"def test_deserialize(self):\n with open('tests/small.json', 'r') as fd:\n fc =json.loads(fd.read())\n input_inv = copy.deepcopy(fc)\n inventoryloader = ansible_inventory_manage.inventory.Inventory()\n inventoryloader.load_inventoryjson(fc)\n output_inv = inventoryloader.write_output_json()\n assert input_inv == output_inv",
"def load_restaurants():\n try:\n with open(CACHE_FILE) as infile:\n print(\"Cache found, loading from file {}\".format(CACHE_FILE))\n restaurants = json.load(infile)\n except Exception:\n print(\"No cache found, loading from API\")\n restaurants = get_restaurants()\n with open(CACHE_FILE, 'w+') as outfile:\n json.dump(restaurants, outfile)\n return restaurants\n return restaurants",
"def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return self._data",
"def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)",
"def read(self, store):\r\n path = f\"{self.system.config_path}/.storage/{STORES[store]}\"\r\n content = None\r\n if os.path.exists(path):\r\n with open(path, \"r\", encoding=\"utf-8\") as storefile:\r\n content = storefile.read()\r\n content = json.loads(content)\r\n return content",
"def get_json(file_name):\n result = None\n path = environment.which(file_name)\n if path is not None:\n result = get_json_from_cache(path)\n if result is None:\n result = make_cached_json(path)\n return result",
"def read_storage(file_name: str):\n logger.info('read_storage start') #Logs a message\n news_all = {}\n try:\n with open(file_name) as f:\n news_all = json.load(f)\n except:\n news_all = {}\n return news_all",
"def load_item_map(cache_file):\n with open(cache_file, 'rb') as f:\n full_item_map = pickle.load(f)\n return full_item_map",
"def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j",
"def get_inventory():\n return INVENTORY",
"def read_json(self, stock_name):\n with open(f\"{self.json_path}/{stock_name}.json\") as json_file:\n json_data = json.load(json_file)\n if self.debug:\n print(f\" JsonHelper.read_json() --> read {self.json_path}/{stock_name}.json success\")\n return json_data",
"def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache",
"def read_json(filepath):\n if (filepath in _json_cache):\n return _json_cache[filepath]\n with open(filepath, 'r', encoding='utf-8') as fileinfo:\n data = json.load(fileinfo)\n _json_cache[filepath] = data\n return data"
] | [
"0.7557402",
"0.7366649",
"0.7024034",
"0.6800572",
"0.6741193",
"0.64853036",
"0.62057525",
"0.6125807",
"0.61111367",
"0.60826665",
"0.60408515",
"0.603484",
"0.6020536",
"0.60187274",
"0.6007673",
"0.598478",
"0.5969202",
"0.59221053",
"0.5900663",
"0.58964336",
"0.5871589",
"0.58659905",
"0.58461654",
"0.57853067",
"0.5732393",
"0.572168",
"0.56820714",
"0.56659436",
"0.5633408",
"0.5602453"
] | 0.88659257 | 0 |
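get_inventory_from_cache above returns the raw file contents. A sketch of the same round trip using a context manager so the handle is always closed; the cache path below is invented and points at the system temp directory:

```python
import json
import os
import tempfile

cache_path = os.path.join(tempfile.gettempdir(), "ansible-linode.cache")

def write_inventory_cache(path, inventory):
    with open(path, "w") as cache:
        json.dump(inventory, cache, indent=2)

def get_inventory_from_cache(path):
    # The context manager closes the file even if read() raises.
    with open(path) as cache:
        return cache.read()  # the raw JSON string, as in the method above

write_inventory_cache(cache_path, {"dallas": ["203.0.113.10"]})
print(get_inventory_from_cache(cache_path))
```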
Reads the index from the cache file and sets self.index. | def load_index_from_cache(self):
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))",
"def _load_index(self):\n try:\n with open(self._index_path, \"rb\") as f:\n version = pickle.load(f)\n data = f.read()\n except EnvironmentError as e:\n # Index doesn't exist yet?\n if e.errno in (errno.ENOENT,):\n return {}\n raise\n if version != self._version:\n # This is another version. Avoid trying to unpickling the\n # rest of the stream, as that may fail.\n return {}\n stamp, overloads = pickle.loads(data)\n _cache_log(\"[cache] index loaded from %r\", self._index_path)\n if stamp != self._source_stamp:\n # Cache is not fresh. Stale data files will be eventually\n # overwritten, since they are numbered in incrementing order.\n return {}\n else:\n return overloads",
"def _load_index(self):\n try:\n with open(self._index_path, \"rb\") as f:\n version = pickle.load(f)\n data = f.read()\n except FileNotFoundError:\n # Index doesn't exist yet?\n return {}\n if version != self._version:\n # This is another version. Avoid trying to unpickling the\n # rest of the stream, as that may fail.\n return {}\n stamp, overloads = pickle.loads(data)\n _cache_log(\"[cache] index loaded from %r\", self._index_path)\n if stamp != self._source_stamp:\n # Cache is not fresh. Stale data files will be eventually\n # overwritten, since they are numbered in incrementing order.\n return {}\n else:\n return overloads",
"def read(self, file, path):\n pos, = struct.unpack('<Q', file.read(8))\n if pos == 0:\n raise VergeMLError(\"Invalid cache file: {}\".format(path))\n file.seek(pos)\n self.index, self.meta, self.info = pickle.load(file)",
"def __loadIndex( self ):\n\n assert self.mCreateMode == False, \"asked to read from database opened for writing\"\n\n if self.mMethod == \"uncompressed\":\n self.mDatabaseFile = open( self.mDbname, \"r\" )\n elif self.mMethod == \"dictzip\":\n import dictzip\n self.mDatabaseFile = dictzip.GzipFile( self.mNameDb)\n elif self.mMethod == \"lzo\":\n import lzo\n self.mDatabaseFile = Uncompressor( self.mNameDb, lzo.decompress )\n elif self.mMethod == \"gzip\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, gzip_demangler )\n elif self.mMethod == \"zlib\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, zlib.decompress )\n elif eslf.mMethod == \"bz2\":\n self.mDatabaseFile = bz2.BZ2File( self.mNameDb )\n elif self.mMethod == \"debug\":\n self.mDatabaseFile = Uncompressor( self.mDbname + \".debug\", lambda x: x ) \n\n self.mIndex = {}\n\n for line in open(self.mNameIndex, \"r\"):\n\n if line.startswith(\"#\"): continue\n data = line[:-1].split(\"\\t\")\n\n # index with random access points\n if len(data) > 4:\n (identifier, pos_id, block_size, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n points = map(int, data[3:-1])\n self.mIndex[int(identifier)] = (pos_id, block_size, lsequence, points)\n else:\n (identifier, pos_id, pos_seq, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n self.mIndex[int(identifier)] = (pos_id, pos_seq, lsequence) \n \n self.mIsLoaded = True",
"def __init__(self, cache_dir: str, cache_size: int):\n self.cache_dir = cache_dir\n self.cache_size = int(cache_size * 1e6)\n self.index = {}\n self.touch_list = []\n self._populate_index()",
"def get_index(self):\n with open(self.index_path, \"r\") as f:\n return json.load(f)",
"def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']",
"def getIndex(self):\n # Create Cache path if it does not exist.\n if not os.path.exists(self.cache_dir):\n os.mkdir(self.cache_dir)\n if not os.path.exists(self.pickle_file):\n self.reload_infos()\n # Read the dict.\n file = open(self.pickle_file, 'r')\n self.networks = pickle.load(file)\n file.close()",
"def test_index_files_cache():\n index_file_cache = ReadIndexFilesCache()\n index_file_cache.add(\"file_1\", None)\n index_file_cache.add(\"file_1\", None)\n assert len(index_file_cache.lookup_set) == 1\n assert index_file_cache.has_not_read(\"file_1\") is False\n assert index_file_cache.has_not_read(\"file_2\") is True\n index_file_cache.add(\"file_2\", None)\n index_file_cache.add(\"file_3\", None)\n index_file_cache.add(\"file_4\", None)\n assert len(index_file_cache.lookup_set) == 4\n\n # Test cache eviction logic\n\n index_file_cache.cache_limit = 2 # override cache limit\n index_file_cache.add(\"file_5\", \"file_1\")\n assert len(index_file_cache.lookup_set) == 5 # No elements evicted\n index_file_cache.add(\"file_6\", \"file_4\")\n assert (\n len(index_file_cache.lookup_set) == 3\n ) # Elements in the cache will be file_4, file_5, file_6",
"def __load_index(self):\n import os\n if not os.path.exists(self.__dir):\n filename=os.path.join(MY_STORE,self.__dir,INTERNAL_DB_FILE)\n else:\n filename=os.path.join(self.__dir,INTERNAL_DB_FILE)\n try:\n self.__handle = open(filename,self.__mode)\n except IOError, e:\n print 'Cannot create status file. Ensure you have permission to write'\n return False\n\n fcntl.flock(self.__handle.fileno(), fcntl.LOCK_EX)\n internal_db = dbm.open(filename, 'c', 0644 )\n self.__storage = shelve.Shelf(internal_db)\n return True",
"def load_index(self, fn):\n # print('Load ', fn)\n # if fn[len(fn)-4:] == '.pkl':\n # fn = fn[0:len(fn)-4]\n fn = 'idx_bench'\n inverted_index = utils.load_obj(fn)\n return inverted_index",
"def read_from_index(self):\n self.__mode = self.READ_MODE\n if not self.__storage:\n self.__load_index()\n\n try:\n tmp=dict(self.__storage)\n except Exception,e:\n print e\n self.__storage = None\n return None\n \n self.__close_storage()\n return tmp",
"def reset_file_index_cache() -> None:\n fileindex_cache_five_minutes.invalidate()",
"def __init__(self, iReader):\n self.__index_reader = iReader",
"def load_index():\n\tprint \"Offline Wikipedia: Loading Index\\nThis may take a bit...\"\n\tindex = {}\n\tnum_entries = 0\n\tstart_time = time.time()\n\n\twith open(wikipedia_index_file) as index_file:\n\t\tcsvreader = csv.reader(index_file, delimiter=',')\n\n\t\tfor line in csvreader:\n\t\t\tindex[line[0].lower()] = join(wikipedia_base_directory, line[1])\n\t\t\tnum_entries += 1\n\n\tprint \"Loaded \" + str(num_entries) + \" index entries in \" + \\\n\t\t\tstr(time.time() - start_time) + \" seconds.\"\n\treturn index",
"def reload_cache(self):\n self.data = self.read_data_cache()",
"def load_index(self, index_path: str = \"hnswlib_index.bin\"):\n if index_path and os.path.exists(index_path):\n corpus_emb_json_path = index_path + \".json\"\n logger.info(f\"Loading index from: {index_path}, corpus embedding from: {corpus_emb_json_path}\")\n super().load_index(corpus_emb_json_path)\n if self.index is None:\n self.create_index()\n self.index.load_index(index_path)\n else:\n logger.warning(\"No index path given. Index not loaded.\")",
"def load(self):\n self.index = nmslib.init(method='hnsw', space='cosinesimil')\n self.index.loadIndex(c.index_path('hnsw.index'))\n self.ys = joblib.load(\"%s.ys\" % self.index_file_prefix)",
"def load(self, key):\n overloads = self._load_index()\n data_name = overloads.get(key)\n if data_name is None:\n return\n try:\n return self._load_data(data_name)\n except OSError:\n # File could have been removed while the index still refers it.\n return",
"def load_index(self, index_path: str = \"annoy_index.bin\"):\n if index_path and os.path.exists(index_path):\n corpus_emb_json_path = index_path + \".json\"\n logger.info(f\"Loading index from: {index_path}, corpus embedding from: {corpus_emb_json_path}\")\n super().load_index(corpus_emb_json_path)\n if self.index is None:\n self.create_index()\n self.index.load(index_path)\n else:\n logger.warning(\"No index path given. Index not loaded.\")",
"def _readchunk(self, chunk_index):\n chunk = self._cache.get(chunk_index)\n if chunk is not None:\n return chunk\n\n chunk = self._uncached_readchunk(chunk_index)\n self._cache.put(chunk_index, chunk)\n return chunk",
"def load_cache(self, filename=None):\n try:\n if not os.path.getsize(self._cache_filename(filename)):\n print(\"On-disk cache empty\")\n return\n\n with open(self._cache_filename(filename), \"rb\") as fh:\n cached = pickle.load(fh)\n self.name_cache = cached.name_cache\n self.run_cache = cached.run_cache\n self.row_cache = cached.row_cache\n self.extend(cached)\n print(\"On-disk cache loaded\")\n except OSError: # (FileNotFoundError is Python 3 only)\n print(\"On-disk cache not found\")",
"def __getitem__(self, index):\n if self.hdf5_cache_mode == \"all\":\n return self.getitem_cache[index]\n return self.get_item(index)",
"def use_cached_files(self, cache_key):\r\n pass",
"def __read_cache(self, fileName):\n if self.__log:\n self.__logger.info(f\"Cache hit - {fileName}\")\n # Cache hit\n with open(fileName, \"rb\") as f:\n content = self.__handle_decompression(f.read())\n variables = pickle.loads(content)\n\n # Move node to front\n node = os.path.relpath(fileName, \"cache\")\n self.__shift_node(node)\n\n return variables",
"def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return",
"def load_index(self, fn):\n name = fn.split('.pkl')[0]\n return utils.load_obj(name)",
"def load_index(self, dictfile):\n\n self.index_file = os.path.join(self.path,\n dictfile.split(\".\")[0] + \".index\")\n try:\n self.fp = codecs.open(self.index_file, \"r\",\n encoding=\"utf-8\", errors=\"ignore\")\n except IOError:\n self.create_index(dictfile)\n\n self.fp = codecs.open(self.index_file, \"r\", encoding=\"utf-8\")\n self.dictionary = {}\n while True:\n text = unicode(self.fp.readline())\n if text:\n line = text.split(\"=\")\n if len(line) == 2:\n index = line[0]\n value = line[1]\n self.dictionary[index] = value\n else:\n break\n\n self.fp.close()\n return self.dictionary",
"def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)"
] | [
"0.7139627",
"0.69223547",
"0.69004935",
"0.68209773",
"0.66870165",
"0.64805925",
"0.6415881",
"0.64003915",
"0.63989496",
"0.6365108",
"0.6176035",
"0.61550426",
"0.6151214",
"0.6092105",
"0.6086096",
"0.6045057",
"0.6004933",
"0.5955538",
"0.5945302",
"0.59057784",
"0.5904825",
"0.58977956",
"0.5871189",
"0.58560795",
"0.5774804",
"0.57646364",
"0.57444346",
"0.5737207",
"0.5736295",
"0.57281613"
] | 0.8064955 | 0 |
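load_index_from_cache above reads the whole file into a string and then calls json.loads; json.load can parse straight from the file object. A small sketch, again using an invented temp-file path:

```python
import json
import os
import tempfile

index_path = os.path.join(tempfile.gettempdir(), "ansible-linode.index")
with open(index_path, "w") as f:
    json.dump({"203.0.113.10": 1234}, f)

# json.load parses directly from the open file, no intermediate string needed.
with open(index_path) as cache:
    index = json.load(cache)
print(index["203.0.113.10"])  # 1234
```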
Find the regular expression pattern s in the dictionary. | def findPattern(self,s):
# pat = re.compile('^'+s+'$')
pat = re.compile(s)
results = {}
for k in self.__clidRep.keys():
if pat.match(str(k)) or pat.match(self.__clidRep[k]):
results[k] = self.__clidRep[k]
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_by_pattern(self):\n while True: \n word = input(\"Enter a regular expression ex: \\d\\d\\w+. Press Q to \"\n \"quit to the main screen: \")\n if word.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n return self.dict_list\n self.find_by_pattern_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(word, value):\n self.find_by_pattern_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_pattern_list)\n break\n self.del_or_edit()",
"def find_pattern_in_str(pattern, source):\n pattern = re.compile(pattern)\n for match in re.finditer(pattern,source):\n return match.groups()\n return None",
"def matcher(string):\n rec = re.compile(rexp, re.VERBOSE)\n groups = set(rec.groupindex) # index nos of no interest; discard\n m = rec.search(string)\n if m is None: return None\n # Match succeeded at this point\n # match-data -> Python\n mapped_d = {gname : m.group(gname) for gname in groups}\n # postprocess and done!\n return {k : ppers[k](mapped_d[k]) for k in mapped_d}",
"def search_pattern(self, value, pattern):\n _pattern = re.compile(pattern)\n _match = _pattern.search(value)\n return _match",
"def lookup_pattern(name):\n\treturn _registered_patterns[name]",
"def match(pattern, s):\n # The regexp compilation caching is inlined in both Match and Search for\n # performance reasons; factoring it out into a separate function turns out\n # to be noticeably expensive.\n if pattern not in _regexp_compile_cache:\n _regexp_compile_cache[pattern] = sre_compile.compile(pattern)\n return _regexp_compile_cache[pattern].match(s)",
"def _find_reg(self, reg_str, content):\n reg_find = re.findall(reg_str, content)\n assert reg_find is not None, \"ERROR: Could not extract any content, check regex string\"\n return reg_find",
"def match(self, s):\n self.matches = self.re.search(s)\n return self.matches",
"def extractDef(c: Cmdr, s: str) -> str:\n for pat in c.config.getData('extract-patterns') or []:\n try:\n pat = re.compile(pat)\n m = pat.search(s)\n if m:\n return m.group(1)\n except Exception:\n g.es_print('bad regex in @data extract-patterns', color='blue')\n g.es_print(pat)\n for pat in extractDef_patterns:\n m = pat.search(s)\n if m:\n return m.group(1)\n return ''",
"def find_key(dic, val): \n return [k for k, v in dic.iteritems() if re.search(v, val)]",
"def pattern(self):\n return fnmatch.translate(self.key)",
"def _compile_regexes(tokdict):\r\n for key, value in tokdict.items():\r\n tokdict[key] = re.compile('^(?:%s)$' % value, re.I).match\r\n return tokdict",
"def findMatchingNames(regname, map):\n list = []\n regname += \"$\"\n\n # Find the existing items that match this string\n\n for name in map:\n regexp = re.compile(regname).match(name)\n if regexp:\n list.append(regexp)\n\n return list",
"def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None",
"def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None",
"def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)",
"def init():\r\n global num_subs, general\r\n for k, v in general.items():\r\n general[k] = re.compile(v, re.IGNORECASE)\r\n for k, v in num_subs.items():\r\n num_subs[k] = re.compile(v, re.IGNORECASE)",
"def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())",
"def find_first_regex_match(key, regex_candidates):\n for cand in regex_candidates:\n try:\n pattern = re.compile(BaseInterface.cap_match_string(cand))\n if pattern.match(key):\n return cand\n except:\n logging.warn('[ros_interface] Ignoring invalid regex string \"{0!s}\"!'.format(cand))\n\n return None",
"def first_match(s,patterns):\n\n for p in patterns:\n m=p.match(s)\n if m:\n return p,m\n return None,None",
"def pattern(self):\n return self[\"pattern\"]",
"def pattern(self):\n return self[\"pattern\"]",
"def search_by_pattern(self, pattern, key=lambda data: data['meta']):\n result = []\n for node, data in self.traverse():\n if re.search(pattern, key(data), flags=re.VERBOSE):\n result.append([node, data])\n return result",
"def find_pattern_in_file(pattern, file_name):\n pattern = re.compile(pattern)\n with open(file_name) as f:\n for line in f:\n for match in re.finditer(pattern,line):\n return match.groups()\n return None",
"def match_rule_patterns(fixed_text, cur=0):\n pattern = exact_find_in_pattern(fixed_text, cur, RULE_PATTERNS)\n # if len(pattern) == 1:\n if len(pattern) > 0:\n return {\"matched\": True, \"found\": pattern[0]['find'],\n \"replaced\": pattern[0]['replace'], \"rules\": pattern[0]['rules']}\n else:\n return {\"matched\": False, \"found\": None,\n \"replaced\": fixed_text[cur], \"rules\": None}",
"def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None",
"def match(self, pattern):\n if isinstance(pattern, Var):\n substitution = {pattern: self}\n elif isinstance(pattern, Term) and self.function == pattern.function \\\n and len(self.arguments) == len(pattern.arguments):\n terms = [Term.__match(self.arguments[idx], pattern.arguments[idx])\n for idx in range(0, len(self.arguments))]\n substitution = reduce(merge, terms)\n else:\n substitution = None\n return substitution",
"def _match(self, filename: str) -> Optional[dict]:\n if not self.named_regexp:\n self.log.warning(\n \"Regular expression not provided for plugin. Run with \"\n \"`--help-all` flag for more information.\"\n )\n return None\n\n match = re.match(self.named_regexp, filename)\n if not match or not match.groups():\n self.log.warning(\n \"Regular expression '{}' did not match anything in: {}\"\n \"\".format(self.named_regexp, filename)\n )\n return None\n\n gd = match.groupdict()\n self.log.debug(\n \"Regular expression '{}' matched\\n'{}' in: {}\"\n \"\".format(self.named_regexp, gd, filename)\n )\n return gd",
"def add_scoreInfo(pattern, raw_text, keyName):\n match_pat = re.search(pattern, raw_text)\n if match_pat is None:\n info[keyName] = None\n else:\n info[keyName] = match_pat.group(1)",
"def match(self, string: str) -> Tuple:\n re_match = None\n re_rule = None\n for regex_name in self.regexes:\n regex = self.regexes[regex_name]\n re_match = regex.match(string)\n if re_match is not None:\n re_rule = regex_name\n break\n return re_rule, re_match"
] | [
"0.6469642",
"0.63880825",
"0.63732696",
"0.6253539",
"0.6212993",
"0.61480343",
"0.60889447",
"0.5976892",
"0.594639",
"0.5908699",
"0.5843748",
"0.57777935",
"0.5762092",
"0.5741424",
"0.5741424",
"0.57190794",
"0.57145727",
"0.56568784",
"0.56494045",
"0.5643466",
"0.5632549",
"0.5632549",
"0.5617302",
"0.5611548",
"0.55855614",
"0.5566542",
"0.5533572",
"0.553039",
"0.5523185",
"0.5518741"
] | 0.8029055 | 0 |
converts devices to a json string | def devicelist_to_json(self):
devices_json = json.dumps(self.device_list)
print(devices_json) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def devices_json():\n return [\n {\n \"macAddress\": \"84:F3:EB:21:90:C4\",\n \"lastData\": {\n \"dateutc\": 1546889640000,\n \"baromrelin\": 30.09,\n \"baromabsin\": 24.61,\n \"tempinf\": 68.9,\n \"humidityin\": 30,\n \"date\": \"2019-01-07T19:34:00.000Z\",\n },\n \"info\": {\"name\": \"Home\", \"location\": \"Home\"},\n },\n {\n \"macAddress\": \"84:F3:EB:21:90:C4\",\n \"lastData\": {\n \"dateutc\": 1546889640000,\n \"baromrelin\": 30.09,\n \"baromabsin\": 24.61,\n \"tempinf\": 68.9,\n \"humidityin\": 30,\n \"date\": \"2019-01-06T19:34:00.000Z\",\n },\n \"info\": {\"name\": \"Home\", \"location\": \"Home\"},\n },\n ]",
"def device_details_json():\n return [\n {\n \"dateutc\": 1547094300000,\n \"winddir\": 344,\n \"windspeedmph\": 1.6,\n \"windgustmph\": 2.2,\n \"maxdailygust\": 3.4,\n \"tempf\": 34,\n \"hourlyrainin\": 0,\n \"eventrainin\": 0,\n \"dailyrainin\": 0,\n \"weeklyrainin\": 0,\n \"monthlyrainin\": 0,\n \"totalrainin\": 0,\n \"baromrelin\": 30.38,\n \"baromabsin\": 24.89,\n \"humidity\": 49,\n \"tempinf\": 69.6,\n \"humidityin\": 30,\n \"uv\": 0,\n \"solarradiation\": 0,\n \"feelsLike\": 34,\n \"dewPoint\": 16.87,\n \"date\": \"2019-01-10T04:25:00.000Z\",\n },\n {\n \"dateutc\": 1547094000000,\n \"winddir\": 344,\n \"windspeedmph\": 0,\n \"windgustmph\": 0,\n \"maxdailygust\": 3.4,\n \"tempf\": 34,\n \"hourlyrainin\": 0,\n \"eventrainin\": 0,\n \"dailyrainin\": 0,\n \"weeklyrainin\": 0,\n \"monthlyrainin\": 0,\n \"totalrainin\": 0,\n \"baromrelin\": 30.38,\n \"baromabsin\": 24.89,\n \"humidity\": 50,\n \"tempinf\": 69.4,\n \"humidityin\": 29,\n \"uv\": 0,\n \"solarradiation\": 0,\n \"feelsLike\": 34,\n \"dewPoint\": 17.34,\n \"date\": \"2019-01-10T04:20:00.000Z\",\n },\n ]",
"def discover_json() -> Response:\n\n device_id = int(uid[:8], 16) # Hex string to int\n valid_id = device_id + _device_id_checksum(device_id)\n\n data = {\n \"FriendlyName\": locast_service.city,\n \"Manufacturer\": \"locast2dvr\",\n \"ModelNumber\": config.device_model,\n \"FirmwareName\": config.device_firmware,\n \"TunerCount\": config.tuner_count,\n \"FirmwareVersion\": config.device_version,\n \"DeviceID\": hex(valid_id)[2:],\n \"DeviceAuth\": \"locast2dvr\",\n \"BaseURL\": f\"http://{host_and_port}\",\n \"LineupURL\": f\"http://{host_and_port}/lineup.json\"\n }\n return jsonify(data)",
"def __http_update_device_list(self):\n\n # Make sure we are (still) logged in\n self.__login_if_required()\n\n # Fetch all devices from Govee\n req = {\n 'key': '',\n 'transaction': self.__current_milli_time(),\n 'view': 0\n }\n res = self.__http_post(req, '/device/rest/devices/v1/list')\n\n # Response:\n \"\"\"\n {\n \"devices\": [\n {\n \"device\": \"AA:BB:CC:DD:EE:FF:11:22\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"CC:DD:EE:FF:11:22\\\",\\\"bleName\\\":\\\"ihoment_H6159_XXXX\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6159\\\",\\\"device\\\":\\\"AA:BB:CC:DD:EE:FF:11:22\\\",\\\"deviceName\\\":\\\"Kitchen light\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Kitchen light\",\n \"goodsType\": 0,\n \"sku\": \"H6159\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n },\n {\n \"device\": \"A2:B2:C3:D4:E5:F6:77:88\",\n \"deviceExt\": {\n \"deviceSettings\": \"{\\\"wifiName\\\":\\\"MyWifi\\\",\\\"address\\\":\\\"C3:D4:E5:F6:77:88\\\",\\\"bleName\\\":\\\"ihoment_H6163_YYYY\\\",\\\"topic\\\":\\\"GD/123467890123467890123467890\\\",\\\"sku\\\":\\\"H6163\\\",\\\"device\\\":\\\"A2:B2:C3:D4:E5:F6:77:88\\\",\\\"deviceName\\\":\\\"Living room\\\",\\\"versionHard\\\":\\\"1.00.01\\\",\\\"versionSoft\\\":\\\"1.02.14\\\"}\",\n \"extResources\": \"{\\\"skuUrl\\\":\\\"\\\",\\\"headOnImg\\\":\\\"\\\",\\\"headOffImg\\\":\\\"\\\",\\\"ext\\\":\\\"\\\"}\",\n \"lastDeviceData\": \"{\\\"online\\\":false}\"\n },\n \"deviceName\": \"Living room\",\n \"goodsType\": 0,\n \"sku\": \"H6163\",\n \"versionHard\": \"1.00.01\",\n \"versionSoft\": \"1.02.14\"\n }\n ],\n \"message\": \"\",\n \"status\": 200\n }\n \"\"\"\n\n # Check response status\n if res['status'] != 200:\n raise GoveeException('Govee answered with device list status {}'.format(res['status'])) \n\n for raw_device in res['devices']:\n identifier = raw_device['device']\n sku = raw_device['sku']\n if not identifier or not sku:\n continue\n name = raw_device['deviceName']\n device_settings = json.loads(raw_device['deviceExt']['deviceSettings'])\n device_settings_keys = device_settings.keys()\n if not 'address' in device_settings_keys and not 'topic' in device_settings_keys:\n continue\n topic = device_settings['topic']\n\n if identifier in self.__devices.keys():\n device = self.__devices[identifier]\n device._name = name\n else:\n device_factory = self.__get_device_factory(sku)\n if not device_factory:\n continue\n last_device_data = json.loads(raw_device['deviceExt']['lastDeviceData'])\n if 'online' in last_device_data.keys():\n if last_device_data['online']:\n iot_connected = dev.IotConnectionStatus.ONLINE\n else:\n iot_connected = dev.IotConnectionStatus.OFFLINE\n elif not 'wifiName' in device_settings:\n iot_connected = dev.IotConnectionStatus.NO_IOT\n else:\n iot_connected = dev.IotConnectionStatus.UNKNOWN\n device = device_factory.build(self, identifier, topic, sku, name, iot_connected)\n if device:\n self.__devices[identifier] = device\n self.on_new_device(self, device, raw_device)",
"def bridge_create_json():\n return {\n \"base_stations\": {\n \"id\": 98765,\n \"name\": \"New Bridge\",\n \"mode\": \"home\",\n \"hardware_id\": \"0x1234567890abcdef\",\n \"hardware_revision\": 4,\n \"firmware_version\": {\n \"wifi\": \"0.121.0\",\n \"wifi_app\": \"3.3.0\",\n \"silabs\": \"1.0.1\",\n },\n \"missing_at\": None,\n \"created_at\": \"2019-04-30T01:43:50.497Z\",\n \"updated_at\": \"2019-04-30T01:44:43.749Z\",\n \"system_id\": 12345,\n \"firmware\": {\"wifi\": \"0.121.0\", \"wifi_app\": \"3.3.0\", \"silabs\": \"1.0.1\"},\n \"links\": {\"system\": 12345},\n }\n }",
"def loadDevices(filename):\n with open(filename, \"r\") as file:\n data = json.loads(file.read())\n\n return data[\"devices\"]",
"def get_devices():\n names = devices.list()\n if request.args.get('full') is not None:\n data = {d: devices.show(d) for d in names}\n else:\n data = names\n return jsonify({'devices': data})",
"def devices_dict(self):\n return self.devices.dict",
"def getDeviceInfo():\n url = \"https://api.roblox.com/reference/deviceinfo\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j",
"def retr_device( device_id ) :\n\n\t\t\t_logger.info( '...retr_device...' )\n\t\t\toutput = []\n\n\t\t\tdb = mongo.db.auth_devices\n\t\t\tdev = db.find( { 'device_id' : device_id } )\n\t\t\tif dev.count() == 0 :\n\t\t\t\t_logger.error( '...retr_device %s' % e.message )\n\t\t\t\traise mongo_no_resource_exception( 'no tokenized device found')\n\t\t\tfor device in dev :\n\t\t\t\toutput = {'moniker' : device['device_moniker'] ,\n\t\t\t\t\t\t 'description' : device['description'] ,\n\t\t\t\t\t\t 'active' : device['active'] ,\n\t\t\t\t\t\t 'device_id' : device['device_id'] ,\n\t\t\t\t\t\t 'spawned' : device['spawned'] ,\n\t\t\t\t\t\t 'last_known_remote_ip' : device['last_known_remote_ip'] ,\n\t\t\t\t\t\t 'canonical_user' : device['canonical_user'] ,\n\t\t\t\t\t\t 'segment' : device['segment'] ,\n\t\t\t\t\t\t 'auth_apps' : device['auth_apps'] ,\n\t\t\t\t\t\t 'cloak_origin' : device['cloak_origin'] ,\n\t\t\t\t\t\t 'cloak_monitor_stream' : device['cloak_monitor_stream'] ,\n\t\t\t\t\t\t 'auth_http_id' : device['auth_http_id']\n\t\t\t\t\t\t }\n\n\t\t\treturn jsonify({'result' : output})",
"def device_info(dev, testbed_obj, showcmd='show version', save_to_json=False, logstdout=True):\n\n device = testbed_obj.devices[dev]\n device.connect(log_stdout=logstdout)\n response = device.parse(showcmd)\n print(f\"Response from {dev} is of type {type(response)} and length {len(response)}\")\n print(f\"RAW response: \\n{response}\\n\")\n print(f\"FORMATTED response:\\n{json.dumps(response, indent=4)}\")\n print(response.keys())\n\n if save_to_json:\n json_filename = f\"{dev}.json\"\n with open(json_filename, 'w', encoding='utf-8') as f:\n json.dump(response, f, ensure_ascii=False, indent=4)\n print(f\"\\nFILE SAVED: Saved Response to JSON file {json_filename}\")\n\n return device, response",
"def get_device_properties(device):\n results = devices.show(device)\n return jsonify(results)",
"def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)",
"def get_user_devices_adapter(json_response):\n\n if 'devices' in json_response:\n ret = {\"result\": []}\n for device in json_response['devices']:\n ret[\"result\"].append(\n {\"name\": device[\"name\"],\n \"type\": device[\"type\"],\n \"id\": device[\"id\"],\n \"is_active\": device[\"is_active\"]})\n return ret\n return json_response",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._uuid)},\n \"name\": self._device.device_data[self._uuid]['name'],\n \"manufacturer\": \"Nest Labs\",\n \"model\": self._device.device_data[self._uuid]['model'],\n }",
"def bridge_all_json():\n return {\n \"base_stations\": [\n {\n \"id\": 12345,\n \"name\": None,\n \"mode\": \"home\",\n \"hardware_id\": \"0x1234567890abcdef\",\n \"hardware_revision\": 4,\n \"firmware_version\": {\n \"wifi\": \"0.121.0\",\n \"wifi_app\": \"3.3.0\",\n \"silabs\": \"1.0.1\",\n },\n \"missing_at\": None,\n \"created_at\": \"2019-04-30T01:43:50.497Z\",\n \"updated_at\": \"2019-04-30T01:44:43.749Z\",\n \"system_id\": 12345,\n \"firmware\": {\"wifi\": \"0.121.0\", \"wifi_app\": \"3.3.0\", \"silabs\": \"1.0.1\"},\n \"links\": {\"system\": 12345},\n }\n ]\n }",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._device.unique_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Apple\",\n \"model\": self._device.device_model,\n }",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }",
"def get_kcca_devices():\n headers = {'x-api-key': CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n api_url = f\"{CLARITY_API_BASE_URL}devices\"\n\n results = requests.get(api_url, headers=headers)\n\n device_data = pd.DataFrame(results.json())\n\n devices = []\n\n for index, row in device_data.iterrows():\n\n try:\n location = row['location']['coordinates']\n\n device = dict({\n \"channelID\": row['code'],\n \"name\": row['code'],\n \"createdAt\": row['workingStartAt'],\n \"longitude\": location[0],\n \"latitude\": location[1],\n \"device_manufacturer\": 'CLARITY',\n \"isActive\": True,\n \"visibility\": True,\n \"owner\": \"KCCA\",\n \"description\": \"Particulate Matter and NO2 monitor\",\n \"product_name\": \"NODE - S\"\n })\n\n except Exception as ex:\n print(ex)\n continue\n\n devices.append(device)\n\n return json.dumps(devices)",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self.config_entry.entry_id)},\n \"name\": NAME,\n \"model\": VERSION,\n \"manufacturer\": NAME,\n }",
"def _get_device_data(chosen_env, device_id):\n return _read_json(chosen_env, \"get_device_data/\" + device_id)",
"def as_dict(self, short=False):\n\n res = super(DeviceMapperDevice, self).as_dict(short=short)\n res['dmsetup_cmd'] = self.dmsetup_cmd\n res['sysfs_dm_dir'] = self.sysfs_dm_dir\n res['sysfs_dm_name_file'] = self.sysfs_dm_name_file\n res['sysfs_suspended_file'] = self.sysfs_suspended_file\n res['sysfs_uuid_file'] = self.sysfs_uuid_file\n res['dm_name'] = self.dm_name\n res['suspended'] = self.suspended\n res['uuid'] = self.uuid\n res['table'] = self.table\n\n return res",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.coordinator.data[\"deviceID\"])},\n \"name\": self.coordinator.data[\"deviceName\"],\n \"manufacturer\": self.coordinator.data[\"deviceManufacturer\"],\n \"model\": self.coordinator.data[\"deviceModel\"],\n \"sw_version\": self.coordinator.data[\"appVersionName\"],\n }",
"def load_devices():",
"def build_config(device):\n capabilities = device.capabilities(verbose=True)\n config = {}\n\n for key, value in capabilities.items():\n for element in value:\n if type(element[0]) is tuple:\n config[element[0][1]] = element[0][0]\n elif type(element[0]) is list:\n config[element[1]] = element[0][0]\n elif (\"SYN\" in str(element[0])) or (\"FF\" in str(element[0])):\n pass\n else:\n config[element[1]] = element[0]\n\n print(\"Config Dict: \" + str(config) + \"\\n\")\n return config",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Somfy\",\n \"name\": self.name,\n \"model\": self.tahoma_device.widget,\n \"sw_version\": self.tahoma_device.type,\n }",
"def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]",
"def devices_list_view(request):\n return read_json(request.registry.settings['devices_path'], [])",
"async def get_discovered_device_data(self):\n json = self._api_call(\"monitors/%s/devices\" % self.sense_monitor_id)\n return await json"
] | [
"0.74697614",
"0.6751789",
"0.65418833",
"0.6319735",
"0.61290795",
"0.6120263",
"0.60992014",
"0.60623235",
"0.60572946",
"0.6028789",
"0.5987714",
"0.5979106",
"0.597772",
"0.5972768",
"0.59643567",
"0.59492177",
"0.5925081",
"0.5899812",
"0.5844271",
"0.58301526",
"0.580903",
"0.57983744",
"0.578501",
"0.57708704",
"0.57507324",
"0.57493067",
"0.5748261",
"0.5718786",
"0.5710462",
"0.56840307"
] | 0.72180307 | 1 |
returns an integer that represents base_depth for the specified date | def base_depth_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
new_date = str(date)
base_depth_to_return = None
query = "SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')" %(resort_table, date)
connection = get_connection()
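    # look up the base_depth recorded for the exact YYYYMMDD date requested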
if connection is not None:
try:
for row in get_select_query_results(connection, query):
                base_depth_to_return = row[0]
except Exception as e:
print(e, file=sys.stderr)
connection.close()
return json.dumps(base_depth_to_return) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base_depth_average_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n date_month = int(date[4:6])\n date_day = int(date[6:8])\n query = \"SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d\" %(resort_table, date_month, date_day)\n connection = get_connection()\n total = 0\n counter = 0\n for row in get_select_query_results(connection, query):\n counter += 1\n total += int(row[0])\n if (counter != 0): \n base_depth_to_return = int(total/counter)\n else:\n base_depth_to_return = 0\n return json.dumps(base_depth_to_return)",
"def depth(x):\n return max(int(x * depth_multiplier), 8)",
"def getDepth(depth: str) -> int:\n return {\n CommonPrefs.DEPTH_8: 8,\n CommonPrefs.DEPTH_16: 16,\n CommonPrefs.DEPTH_24: 24,\n CommonPrefs.DEPTH_32: 32,\n }.get(depth, 24)",
"def get_recursion_depth(self):\n str_depth_input = self.entries[\"ent_recursion_depth\"].get()\n if str_depth_input == '':\n return None # default of fractal class while drawing in None\n # draws the base curve instead\n return int(str_depth_input)",
"def date_to_draw_number(date):\n\n today = date.today()\n\n #hotspot plays only last for 180 days\n #validate entered date\n if (today - date).days > 180 or date > today:\n return 0\n\n days_between = (date - INIT_DATE).days\n\n return INIT_DRAW_NUMBER + (300 * days_between)\n\n\n # num_spots_sampled, spot_histogram, range_histogram, mod_histogram,\n # last_seen_dict, avg_draw_distance_dict, draw_distance_dict, last_n_avg_distance_dict_list, current_draw_num",
"def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0",
"def depth(self):\n if not self.root:\n return None\n else:\n return self.root.balance_number",
"def base_depth_for_period(resort_name, start_date, end_date):\n\n start_date_year = int(start_date[0:4])\n start_date_month = int(start_date[4:6])\n start_date_day = int(start_date[6:8])\n\n end_date_year = int(end_date[0:4])\n end_date_month = int(end_date[4:6])\n end_date_day = int(end_date[6:8])\n\n resort_table = resort_table_dict[resort_name]\n\n query = \"SELECT status_date FROM %s\" %(resort_table)\n connection = get_connection()\n\n period_date_list = []\n base_depth_list = []\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n row_year = int(row[0].strftime('%Y'))\n row_month = int(row[0].strftime('%m'))\n row_day = int(row[0].strftime('%d'))\n\n if row_year < start_date_year or row_year > end_date_year:\n continue\n if start_date_year == row_year:\n if start_date_month > row_month:\n continue\n if start_date_year == row_year:\n if start_date_month == row_month:\n if start_date_day > row_day:\n continue\n if end_date_year == row_year:\n if end_date_month < row_month:\n continue\n if end_date_year == row_year:\n if end_date_month == row_month:\n if end_date_day < row_day:\n continue\n\n date_to_add = (row[0].strftime('%Y') + row[0].strftime('%m') + row[0].strftime('%d'))\n period_date_list.append(date_to_add)\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n for date in period_date_list:\n base_depth_for_list = base_depth_for_date(resort_name, date)\n base_depth_list.append(base_depth_for_list)\n\n return json.dumps(base_depth_list)",
"def get_depth(self, current, n):\n if current is not None:\n return max(self.get_depth(current.left, n + 1), self.get_depth(current.right, n + 1))\n else:\n return n",
"def find_depth_tree(root):\n if root is not None:\n max_depth = 0\n if root.branches is None:\n return 1\n else:\n for value in root.branches.values():\n max_depth = max(max_depth, DecisionTree.find_depth_tree(value))\n return 1 + max_depth\n else:\n return 1",
"def r_to_depth(x, interval):\n return x * interval / 3600.0",
"def depth_estimation(x_left, x_right, f=33.4, d=114):\n depth = abs(f * d / ((x_left - x_right) / 72 * 2.54)) / 100 # - 0.418879\n return depth",
"def depth(self):\n\t\tdef helper(tree, d):\n\t\t\tif tree.isLeaf():\n\t\t\t\treturn d\n\t\t\telse:\n\t\t\t\td_left=helper(tree.left, d+1) if tree.hasLeftChild() else 0\n\t\t\t\td_right=helper(tree.right, d+1) if tree.hasRightChild() else 0\n\t\t\t\treturn max(d_left, d_right)\n\n\t\treturn helper(self.root, 1) if not self.isEmpty() else 0",
"def depth(self):\n if self.size == 0:\n return 0\n return int(math.log(self.size, 2)) + 1",
"def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")",
"def get_max_dmag_from_depth(depth):\n return 2.5 * np.log10(depth)",
"def resolve_depth(self: models.FeatureDetails, info, **kwargs):\n min = self.data.get(\"berth_min_depth\")\n max = self.data.get(\"berth_max_depth\")\n\n if min is None:\n return None\n\n return {\n \"min\": min,\n \"max\": max,\n }",
"def depth(self, d=0):\n d1 = 0\n d2 = 0\n if self.leftChild:\n d1 = max(self.leftChild.depth(d + 1), d)\n if self.rightChild:\n d2 = max(self.rightChild.depth(d + 1), d)\n return max(d1, d2, d)",
"def depth(self) -> int:\n return self.__depth",
"def bit_depth(self, ch):\n ch = ct.c_int(ch)\n depth = ct.c_uint()\n self.lib.GetBitDepth(ch, ct.pointer(depth))\n return depth.value",
"def depth(self):\n L, R = 0,0\n if self.left:\n L = self.left.depth()\n if self.right:\n R = self.right.depth()\n\n return 1 + max(L, R)",
"def depth(state):\n current_depth = 0\n for i in range(0, len(state.board)):\n for j in range(0, len(state.board[0])):\n if state.board[i][j] == '_':\n current_depth += 1\n return current_depth",
"def depth(self):\n return self._max_depth",
"def get_caravan_depth(self):\n return self.caravan_depth",
"def depth_to_ata(depth):\n return (depth / 10.0) + 1.0",
"def checkDRs(depths):\n okaybases = 0\n for d in depths:\n if(d >= 10):\n okaybases += 1\n okayp = okaybases / len(depths)\n return okayp",
"def depth(self, node):\n\n if not node:\n return 0\n else:\n l_depth = self.depth(node.left)\n r_depth = self.depth(node.right)\n\n if l_depth > r_depth:\n return l_depth + 1\n else:\n return r_depth + 1",
"def get_max_depth(self):\n return self.MAX_DEPTH",
"def max_depth(self) -> int:\n return 0",
"def depth_percent(self):\n return self.container['depth_percent']"
] | [
"0.6894696",
"0.61948436",
"0.61282104",
"0.6101949",
"0.59978324",
"0.57817864",
"0.57461077",
"0.57212085",
"0.56724894",
"0.5652006",
"0.5621178",
"0.56116706",
"0.558995",
"0.5588037",
"0.5577575",
"0.55354685",
"0.5507787",
"0.54877305",
"0.54871655",
"0.54178995",
"0.5417858",
"0.5403052",
"0.53605396",
"0.5348817",
"0.5310176",
"0.5305862",
"0.5295065",
"0.5281023",
"0.5267091",
"0.5266096"
] | 0.7155536 | 0 |
returns the average of base depth across all years on a specific date | def base_depth_average_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
date_month = int(date[4:6])
date_day = int(date[6:8])
query = "SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d" %(resort_table, date_month, date_day)
connection = get_connection()
total = 0
counter = 0
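    # accumulate base_depth over every year that has a row for this month/day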
for row in get_select_query_results(connection, query):
counter += 1
total += int(row[0])
if (counter != 0):
base_depth_to_return = int(total/counter)
else:
base_depth_to_return = 0
return json.dumps(base_depth_to_return) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat",
"def yearly_avg(dacycle,avg):\n\n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )\n yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)\n\n if not os.path.exists(yeardir):\n print \"Creating new output directory \" + yeardir\n os.makedirs(yeardir)\n\n files = os.listdir(monthdir) # get monthly files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if not files:\n print \"No full year finished yet, skipping yearly average...\"\n return\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')\n fileinfo[filename] = date\n\n years = set([d.year for d in fileinfo.values()])\n\n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(years=+1)\n \n avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if not len(avg_files) == 12 : \n print \"Year %04d not finished yet, skipping yearly average...\"%sd.year\n else:\n targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))\n \n if not os.path.exists(targetfile):\n print \"Year %04d is complete, I have 12 months for the next file\"%sd.year\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n\n sd = nd",
"def avg_ttm(df, years):\n\n # Start with the non-shifted data.\n df_result = df.copy()\n\n # Add shifted data for each year.\n for i in range(1, years):\n df_result += df.shift(4 * i)\n\n # Take the average.\n df_result /= years\n\n return df_result",
"def average_age_dc(all_profile_dict: dict) -> float:\n \"\"\"Param:all_profile_dc: Dictionary containing all profiles\"\"\"\n today = date.today()\n value = sum(map(lambda v: today.year - v['birthdate'].year - ((today.month, today.day) < (\n v['birthdate'].month, v['birthdate'].day)), all_profile_dict.values())) / len(all_profile_dict)\n return value",
"def calc_yearly_mean(yy_dly, x_dly):\n return calc_yearly(yy_dly, x_dly, np.mean)",
"def calc_base_year_data(base_year_vehicles_df):\n pass",
"def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)",
"def run(start_year, end_year, depth_from, depth_to):\n years, times, rootgrps = retrieve(1950,2018)\n \n HC = calculate_HC(rootgrps,25,31, -43, 41)\n \n months, month_avgs = monthly_avgs(HC)\n pos = str(-43)+\"N \"+str(41)+\"E\"\n \n return years, times, HC, pos, months, month_avgs",
"def calc_x_day_avg(data, x=3):\n pass",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def get_yearly_avg(all_stock_data):\n try:\n yearly_stock_data = {}\n for data in all_stock_data:\n year = data[0][0:4]\n if year not in yearly_stock_data:\n yearly_stock_data[year] = []\n yearly_stock_data[year].append(data)\n yearly_avg_list = []\n for year, stock_data in yearly_stock_data.items():\n yearly_avg_list.append((year, get_avg(stock_data)))\n return yearly_avg_list\n\n except Exception as e:\n print(e)\n exit()",
"def av(self, data):\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.mean('year')",
"def daily_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n weekdir = os.path.join(analysisdir , 'data_%s_weekly'%avg)\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n\n if not os.path.exists(daydir):\n print \"Creating new output directory \" + daydir\n os.makedirs(daydir)\n\n files = os.listdir(weekdir)\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n \n dt = dacycle['cyclelength']\n\n for k,v in fileinfo.iteritems():\n cycle_file = os.path.join(weekdir,k)\n for i in range(abs(dt.days)):\n daily_file = os.path.join(daydir,'%s_fluxes.%s.nc'%(avg,(v+datetime.timedelta(days=i)).strftime('%Y-%m-%d')))\n if not os.path.lexists(daily_file):\n os.symlink(cycle_file,daily_file)\n #print daily_file,cycle_file",
"def average_emission(data: List[EmissionPerCapita], current_year: int) -> float:\r\n\r\n index = current_year - data[0].start_year # get the index for current year\r\n\r\n # Get all emissions from that year.\r\n current_year_emissions = []\r\n for countries in data:\r\n current_year_emissions.append(countries.epc_year[index])\r\n\r\n average = sum(current_year_emissions) / len(data)\r\n return average",
"def yearly_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Yearly\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result",
"def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0",
"def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]",
"def water_year_means(df):\n\n monthly = df.groupby(pd.TimeGrouper('M')).mean()\n monthly['year'] = monthly.index.year\n monthly['month'] = monthly.index.month\n monthly['water_year'] = np.roll(monthly['year'],-3)\n \n # Because there will typically not be data starting and ending in\n # October, we will need to drop the first and last years as we have\n # incomplete records for the first and last year respectively.\n annual = monthly.groupby(monthly['water_year']).mean().iloc[1:-1]\n return annual.drop(['year','month'],axis=1).set_index('water_year')",
"def averageDominationCount(leaf):\n averageDominationCount = np.nanmean(leaf.calDominationCount())\n return averageDominationCount",
"def process_rolling_average(df, level, code_level, trendType, column):\n df = df.sort_values(by=['date'])\n df = df.reset_index(drop=True)\n\n df['date_7days_ago'] = df['date'].apply(lambda x: datetime.strftime(\n datetime.strptime(x, \"%Y-%m-%d\") - timedelta(days=7), \"%Y-%m-%d\"\n ))\n df['mean'] = df['date'].apply(lambda x: get_rolling_average(x, df, column))\n df['mean_7days_ago'] = df['date_7days_ago'].apply(lambda x: df[df['date'] == x]['mean'].iloc[0] if(df[df['date'] == x].shape[0] > 0) else None)\n df['evol_mean'] = df['mean'] - df['mean_7days_ago']\n df['evol_mean_percentage'] = df['evol_mean'] / df['mean_7days_ago'] * 100\n\n return format_dict(\n int(df[df['date'] == df.date.max()]['mean'].iloc[0]),\n df.date.max(),\n int(df[df['date'] == df.date.max()]['evol_mean'].iloc[0]),\n df[df['date'] == df.date.max()]['evol_mean_percentage'].iloc[0],\n level,\n code_level,\n df[['mean', 'date']],\n 'mean',\n trendType\n )",
"def annual_average(new_cube):\n\n annual_average_cube = new_cube.aggregated_by('year', iris.analysis.MEAN)\n\n return annual_average_cube",
"def monthly_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)\n\n if not os.path.exists(monthdir):\n print \"Creating new output directory \" + monthdir\n os.makedirs(monthdir)\n\n\n files = os.listdir(daydir) # get daily files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if len(files) < 28:\n print 'No month is yet complete, skipping monthly average'\n return\n\n fileinfo = {}\n for filename in files: # parse date from each of them\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n\n years = [d.year for d in fileinfo.values()] # get actual years\n months = set([d.month for d in fileinfo.values()]) # get actual months\n \n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(months=+1)\n\n ndays_in_month = (nd-sd).days\n \n avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if len(avg_files) != ndays_in_month: # only once month complete \n #print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)\n pass\n else:\n targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))\n if not os.path.exists(targetfile):\n print \"New month (%02d) is complete, I have %d days for the next file\"%(sd.month,ndays_in_month)\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n else:\n pass\n\n sd = nd",
"def mpg_by_year(self):\n ## create reference dict and aggregated dict\n reference_mpgs = defaultdict(list)\n year_avg_mpgs = defaultdict(int)\n ## loop through the data and add to both dicts\n for auto in self.data:\n ## get the year\n the_year = auto.year\n ## maintain a list of mpgs for each key=year\n reference_mpgs[the_year].append(auto.mpg)\n ## update the cumulative mpg as we read auto objects\n year_avg_mpgs[the_year] = sum(reference_mpgs[the_year]) / len(reference_mpgs[the_year])\n return year_avg_mpgs",
"def standardize(year, df):\n return (df[year] - df[year].mean()) / df[year].std()",
"def batting_average(df,start_year,end_year,bat_met,player_name):\n\n base_fields = ['H','AB']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return round(df['H'].sum(axis = 0) / df['AB'].sum(axis = 0),3)",
"def GetGraphicAverages(diagnostic_cases, diagnostic, weeks,year, n_years):\n t = 1.96\n\n current_year = Year.objects.get(year=year)\n weeks_current_year = weeks.filter(year=current_year)\n year_ob = Year.objects.filter(year__lt=year)\n weeks = weeks.filter(year__in=year_ob)\n\n popu = 0\n\n #cases per diagnostic\n diagnostic_cases_w = diagnostic_cases\n\n #arithmetic average of the weeks / n_years\n averages = [0] * 52\n\n standard_deviations = [0] * 52\n #number of years\n\n #cases per week of the diferent years\n cases_per_weeks = [0] * 52\n\n for i in range(len(averages)):\n\n f = [0]*(n_years)\n \n\n year = 0\n\n y_idx = 0\n for w in range(len(weeks)):\n #print(y)\n if weeks[w].week == i+1:\n \n if year != weeks[w].year: # Esto no pasa nunca\n year = weeks[w].year\n cases = 0\n \n \n for p in diagnostic_cases_w:\n\n if p.week == weeks[w]:\n \n cases += p.cases\n\n f[y_idx ] = cases\n y_idx +=1\n\n averages[i] = np.average(f) #borrar\n\n standard_deviations[i] = np.std(f)\n \n cases = 0\n for week in weeks_current_year:\n if week.week == i+1:\n dia = diagnostic_cases.filter(week=week)\n \n for d in dia:\n\n cases += d.cases\n\n cases_per_weeks[i] = cases \n\n\n #array of class dots for draw the chart of averages\n dots_graphic_averages = []\n #array of class dots for draw the chart of cumulative\n dots_graphic_cumulative = []\n\n\n average_cumulative = 0\n top_rank_cumulative = 0\n cases_acumulative = 0\n lower_rank_cumulative = 0\n\n for i in range(len(standard_deviations)):\n lower_rank = 0\n top_rank = 0\n\n if n_years != 0:\n lower_rank = averages[i] - (t * standard_deviations[i]/ math.sqrt(n_years))\n top_rank = averages[i] + (t * standard_deviations[i] / math.sqrt(n_years))\n if lower_rank < 0:\n lower_rank = 0\n\n # Acumulative dots\n cases_acumulative += cases_per_weeks[i]\n average_cumulative += averages[i]\n if lower_rank >= 0:\n lower_rank_cumulative += lower_rank\n top_rank_cumulative += top_rank\n\n dots_average = DotsGraphicAverage(averages[i],i+1, lower_rank, top_rank,cases_per_weeks[i])\n dots_cumulative = DotsGraphicAverage(average_cumulative,i+1, lower_rank_cumulative, top_rank_cumulative,cases_acumulative)\n dots_graphic_averages.append(dots_average)\n dots_graphic_cumulative.append(dots_cumulative)\n\n\n return dots_graphic_averages, dots_graphic_cumulative",
"def longterm_avg(dacycle,avg):\n\n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n\n yeardir = os.path.join(analysisdir , 'data_%s_yearly'%avg )\n longtermdir = os.path.join(analysisdir,'data_%s_longterm'%avg)\n\n if not os.path.exists(longtermdir):\n print \"Creating new output directory \" + longtermdir\n os.makedirs(longtermdir)\n\n files = os.listdir(yeardir)\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if not files:\n print \"No full year finished yet, skipping longterm average...\"\n return\n\n dates = []\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y')\n dates.append( date )\n\n avg_files = [os.path.join(yeardir,k) for k in files]\n \n if len(avg_files) > 0 : \n command = ['ncra','-O']+ avg_files + [os.path.join(longtermdir,'%s_fluxes.%04d-%04d.nc'%(avg,min(dates).year, max(dates).year))]\n status = subprocess.check_call(command)",
"def five_years_avg_dividend(self) -> float:\n return self._five_years_avg_dividend",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)"
] | [
"0.64000183",
"0.61367154",
"0.6118295",
"0.6117146",
"0.61015445",
"0.6100895",
"0.60893524",
"0.60777545",
"0.6077354",
"0.6042014",
"0.59638566",
"0.5926371",
"0.59044516",
"0.5842373",
"0.5823526",
"0.5815007",
"0.58064413",
"0.57317835",
"0.5730693",
"0.5663666",
"0.56539077",
"0.56235766",
"0.5602132",
"0.5594024",
"0.55885893",
"0.5547512",
"0.5543839",
"0.5539423",
"0.5523833",
"0.55136853"
] | 0.7233447 | 0 |
returns an int that is the avg snowfall on this date over all years | def snowfall_average_for_date(resort_name, date):
resort_table = resort_table_dict[resort_name]
date_month = int(date[4:6])
date_day = int(date[6:8])
query = "SELECT snowfall FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d" %(resort_table, date_month, date_day)
connection = get_connection()
total = 0
counter = 0
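    # accumulate snowfall over every year that has a row for this month/day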
for row in get_select_query_results(connection, query):
counter += 1
total += int(row[0])
if (counter != 0):
snowfall_to_return = int(total/counter)
else:
snowfall_to_return = 0
return json.dumps(snowfall_to_return) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def five_years_avg_dividend(self) -> float:\n return self._five_years_avg_dividend",
"def max_drawdown_cal_year(self) -> float:\n return float(self.tsdf.groupby([self.tsdf.index.year]).apply(\n lambda x: (x / x.expanding(min_periods=1).max()).min() - 1).min())",
"def av(self, data):\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.mean('year')",
"def year_average_price_rule(_m, y):\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)",
"def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def season_count_fraction(past_season, past_day, current_season, current_day, nb_days_per_season):\n total_seasons = current_season - past_season\n total_days = current_day - past_day\n if total_days < 0:\n total_days += nb_days_per_season\n total_seasons -= 1\n assert(total_seasons >= 0)\n assert(total_days >= 0)\n return (total_seasons * nb_days_per_season + total_days) / nb_days_per_season",
"def year_emissions_intensity_rule(_m, y):\r\n\r\n return m.YEAR_EMISSIONS[y] / m.YEAR_DEMAND[y]",
"def get_average_mood(mood_data, past_days=None):\n mood_sum = 0\n total_days = 0\n if past_days is None:\n past_days = (datetime.now() - datetime(1970, 1, 1)).days\n start_date = datetime.now() - timedelta(days=past_days-1)\n for date, mood in mood_data[:-past_days:-1]:\n if date > start_date:\n mood_sum += int(mood)\n total_days += 1\n return round(mood_sum/total_days, 2)",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def calc_x_day_avg(data, x=3):\n pass",
"def average_emission(data: List[EmissionPerCapita], current_year: int) -> float:\r\n\r\n index = current_year - data[0].start_year # get the index for current year\r\n\r\n # Get all emissions from that year.\r\n current_year_emissions = []\r\n for countries in data:\r\n current_year_emissions.append(countries.epc_year[index])\r\n\r\n average = sum(current_year_emissions) / len(data)\r\n return average",
"def create_dreamteam_count_yearly(player, raw_list):\n num_seasons = 0\n yearly_dreamteam_count = 0\n for i, raw in enumerate(raw_list):\n if i == len(raw_list) - 1:\n break\n if player in raw['name'].values:\n num_seasons += 1\n yearly_dreamteam_count += raw[['dreamteam_count']][raw.name == player].values[0][0]\n if num_seasons == 0:\n return np.nan\n return yearly_dreamteam_count / num_seasons",
"def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]",
"def get_avg_num_logs(self):\n\n return int(self.database[\"date\"].value_counts().sum()) // 120",
"def annual_dividend(self) -> float:\n return self._annual_dividend",
"def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)",
"def avg(year):\r\n df = ouvrir_fichier()\r\n df = df.loc[df[\"year\"].isin([year])]\r\n df = df[(\r\n df[\r\n \"emissions\"\r\n ] == 'Emissions (thousand metric tons of carbon dioxide)'\r\n )]\r\n print(df)\r\n mean_value = df.mean()['value']\r\n resultat = {}\r\n resultat[\"year\"] = year\r\n resultat['total'] = float(mean_value)\r\n print(mean_value)\r\n return resultat",
"def yearfrac(self) -> float:\n return (self.last_idx - self.first_idx).days / 365.25",
"def avg_after_harry():\n copy = movies.copy()\n copy = copy.sort_values(['Year']).reset_index(drop = True) #years early to present\n harry_years = copy[copy['#1 Movie'].str.contains('Harry')].Year #years where harry potter was #1\n next_years = harry_years + 1\n check = list(next_years.values)\n next_years_df = copy[copy['Year'].isin(check)]\n avg = next_years_df['Number of Movies'].mean()\n if avg is np.nan:\n raise\n return ('avg_after_harry', avg)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def avg(self):\n return sum(self.times) / len(self.times)",
"def calc_yearly_mean(yy_dly, x_dly):\n return calc_yearly(yy_dly, x_dly, np.mean)",
"def sumYear(self):\n yearuse = 0\n for day in self.daylist:\n yearuse = yearuse + sum(day.use)\n return yearuse",
"def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0",
"def AverageDividendPercent(self, years=10):\n nSamples = 0\n dividendSum = 0.\n import datetime\n import math\n now = datetime.datetime.now()\n for index in range(len(self._history)):\n snapshot = self._history[-1 - index]\n if now - snapshot.date > datetime.timedelta(days=365*years):\n break\n nSamples += 1\n if not math.isnan(snapshot.annualDividend / snapshot.price):\n dividendSum += snapshot.annualDividend / snapshot.price\n if nSamples == 0:\n return 0.\n avgDiv = 100. * dividendSum / nSamples\n if math.isnan(avgDiv):\n return 0\n return avgDiv",
"def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg",
"def water_year_means(df):\n\n monthly = df.groupby(pd.TimeGrouper('M')).mean()\n monthly['year'] = monthly.index.year\n monthly['month'] = monthly.index.month\n monthly['water_year'] = np.roll(monthly['year'],-3)\n \n # Because there will typically not be data starting and ending in\n # October, we will need to drop the first and last years as we have\n # incomplete records for the first and last year respectively.\n annual = monthly.groupby(monthly['water_year']).mean().iloc[1:-1]\n return annual.drop(['year','month'],axis=1).set_index('water_year')",
"def avg_based_on_forecast(city):\n wparams = { 'city': city,\n 'key': WEATHERBIT_API_KEY\n }\n resp = requests.get(WEATHERBIT_FORECAST_URL, params=wparams)\n alltemps = [farenheit(x['temp']) for x in json.loads(resp.text)['data']]\n return round(sum(alltemps) / len(alltemps))",
"def average(self):\n return self.summation() / self.count()"
] | [
"0.6527509",
"0.63874155",
"0.6387261",
"0.63743424",
"0.62814784",
"0.61865",
"0.6170374",
"0.6097223",
"0.6072147",
"0.6064186",
"0.6035137",
"0.6027913",
"0.6025581",
"0.6023191",
"0.5905982",
"0.5880306",
"0.58751917",
"0.5872359",
"0.5859398",
"0.5838657",
"0.5838657",
"0.5838657",
"0.58321166",
"0.5829905",
"0.5815771",
"0.58112746",
"0.5792773",
"0.5763294",
"0.5754138",
"0.5749265"
] | 0.69543517 | 0 |
returns a date that had the highest snowfall during the specified year | def highest_snowfall_for_year(resort_name, year):
resort_table = resort_table_dict[resort_name]
year = int(year)
query = "SELECT snowfall FROM %s WHERE CAST(EXTRACT(YEAR FROM status_date) AS INTEGER) = %d" %(resort_table, year)
connection = get_connection()
snowfall_list = []
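    # collect every snowfall reading recorded during the requested year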
if connection is not None:
try:
for row in get_select_query_results(connection, query):
snowfall_list.append(row)
except Exception as e:
print(e, file=sys.stderr)
connection.close()
snowfall_list.sort(reverse=True)
"""
need to think about making our own sorter so we can break ties effectively
"""
highest_snowfall = snowfall_list[0]
return json.dumps(highest_snowfall) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maxyear():\n\n return datetime.MAXYEAR",
"def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year",
"def max_drawdown_cal_year(self) -> float:\n return float(self.tsdf.groupby([self.tsdf.index.year]).apply(\n lambda x: (x / x.expanding(min_periods=1).max()).min() - 1).min())",
"def max_rain_compare(rain_by_year):\n\treturn rain_by_year[1]",
"def closeyear(year):\n\n # Return the specific year\n return int(year % 4)",
"def get_last_year(data_id):\n if data_id.startswith(\"cfsv2\"):\n return 2017\n return 2018",
"def end_year(self) -> float:\n\n end_year = -np.inf\n for td_table in list(self.tdve.values()) + self.transfers + self.interpops:\n if len(td_table.tvec) and np.amax(td_table.tvec) > end_year:\n end_year = np.amax(td_table.tvec)\n return end_year",
"def get_year_with_most_rain(totals_list_per_day_from_datasource):\n\train_by_year_collection = {}\n\tfor row in totals_list_per_day_from_datasource:\n\t\tyear_component_of_parsed_date = row[0].split('-')[2]\n\t\tif None == rain_by_year_collection.get(year_component_of_parsed_date):\n\t\t\train_by_year_collection[year_component_of_parsed_date] = 0\n\t\tif '' == row[1]:\n\t\t\tcontinue\n\t\train_by_year_collection[year_component_of_parsed_date] += int(row[1])\n\tmax_rainy_year = max(rain_by_year_collection.items(), key=max_rain_compare) # use .items() and always searches based on the structure given, here .items() returns the dictionary as a tuple only.\n\tyear_with_most_rain = { max_rainy_year[0] : max_rainy_year[1] }\n\t#year_most_rain = { key : value for key, value in rain_by_year.items() if value == max_value[1] } # find key/year in the dictionary by value/rain reverse thinking.\n\treturn year_with_most_rain",
"def get_max_pop_year(group):\n max_year_val = group.loc[group[\"mean\"].idxmax()][[\"year_id\", \"mean\"]]\n\n return max_year_val",
"def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn",
"def hottest_summer(df) -> int:\n df['summer_max'] = df[['JUN', 'JUL', 'AUG']].max(axis=1)\n hottest_year = df['summer_max'].idxmax()\n return hottest_year",
"def returns_over_max_drawdown(tot_returns_dict, year, lifetime_maximum_drawdown):\n\n return round(tot_returns_dict[year] / abs(lifetime_maximum_drawdown), 2)",
"def _days_before_year(year):\n y = year - 1\n return y * 365 + y // 4 - y // 100 + y // 400",
"def threat(year, clock):\n # find latest adjustment preceding or equal to the given year\n index = -1\n while clock[index][0] > year:\n index -= 1\n # return time set at latest adjustment\n return clock[index][1]",
"def _get_max_year(out_id_list):\n try:\n indicator = database.fetch_tables(['Indicator'])[0]\n ind_list = indicator[indicator['fk_indicator_output'].isin(out_id_list)]['id'].tolist()\n ind_str = ', '.join([str(i) for i in ind_list])\n \n sql = f'SELECT MAX(year) FROM SimpleCount WHERE fk_simplecount_indicator in ({ind_str})'\n \n c = _CONN.cursor()\n c.execute(sql)\n max_year = c.fetchall()[0][0]\n c.close()\n\n return int(max_year)\n except:\n raise",
"def get_max_days(\n self,\n year=None, # type: Optional[int]\n ):\n # type: (...) -> int\n\n max_days = self._max_days\n\n if self.name == 'FEBRUARY':\n if year is None:\n raise ValueError('Year is necessary for \\'FEBRUARY\\'.')\n\n return max_days[1] if self.is_leap(year) else max_days[0]\n\n return max_days",
"def state_with_most_deaths(arr, year, cause='All causes'):\n # find correct list of states\n list_of_states = arr[(arr[:,0] == str(year)) & (arr[:,2] == cause) & (arr[:,3] != \"United States\")] \n\n # fint largest deaths number in a states & find state with this death number\n result_state = list_of_states[list_of_states[:,4] == np.max(list_of_states[:,4].astype(int)).astype(str)][0] \n \n #print for assignment\n print(f'The state with most deaths from {cause}, in {year} was {result_state[3]} with {result_state[4]} deaths')\n \n return result_state",
"def test_21st_century(self):\r\n season = \"2019-20\"\r\n res = get_end_year(season)\r\n assert res == 2020",
"def test_year_2000(self):\r\n season = \"1999-00\"\r\n res = get_end_year(season)\r\n assert res == 2000",
"def highest_ratings_year(data: pd.DataFrame):\n # Convert time integer into datatime type and then to year only\n data['review_year'] = pd.to_datetime(data['review_time'], unit='s').dt.year\n # Find all rows with highest rating (5)\n highest_ratings = data[['review_overall', 'review_year']].loc[data.review_overall == 5]\n # Find year with highest count of 5 star reviews\n highest_year =highest_ratings.value_counts().reset_index().review_year.values[0]\n\n print(f\"The year with highest ratings is {highest_year}\")",
"def biggest_incr(array, f_year, t_year, cause='All causes'):\n\n # raise error if f_year >= t_year\n if f_year >= t_year:\n raise ValueError('from year must be less then to year')\n\n # creates an array of unique states\n state_keys = np.unique(array[:,3])\n # but without 'United States'\n state_keys = state_keys[np.where(state_keys != 'United States')]\n\n # create masks which only consider the chosen years and cause\n f_mask = (array[:,0].astype(int) == f_year) & (array[:,2] == cause)\n t_mask = (array[:,0].astype(int) == t_year) & (array[:,2] == cause)\n\n # create arrays with sum of death for every state at both years\n f_death_per_state = np.array([np.sum(array[f_mask & (array[:,3] == state)][:,4].astype(int)) for state in state_keys])\n t_death_per_state = np.array([np.sum(array[t_mask & (array[:,3] == state)][:,4].astype(int)) for state in state_keys])\n\n # calculating the differens in death in the interval for every state\n diff_death_per_state = t_death_per_state - f_death_per_state\n\n # only show states with a increase (since the question mentions an increase, only 'positive' numbers)\n state_keys = state_keys[diff_death_per_state > 0]\n\n try:\n # find index of smallest positive number\n # (np.argmax() raises an ValueError if it gets an empty array)\n index_max_incr = np.argmax(diff_death_per_state[diff_death_per_state > 0])\n \n # print result (for exercise)\n print(f'State with biggest increase of deaths ({diff_death_per_state[diff_death_per_state > 0].max()}) from {f_year} to {t_year} is {state_keys[index_max_incr]} by {cause}')\n\n return state_keys[index_max_incr]\n except:\n # print result (for exercise)\n print(f'no state had an increase in death by {cause} from {f_year} to {t_year}')\n\n return None",
"def print_year_most_rain(year_highest_rain_daily_total):\n\t# [ [key, value] for key, value in year_highest_rain_daily_total.items() ]\n\tlist_print_out = list(year_highest_rain_daily_total.items()) # cast the dictionary for output as properly taught be calling the .items() method of the dictionary type\n\tprint('The year with the highest rain amount was: {} with rain amount {}.'.format(list_print_out[0][0], list_print_out[0][1]))\n\n\t\"BE IT KNOWN ANYONE WHO DARES TO JUDGE THIS CODE -- I WILL NOT BE JUDGED BY YOU OR BY ANYONE -- THERE IS ONLY ONE WHO WILL JUDGE ME. Now off with the likes of you!\"",
"def calc_easter(year):\n a = year % 19\n b = year // 100\n c = year % 100\n d = (19 * a + b - b // 4 - ((b - (b + 8) // 25 + 1) // 3) + 15) % 30\n e = (32 + 2 * (b % 4) + 2 * (c // 4) - d - (c % 4)) % 7\n f = d + e - 7 * ((a + 11 * d + 22 * e) // 451) + 114\n month = f // 31\n day = f % 31 + 1\n return datetime.date(year, month, day)",
"def get_last_year(year=None):\n if year:\n return str(int(year)-1)\n else:\n return str(get_current_year(as_string=False) - 1)",
"def get_best_noc_in_year(self, year):\n query = '''SELECT nocs.noc_name, COUNT(medals.medal)\n FROM nocs, athletes, medals, athletes_nocs, games\n WHERE athletes.id = medals.athlete_id\n AND games.id = medals.game_id\n AND athletes.id = athletes_nocs.athlete_id\n AND nocs.id = athletes_nocs.noc_id\n AND lower(medals.medal) = 'gold'\n AND games.game_year = '{}'\n GROUP BY nocs.noc_name\n ORDER BY COUNT(medals.medal) DESC;'''.format(year)\n \n self.__cursor.execute(query)\n return next(self.__cursor)",
"def first_day_of_year(year):\n year -= 1\n return (year + (year // 4) - (year // 100) + (year // 400) + 1) % NUM_DAYS_IN_WEEK",
"def test_20th_century(self):\r\n season = \"1989-90\"\r\n res = get_end_year(season)\r\n assert res == 1990",
"def get_next_hockey_year(year=None):\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8:\n return get_current_year() + get_next_year()\n\n else: # if month >= 9 (Sept)\n next_year = get_next_year()\n return next_year + get_next_year(year=next_year)",
"def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1",
"def get_nuts_spec(year):\n for t in [2016, 2013, 2010, 2006, 2003]:\n if year >= t:\n return t"
] | [
"0.71644264",
"0.70825464",
"0.69648576",
"0.66781336",
"0.64653206",
"0.6443147",
"0.6366688",
"0.6256422",
"0.5998703",
"0.5982007",
"0.5957845",
"0.58945405",
"0.58911014",
"0.5854378",
"0.5853436",
"0.5780126",
"0.57732373",
"0.5755967",
"0.57521063",
"0.57442605",
"0.57415795",
"0.57305104",
"0.5729135",
"0.5727407",
"0.5724346",
"0.5720162",
"0.5714828",
"0.56823754",
"0.56768227",
"0.5674548"
] | 0.75053 | 0 |
returns a list of snowfall values, one for each date in the period | def snowfall_for_period(resort_name, start_date, end_date):
#yyyymmdd
start_date_year = int(start_date[0:4])
start_date_month = int(start_date[4:6])
start_date_day = int(start_date[6:8])
end_date_year = int(end_date[0:4])
end_date_month = int(end_date[4:6])
end_date_day = int(end_date[6:8])
resort_table = resort_table_dict[resort_name]
query = "SELECT status_date FROM %s" %(resort_table)
connection = get_connection()
period_date_list = []
snowfall_list = []
if connection is not None:
try:
for row in get_select_query_results(connection, query):
#yyyymmdd
row_key = (int(row[0].strftime('%Y')), int(row[0].strftime('%m')), int(row[0].strftime('%d')))
# Keep only dates inside [start_date, end_date]; comparing (year, month, day)
# tuples orders them chronologically.
if (start_date_year, start_date_month, start_date_day) <= row_key <= (end_date_year, end_date_month, end_date_day):
date_to_append = row[0].strftime('%Y%m%d')
period_date_list.append(date_to_append)
except Exception as e:
print(e, file=sys.stderr)
connection.close()
for date in period_date_list:
# snowfall_for_date returns a JSON-encoded value, so decode it here to avoid
# double-encoding the entries in the final response.
snowfall_to_add = json.loads(snowfall_for_date(resort_name, date))
snowfall_list.append(snowfall_to_add)
return json.dumps(snowfall_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def snowfall_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n\n query = \"SELECT snowfall FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, new_date)\n connection = get_connection()\n snowfall_to_return = None\n\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n snowfall_to_return = row\n except Exception as e:\n print(e, file=sys.stderr)\n\n connection.close()\n return json.dumps(snowfall_to_return)",
"def get_daily_goals(self, surface, dates):\n iterator = DjuDay.objects.filter(day__in=dates).order_by('day')\n return [\n [x.day, x.average * DJU_TO_KWH * KWH_TO_EUROS * surface] for x in iterator\n ]",
"def snowfall_average_for_date(resort_name, date):\n resort_table = resort_table_dict[resort_name]\n\n date_month = int(date[4:6])\n date_day = int(date[6:8])\n query = \"SELECT snowfall FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d\" %(resort_table, date_month, date_day)\n connection = get_connection()\n total = 0\n counter = 0\n for row in get_select_query_results(connection, query):\n counter += 1\n total += int(row[0])\n if (counter != 0):\n snowfall_to_return = int(total/counter)\n else:\n snowfall_to_return = 0\n return json.dumps(snowfall_to_return)",
"def zenith_range_dates(list_dates, timeframe):\r\n\r\n\tzeniths = []\r\n\r\n\tfor date in list_dates:\r\n\t\tsolar_noon = l.solar_noon(date=date, local=True)\r\n\t\tsolar_zenith = l.solar_elevation(solar_noon.replace(tzinfo=None))\r\n\t\tzeniths.append(solar_zenith)\r\n\r\n\tlist_dates = [date.isoformat() for date in list_dates]\r\n\r\n\tif timeframe == 'last_seven_days' or timeframe == 'this_month' or timeframe == 'last_month':\r\n\t\tformat = 'M/D'\r\n\telif timeframe == 'this_year' or timeframe == 'last_year':\r\n\t\tformat = 'MMM D'\r\n\r\n\treturn {'labels': list_dates, 'data': zeniths, 'yAxisLabel': 'Solar Zenith', 'format': format}",
"async def async_forecast_twice_daily(self) -> list[Forecast]:\n return self._forecast_twice_daily",
"def parse_snowfall(regime, lines, data):\n for linenum, line in enumerate(lines):\n # skipme\n if len(line.strip()) < 14:\n continue\n tokens = make_tokens(regime, line)\n key = tokens[0].strip()\n if key.startswith(\"SNOW DEPTH\"):\n data[\"snowdepth\"] = get_number(tokens[1])\n continue\n key = convert_key(key)\n data[f\"snow_{key}\"] = get_number(tokens[1])\n data[f\"snow_{key}_record\"] = get_number(tokens[3])\n yeartest = get_number_year(tokens[4])\n if yeartest is not None:\n data[f\"snow_{key}_record_years\"] = [yeartest]\n data[f\"snow_{key}_normal\"] = get_number(tokens[5])\n data[f\"snow_{key}_departure\"] = get_number(tokens[6])\n data[f\"snow_{key}_last\"] = get_number(tokens[7])\n if (\n key == \"today\"\n and yeartest is not None\n and data[f\"snow_{key}_record_years\"][0] is not None\n ):\n while (linenum + 1) < len(lines) and len(\n lines[linenum + 1].strip()\n ) == 4:\n n = get_number_year(lines[linenum + 1])\n if n is not None:\n data.setdefault(\"snow_today_record_years\", []).append(n)\n linenum += 1",
"def get_season_list_BDEW(weather_data):\n season_list = []\n\n for j, date_obj in enumerate(weather_data.index):\n YEAR = date_obj.year\n\n winter_end = dt.datetime(YEAR, 3, 21, 00, 00, 00)\n winter_start = dt.datetime(YEAR, 10, 31, 00, 00, 00)\n summer_start = dt.datetime(YEAR, 5, 15, 00, 00, 00)\n summer_end = dt.datetime(YEAR, 9, 15, 00, 00, 00)\n\n if date_obj <= winter_end or date_obj > winter_start:\n season_list.append('Winter') # Winter\n\n elif date_obj > summer_start and date_obj <= summer_end:\n season_list.append('Sommer') # Summer\n\n else:\n season_list.append('Übergangszeit') # Transition\n\n return season_list",
"def make_vlf_flare_list():\n vlf_days = []\n for i in range(len(days_to_plot)):\n tt = parse_time(days_to_plot[i]).strftime(\"%Y%m%d\")\n files_vlf = glob.glob(vlf_data_dir + tt + '*.csv')\n if len(files_vlf) != 0:\n vlf_days.append(days_to_plot[i])\n return vlf_days",
"def get_games(season, date):\n url = \"http://live.nhl.com/GameData/SeasonSchedule-\" + season + \".json\"\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n games = []\n for game in data:\n if game[\"est\"][:8] == date:\n games.append(game)\n return games",
"def distributeSeason(self):\n i = 1\n for day in self.daylist:\n if i >= monthbeg[5] and i < monthbeg[9]: #june through SEpt as per SCE\n day.season = 'summer' #https://www.sce.com/residential/rates/Time-Of-Use-Residential-Rate-Plans\n i = i + 1\n else:\n day.season = 'winter'\n i = i+1",
"def _create_historic_forecasts(\n data, time_dt, frt_dt, standard_grid_metadata=\"uk_ens\", number_of_days=5, **kwargs\n):\n historic_forecasts = iris.cube.CubeList([])\n for day in range(number_of_days):\n new_frt_dt = frt_dt + datetime.timedelta(days=day)\n new_time_dt = time_dt + datetime.timedelta(days=day)\n historic_forecasts.append(\n set_up_variable_cube(\n data - 2 + 0.2 * day,\n time=new_time_dt,\n frt=new_frt_dt,\n standard_grid_metadata=standard_grid_metadata,\n **kwargs,\n )\n )\n return historic_forecasts",
"def get_dryspells_perseason(dryspells, seasons=((12, 1, 2), (3, 4, 5),\n (6, 7, 8), (9, 10, 11))):\n dryspells_seasons = []\n for season in seasons:\n eveSeas = []\n for eveLand in dryspells:\n eves = [e for e in eveLand if e.start_date().month in season]\n eveSeas.append(eves)\n dryspells_seasons.append(eveSeas)\n\n return dryspells_seasons",
"async def async_forecast_daily(self) -> list[Forecast]:\n return self._forecast_daily",
"def get_dividends(self, stock_list, start_date=None, end_date=None):\n df_dict = {}\n df_list = []\n file_in_path = [year.replace(\".csv\", \"\") for year in self.get_csv_in_path(self.dividend_eps_path)]\n if not start_date:\n start_date = file_in_path[0]\n if not end_date:\n end_date = file_in_path[-1]\n if start_date > end_date:\n return df_dict\n for year in range(int(start_date), int(end_date)+1):\n target_path = \"{}/{}.csv\".format(self.dividend_eps_path, year)\n df = pd.read_csv(target_path, index_col=\"名稱\")\n self.replace_nan_to_other(df, \"\")\n for stock in stock_list:\n pd_index = df.index.to_list()\n old_list = []\n if stock in pd_index:\n data = df.loc[stock]\n\n # print(\"日期 = {}\".format(data.get(\"除息交易日\")))\n if df_dict.get(stock):\n old_list = df_dict.get(stock)\n\n # check data is available\n dict = {}\n if data.get(\"現金股利\") != \"\":\n dict.update({\"除息交易日\": \"{}{}\".format(year, data.get(\"除息交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除息交易日') else \"\",\n \"現金股利\": data.get(\"現金股利\"),\n })\n if data.get(\"股票股利\") != \"\":\n dict.update({\"除權交易日\": \"{}{}\".format(year, data.get(\"除權交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除權交易日') else \"\",\n \"股票股利\": data.get(\"股票股利\"),\n })\n if dict:\n old_list.append(dict)\n df_dict.update({stock: old_list})\n\n return df_dict",
"def get_comeback_days(self):\n n_comebacks = self.n_times_comeback()\n\n comebacks = []\n if n_comebacks == 0:\n return comebacks\n else:\n period = 30 / float(n_comebacks)\n for i in range(1, n_comebacks + 1):\n mean = i*period\n return_day = np.around(np.random.normal(loc=mean, scale=2, size=1))\n comebacks.append(int((max(min(return_day, 30), 1))))\n\n return comebacks",
"def impute_dates(tables, dates):\n new_fights = []\n for idx, date in enumerate(dates):\n if date == 'FUTURE EVENTS':\n break\n tables[idx]['Date'] = date\n for table in tables[:-1]:\n fights = [table[x:x+2] for x in range(0, len(table), 2)] \n for idxf, fight in enumerate(fights):\n fight.reset_index(drop=True, inplace=True)\n fight['Time'] = fight['Time'][0]\n new_fights.append(fight) \n return new_fights",
"def forecast(days):\n transition = np.array([[.7, .6], [.3, .4]])\n state = 0\n record = []\n for day in xrange(days):\n state = np.random.binomial(1, transition[1, state])\n record.append(state)\n return record",
"def scrape(self):\n self._validate_date_range(self.start_date, self.end_date)\n self._validate_team()\n self._cache_source()\n soup = self.season_raw_cache[self.start_date.year]\n df = self._parse_raw(soup)\n return self._apply_filters(df)",
"def get_flare_list(start, end, source='NASA', file_format=\"hessi_flare_list_%Y%m.fits\", inc=relativedelta(months=+1)):\r\n\r\n formats = {\r\n 5: \"%y-%m\", # YY-mm\r\n 6: \"%Y%m\", # YYYYmm\r\n 7: \"%Y-%m\", # YYYY-mm\r\n 8: \"%Y%m%d\", # YYYYmmdd\r\n 10: \"%Y-%m-%d\", # YYYY-mm-dd\r\n 19: \"%Y-%m-%dT%H:%M:%S\", # YYYY-mm-ddThh:MM:ss\r\n }\r\n try:\r\n start_dt = datetime.strptime(start, formats[len(start)])\r\n end_dt = datetime.strptime(end, formats[len(end)])\r\n except (KeyError, ValueError):\r\n raise ValueError(\"invalid datetime\")\r\n\r\n format_str = file_format[file_format.index(\"%\"):file_format.rindex(\"%\") + 2]\r\n cur_format = start_dt.strftime(format_str)\r\n end_format = end_dt.strftime(format_str)\r\n\r\n if source in KNOWN_FLARE_LIST_SOURCES:\r\n source = KNOWN_FLARE_LIST_SOURCES[source]\r\n\r\n cur_dt = start_dt\r\n result = pd.DataFrame()\r\n while cur_format <= end_format:\r\n file = file_format.replace(format_str, cur_format)\r\n cur_dt = cur_dt + inc\r\n cur_format = cur_dt.strftime(format_str)\r\n\r\n # allow missing files with a warning, e.g. there is no file for 2014-07\r\n try:\r\n result = result.append(read_flare_list_file(source + file), ignore_index=True)\r\n except HTTPError as e:\r\n if e.code == 404:\r\n warnings.warn(\"Skipped: \" + file + \" (\" + str(e.code) + \" \" + e.msg + \")\")\r\n else:\r\n raise\r\n except FileNotFoundError as e:\r\n warnings.warn(\"Skipped: \" + file + \" (file not found)\")\r\n\r\n # filter results for more detailed time constraints (if applicable)\r\n if len(end) < 8:\r\n end_dt += relativedelta(months=+1, microseconds=-1) # add month -1ms to address inclusive right bound\r\n elif len(end) <= 10:\r\n end_dt += relativedelta(days=+1, microseconds=-1) # add day if end date was specified on a day-basis\r\n\r\n left_bound = result['END_TIME'].searchsorted(start_dt, 'left') # END_TIME >= start_dt\r\n right_bound = result['START_TIME'].searchsorted(end_dt, 'right') # START_TIME <= end_dt (inclusive)\r\n return result[left_bound:right_bound]",
"def get_daily(Data, Y, M, D):\n start = datetime(year=Y, month=M, day=D, hour=0, minute=0)\n end = datetime(year=Y, month=M, day=D, hour=23, minute=59, second=59)\n return Data[start:end][\"clouds\"].map(value_by_cloud)",
"def betting_lines(year):\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Webapges are by dates\n all_dates = m.find('game_log', {'season': year}, {'_id': 0, 'date': 1}).distinct('date')\n\n browser = webdriver.Chrome('chromedriver')\n\n # Iterate through each date in a season\n for game_date in all_dates:\n\n # Get URL\n url = 'https://classic.sportsbookreview.com/betting-odds/nba-basketball/money-line/?date=' + datetime.strftime(game_date, '%Y%m%d')\n\n scrape_betting_page(url, browser, m, game_date)\n\n browser.close()",
"def get_yearly_vectors(dates, occ_schedules, el_schedules, dhw_schedules, pro_schedules, month_schedule):\n\n occ = []\n el = []\n dhw = []\n pro = []\n\n if dhw_schedules[0].sum() != 0:\n dhw_weekday_max = dhw_schedules[0].sum() ** -1\n else: dhw_weekday_max = 0\n\n if dhw_schedules[1].sum() != 0:\n dhw_sat_max = dhw_schedules[1].sum() ** -1\n else: dhw_sat_max = 0\n\n if dhw_schedules[2].sum() != 0:\n dhw_sun_max = dhw_schedules[2].sum() ** -1\n else: dhw_sun_max = 0\n\n for date in dates:\n month_year = month_schedule[date.month - 1]\n hour_day = date.hour\n dayofweek = date.dayofweek\n if 0 <= dayofweek < 5: # weekday\n occ.append(occ_schedules[0][hour_day] * month_year)\n el.append(el_schedules[0][hour_day] * month_year)\n dhw.append(dhw_schedules[0][hour_day] * month_year * dhw_weekday_max) # normalized dhw demand flow rates\n pro.append(pro_schedules[0][hour_day] * month_year)\n elif dayofweek is 5: # saturday\n occ.append(occ_schedules[1][hour_day] * month_year)\n el.append(el_schedules[1][hour_day] * month_year)\n dhw.append(dhw_schedules[1][hour_day] * month_year * dhw_sat_max) # normalized dhw demand flow rates\n pro.append(pro_schedules[1][hour_day] * month_year)\n else: # sunday\n occ.append(occ_schedules[2][hour_day] * month_year)\n el.append(el_schedules[2][hour_day] * month_year)\n dhw.append(dhw_schedules[2][hour_day] * month_year * dhw_sun_max) # normalized dhw demand flow rates\n pro.append(pro_schedules[2][hour_day] * month_year)\n\n return occ, el, dhw, pro",
"def forecast(self) -> list[Forecast]:\r\n return self._forecast",
"def get_dates(season, info):\n url = 'http://www.basketball-reference.com/leagues/NBA_{0}_games.html'.format(season.split('-')[-1])\n rv = requests.get(url)\n soup = BeautifulSoup(rv.text)\n seasons = soup.find_all('table', {'class': 'sortable stats_table'})\n if len(seasons) == 2:\n reg_season, post_season = seasons\n else:\n reg_season, post_season = seasons[0], None\n dates = set()\n for table in [reg_season, post_season]:\n if table:\n rows = table.tbody.find_all('tr')\n for row in rows:\n match = row.find('a', href=True, text='Box Score')\n if match:\n match_code = match['href'].split('/')[2].split('.')[0]\n date = match_code[:-4]\n if info == 'money_lines':\n date = \"-\".join([date[:4], date[4:6], date[6:]])\n dates.add(date)\n return sorted(list(dates))",
"def get_holidays(year, url, service_key):\n payload = {'solYear': str(year),\n 'numOfRows': '50',\n '_type': 'json',\n 'ServiceKey': service_key}\n\n payload_str = urllib.parse.urlencode(payload, safe=\"%\") # service key contains \"%\"\n\n response = requests.get(url, params=payload_str)\n if response.status_code == 200:\n holidays = [item['locdate'] for item in response.json()['response']['body']['items']['item']]\n holidays = list(map(conv_int_to_date, holidays))\n return holidays",
"def scrape():\n league_year = Config.get_property(\"league_year\")\n\n # Create table\n season_data = client.season_schedule(league_year)\n season_data = br_enum_to_string(season_data)\n return season_data",
"def get_dates(cinema_code):\n dates = []\n dates_url = get_dates_url(cinema_code)\n dates_json = json_response(dates_url)\n for date in dates_json['body']['dates']:\n dates.append(date)\n return dates",
"def holtWintersForecast(requestContext, seriesList):\n results = []\n bootstrapList = _fetchWithBootstrap(requestContext, seriesList, days=7)\n for bootstrap, series in zip(bootstrapList, seriesList):\n analysis = holtWintersAnalysis(bootstrap)\n results.append(_trimBootstrap(analysis['predictions'], series))\n return results",
"def iterateList(self, numDays):\n import dateutil as du\n self.daysList = []\n for pull_date in range(numDays):\n self.daysList.append(str((self.right_now + du.relativedelta.relativedelta(days=pull_date)).date()))\n return self.daysList",
"def sacred_wednesdays_in_range(range):\n a = range[0]\n b = range[1]\n wed = DayOfWeek.Wednesday.on_or_after(a)\n h_date = HinduLunarDate.from_fixed(wed)\n ell = [wed] if (h_date.day == 8) else []\n if is_in_range(wed, range):\n ell[:0] = sacred_wednesdays_in_range([wed + 1, b])\n return ell\n else:\n return []"
] | [
"0.63649917",
"0.6109526",
"0.6082759",
"0.5757297",
"0.5724909",
"0.5718514",
"0.56843966",
"0.5634992",
"0.56042325",
"0.5594472",
"0.5592005",
"0.5579007",
"0.54786044",
"0.5469435",
"0.5457135",
"0.54524297",
"0.54033566",
"0.53840905",
"0.532132",
"0.53100914",
"0.5305155",
"0.5283616",
"0.5282685",
"0.5254685",
"0.5238447",
"0.5230298",
"0.52297854",
"0.52283174",
"0.51883787",
"0.5186164"
] | 0.7388031 | 0 |
returns a list of base_depth values, one for each date in the period | def base_depth_for_period(resort_name, start_date, end_date):
start_date_year = int(start_date[0:4])
start_date_month = int(start_date[4:6])
start_date_day = int(start_date[6:8])
end_date_year = int(end_date[0:4])
end_date_month = int(end_date[4:6])
end_date_day = int(end_date[6:8])
resort_table = resort_table_dict[resort_name]
query = "SELECT status_date FROM %s" %(resort_table)
connection = get_connection()
period_date_list = []
base_depth_list = []
if connection is not None:
try:
for row in get_select_query_results(connection, query):
row_key = (int(row[0].strftime('%Y')), int(row[0].strftime('%m')), int(row[0].strftime('%d')))
# Same chronological range check as in snowfall_for_period: keep only dates
# inside [start_date, end_date].
if (start_date_year, start_date_month, start_date_day) <= row_key <= (end_date_year, end_date_month, end_date_day):
date_to_add = row[0].strftime('%Y%m%d')
period_date_list.append(date_to_add)
except Exception as e:
print(e, file=sys.stderr)
connection.close()
for date in period_date_list:
# base_depth_for_date returns a JSON-encoded value, so decode it before
# collecting to avoid double-encoding in the final response.
base_depth_for_list = json.loads(base_depth_for_date(resort_name, date))
base_depth_list.append(base_depth_for_list)
return json.dumps(base_depth_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base_depth_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n new_date = str(date)\n base_depth_to_return = None\n query = \"SELECT base_depth FROM %s WHERE status_date = to_date(%s::text, 'YYYYMMDD')\" %(resort_table, date)\n\n connection = get_connection()\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n base_depth_to_return = row\n except Exception as e:\n print(e, file=sys.stderr)\n connection.close()\n return json.dumps(base_depth_to_return)",
"def base_depth_average_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n date_month = int(date[4:6])\n date_day = int(date[6:8])\n query = \"SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d\" %(resort_table, date_month, date_day)\n connection = get_connection()\n total = 0\n counter = 0\n for row in get_select_query_results(connection, query):\n counter += 1\n total += int(row[0])\n if (counter != 0): \n base_depth_to_return = int(total/counter)\n else:\n base_depth_to_return = 0\n return json.dumps(base_depth_to_return)",
"def get_depths(self, variables):\n\n return [0.]",
"def get_periods():\n return [\n relativedelta(),\n relativedelta(days=6),\n relativedelta(months=1),\n relativedelta(months=3),\n relativedelta(years=1),\n relativedelta(years=5)\n ]",
"def r_to_depth(x, interval):\n return x * interval / 3600.0",
"def derived_series(self):\n res = [self]\n current = self\n nxt = self.derived_subgroup()\n while not current.is_subgroup(nxt):\n res.append(nxt)\n current = nxt\n nxt = nxt.derived_subgroup()\n return res",
"def depths(self):\n return self._origin.depth",
"def depths(self):\n return self._origin.depth",
"def getLevels():",
"def depths(self):\n deps = []\n quads = self.getQuadrilaterals()\n groups = self._getGroupIndex()\n u_groups = np.unique(groups)\n ng = len(u_groups)\n for i in range(ng):\n q_ind = np.where(groups == u_groups[i])[0]\n nq = len(q_ind)\n top_deps = []\n bot_deps = []\n for j in range(nq):\n if j == 0:\n top0 = [quads[q_ind[j]][0].depitude]\n bot0 = [quads[q_ind[j]][3].depitude]\n top_deps = top_deps + top0\n bot_deps = bot_deps + bot0\n top_deps = top_deps + [quads[q_ind[j]][1].depitude]\n bot_deps = bot_deps + [quads[q_ind[j]][2].depitude]\n deps = deps + top_deps + bot_deps[::-1] + top0 + [np.nan]\n\n return np.array(deps)",
"def factor_ret(self):\n factor_ret_all = pd.DataFrame([])\n for i in range(len(self.trade_date) - self.timelog):\n date = self.trade_date.iloc[i,0]\n date_lag = self.trade_date.iloc[i + self.timelog,0]\n factor_ret = get_factor_ret(date,date_lag)\n factor_ret_all = pd.concat([factor_ret_all,pd.DataFrame(factor_ret).T],axis = 0)\n print(i)\n cumulative_factor_ret = factor_ret_all.cumsum(axis = 0)\n factor_ret_all.index = self.trade_date.iloc[:len(self.trade_date) - self.timelog,0]\n cumulative_factor_ret.index = self.trade_date.iloc[:len(self.trade_date) -self.timelog,0]\n return factor_ret_all,cumulative_factor_ret",
"def get_tank_levels(self, start):\n tanks_dict = {}\n for tank in self.tank_levels:\n dataframe_ = pd.DataFrame()\n dataframe_['Time'] = list(map(lambda x: start + x * pd.Timedelta('1S'), self.tank_times[tank]))\n dataframe_.tail(1)['Time'] -= pd.Timedelta('1S')\n dataframe_[tank] = self.tank_levels[tank]\n tanks_dict[tank] = dataframe_\n return tanks_dict",
"def getHierarchies():",
"def getHierarchies():",
"def get_levels(std0, slope, nsigma):\n nslope = nsigma * slope\n levels = [0]\n while levels[-1] <= 1:\n levels.append((levels[-1] * (1 + nslope) + 2 * nsigma * std0) / (1 - nslope))\n levels.pop()\n return levels",
"def MaxMinLevels(dates, levels):\r\n datestart_neg = 0\r\n datestart_pos = 0\r\n date_interval_neg = 0\r\n date_interval_pos = 0\r\n bin_start_neg = 0\r\n bin_start_pos = 0\r\n max_dates = []\r\n min_dates = []\r\n y_mins = []\r\n y_maxes = []\r\n for bin_index in range(len(dates)-1):\r\n elev_start = levels[bin_index]\r\n elev_end = levels[bin_index+1]\r\n trans_cond = (elev_start-np.nanmean(levels))*(elev_end-np.nanmean(levels)) # subtract the means for a good crossover point\r\n if (trans_cond<=0)&(elev_start<elev_end):\r\n datestart_pos = dates.iloc[bin_index]\r\n bin_start_pos = bin_index\r\n dateend_neg = dates.iloc[bin_index+1]\r\n if (datestart_neg!=0):\r\n date_interval_neg = (dateend_neg - datestart_neg).seconds # date interval in seconds\r\n if (date_interval_neg > 6000): # Make sure small fluctuations aren't being counted\r\n temp_interval = levels.iloc[bin_start_neg:bin_index]\r\n min_index = temp_interval.loc[temp_interval==np.nanmin(temp_interval)].index.values[0]\r\n if (len(min_dates) == 0):\r\n y_mins.append(np.nanmin(temp_interval))\r\n min_dates.append(dates.iloc[min_index])\r\n if (dates.iloc[min_index] != min_dates[-1]): # makes sure duplicates aren't being printed\r\n y_mins.append(np.nanmin(temp_interval)) # duplicates are somehow the result of nans\r\n min_dates.append(dates.iloc[min_index])\r\n if (trans_cond<=0)&(elev_start>elev_end):\r\n datestart_neg = dates.iloc[bin_index]\r\n bin_start_neg = bin_index\r\n dateend_pos = dates.iloc[bin_index+1]\r\n if (datestart_pos!=0):\r\n date_interval_pos = (dateend_pos - datestart_pos).seconds # date interval in seconds\r\n if (date_interval_pos > 6000): # Make sure small fluctuations aren't being counted\r\n temp_interval = levels.iloc[bin_start_pos:bin_index] \r\n max_index = temp_interval.loc[temp_interval==np.nanmax(temp_interval)].index.values[0] \r\n if (len(max_dates) == 0):\r\n y_maxes.append(np.nanmax(temp_interval))\r\n max_dates.append(dates.iloc[max_index])\r\n if (dates.iloc[max_index] != max_dates[-1]): \r\n y_maxes.append(np.nanmax(temp_interval)) # makes sure duplicates aren't being printed\r\n max_dates.append(dates.iloc[max_index]) # duplicates are somehow the result of nans\r\n min_dates = np.array(min_dates)\r\n max_dates = np.array(max_dates)\r\n y_mins = np.array(y_mins)\r\n y_maxes = np.array(y_maxes)\r\n return min_dates, y_mins, max_dates, y_maxes",
"def nodes_at_depth(depth):\n return list(range(2**depth-1, 2**(depth+1)-1))",
"def days_from_start(self) -> List[int]:\n n_periods = [(x - self.date_range.min())/pd.Timedelta('1D')\n for x in self.date_range]\n return n_periods",
"def days_from_start(self) -> List[int]:\n n_periods = [(x - self.date_range.min())/pd.Timedelta('1D')\n for x in self.date_range]\n return n_periods",
"def period(self):\n from sage.arith.all import gcd\n\n g = 0\n\n for component in self.strongly_connected_components():\n levels = dict((s, None) for s in component)\n vertices_in_scc = levels # considers level as a set\n s = component[0]\n levels[s] = 0\n this_level = [s]\n l = 1\n while this_level:\n next_level = []\n for u in this_level:\n # we have levels[u] == l-1\n for v in self.neighbor_out_iterator(u):\n # ignore edges leaving the component\n if v not in vertices_in_scc:\n continue\n level_v = levels[v]\n if level_v is not None: # Non-Tree Edge\n g = gcd(g, l - level_v)\n if g == 1:\n return 1\n else: # Tree Edge\n next_level.append(v)\n levels[v] = l\n this_level = next_level\n l += 1\n\n return g",
"def nesting_factor(for_position):\n deg=1 \n deg_list=[]\n if for_position:\n for i,position in enumerate(for_position):\n #exempting the first item in the list of positions of for loops in a script\n if i !=0:\n #increases the depth by 1 if the difference btw current position and the previous is 4 \n if position - for_position[i-1] ==4:\n deg+=1\n continue\n #Update the degree list and degree when difference btw current position and the previous >= -(degree -1)X 4 \n if position - for_position[i-1] >= (1-deg)*4:\n deg_list.append(deg)\n deg=1\n continue\n if for_position[-1] and deg>1:\n deg_list.append(deg)\n return deg_list",
"def _get_lags_dict(self):\n lags_dict = {}\n for fcst_date in self.dates:\n day_of_year = self.calculate_day_of_year(fcst_date)\n for init_date in self.init_dates:\n lag = day_of_year - self.calculate_day_of_year(init_date)\n days_of_year = lags_dict.get(lag)\n if days_of_year:\n days_of_year.append(day_of_year)\n else:\n lags_dict[lag] = [day_of_year]\n \n return lags_dict",
"def get_levels(self, arcs):\n levels = set(map(lambda arc: arc['end'] - arc['start'], arcs))\n return sorted(list(levels))",
"def create_date_list(\n periods: int, start_date: str = \"2020-09-01\", freq: str = \"d\"\n) -> list:\n return [str(d)[:10] for d in pd.date_range(start_date, periods=periods, freq=freq)]",
"def daily_values(self) -> List[RecipeObjectNutrientsCalories]:\n return self._daily_values",
"def __fill_consecutive_tree_levels(parent=self.root):\n for child in parent.children:\n lst.append(child.value)\n __fill_consecutive_tree_levels(parent=child) # call recursively",
"def subgraphs_of_length(self, days=None, periods=None):\n graphs = []\n if days:\n sg_length = datetime.timedelta(days=days)\n else:\n sg_length = periods\n\n start_date = self.min_date\n end_date = start_date + sg_length\n done = False\n while not done:\n if start_date > self.max_date:\n break\n if end_date > self.max_date:\n # end_date = self.max_date\n done = True\n print(start_date, end_date)\n new = self.subgraph_within_dates(start_date, end_date)\n if new.nx_graph.number_of_edges():\n graphs.append(new)\n start_date += sg_length\n end_date += sg_length\n return graphs",
"def dayPeriod(lon,lat,n1,n2,day):\n x, y, z = _getXYZ(lon,lat)\n N = range(n1,n2+1)\n D = []\n for n_ in N:\n n = n_ * day\n i = range(0,n)\n j = range(n,n+n)\n d_ = gcDist(x[i],y[i],z[i],\n x[j],y[j],z[j])\n D = D + [d_,]\n print n, d_\n\n return (N,D)",
"def run(start_year, end_year, depth_from, depth_to):\n years, times, rootgrps = retrieve(1950,2018)\n \n HC = calculate_HC(rootgrps,25,31, -43, 41)\n \n months, month_avgs = monthly_avgs(HC)\n pos = str(-43)+\"N \"+str(41)+\"E\"\n \n return years, times, HC, pos, months, month_avgs",
"def _active_depth(self):\n for n_left, n_right in self.graph.dfs():\n if self.node(n_right)['pad'] == 0:\n return self.node(n_right)['level']\n return 0"
] | [
"0.6518969",
"0.60030866",
"0.5712574",
"0.54644364",
"0.5354301",
"0.5350619",
"0.53494644",
"0.53494644",
"0.53139776",
"0.52619964",
"0.5192515",
"0.51612735",
"0.5154456",
"0.5154456",
"0.5072636",
"0.50671273",
"0.50522",
"0.5036785",
"0.5036785",
"0.50244904",
"0.5015947",
"0.49943283",
"0.49829018",
"0.4982048",
"0.4960305",
"0.49267054",
"0.49113557",
"0.49054086",
"0.48992845",
"0.48969045"
] | 0.73825467 | 0 |
Downloads the Olivetti faces dataset and saves it in the output_filepath directory. | def main(output_filepath):
logger = logging.getLogger(__name__)
logger.info('Downloading Olivetti faces...')
olivetti_faces = fetch_olivetti_faces()
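# Each row of olivetti_faces.data is one flattened 64x64 grayscale face, so
# histogram equalization is applied image-by-image along axis 1 to normalise
# contrast before the split.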
data = pd.DataFrame(data=np.apply_along_axis(exposure.equalize_hist, 1, olivetti_faces.data))
labels = pd.DataFrame(data=olivetti_faces.target)
logger.info('Splitting dataset into training and testing sets...')
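# Hold out 20% of the faces for testing; random_state=0 keeps the split
# reproducible across runs.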
train_data, test_data, train_labels, test_labels = train_test_split(
data, labels, test_size=0.2, random_state=0)
train_data.to_csv(os.path.join(output_filepath, 'face_data_train.csv'), index=False)
train_labels.to_csv(os.path.join(output_filepath, 'labels_train.csv'), index=False)
test_data.to_csv(os.path.join(output_filepath, 'face_data_test.csv'), index=False)
test_labels.to_csv(os.path.join(output_filepath, 'labels_test.csv'), index=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download_glove ():\n # Get the URL ...\n print(\"Downloading https://nlp.stanford.edu/data/glove.6B.zip ...\")\n res = requests.get(\"https://nlp.stanford.edu/data/glove.6B.zip\", stream=True)\n if res.status_code != 200:\n print(\"Could not download the 6B GloVe Dataset! The server responded with code \" + res.status_code + \".\")\n sys.exit(1)\n\n # ... and write it to file\n fp = open(\"data/glove.6B.zip\", \"wb\")\n total_length = int(res.headers.get('content-length'))\n # Thanks again to the internet for this beautiful piece of code <3\n for chunk in tqdm.tqdm(res.iter_content(chunk_size=1024), unit=\"KB\", total=ceil(total_length/1024) + 1):\n if chunk:\n fp.write(chunk)\n fp.flush()\n fp.close()\n print(\"ZIP-file downloaded! Extracting ...\")\n with ZipFile(\"data/glove.6B.zip\", \"r\") as zf:\n files = zf.namelist()\n print(\"Members in archive:\")\n print(\"\\n\".join(files))\n\n for file in files:\n if file.endswith(\"glove.6B.300d.txt\"):\n print(\"Extracting member \" + file + \" from archive ...\")\n zf.extract(file)\n break\n \n # Remove the zip file again\n os.remove(\"data/glove.6B.zip\")\n print(\"Successfully extracted GloVe embeddings (300 dimensions) to data directory.\")\n print(\"You can now train the classifier using the GloVe embeddings.\")",
"def face_scraper():\n base_directory = pathlib.Path(__file__).parent.absolute()\n test_or_train, is_target_face = ask_for_directory()\n folders = ['test', 'train']\n test_or_train = folders[test_or_train]\n source_directory = os.path.join(base_directory, 'rawimages', test_or_train, str(is_target_face))\n target_directory = os.path.join(base_directory, 'datasets', test_or_train, str(is_target_face))\n print('The source folder is ' + source_directory)\n print('The target folder is ' + target_directory)\n print('Files before saving images:')\n print(os.listdir(target_directory))\n crop_and_save_images(source_directory, target_directory)\n print('Files after saving images:')\n print(os.listdir(target_directory))",
"def download_imagenet(self):\n raise NotImplementedError('download_imagenet method not implemented.')",
"def download(self):\n cloud_path = f\"gs://{const.GCS_BUCKET}/{self.GCS_PATH}\"\n # download label file\n label_zip = download_file_from_gcs(\n cloud_path, self.root, self.LABEL_ZIP\n )\n with zipfile.ZipFile(label_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)\n\n # download tfexamples for a dataset split\n tfexamples_zip = download_file_from_gcs(\n cloud_path, self.root, self.SPLITS_ZIP.get(self.split)\n )\n with zipfile.ZipFile(tfexamples_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)",
"def download_dataset(base_dir, scene):\n\n # setup depends on dataset\n if len(scene.split('_')) == 1: # default\n modality, part = None, None # declaration necessary for instatiation check\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene]['gt']['name'])\n \n elif len(scene.split('_')) == 3: # AeroRIT\n scene, modality, part = scene.split('_')\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['gt']['name'])\n else :\n raise RuntimeError('Given scene unknown!')\n\n base_dir.mkdir(parents=True, exist_ok=True)\n\n # download data and load from file\n if filepath_data.suffix == '.mat': # datasets from ehu.es\n if not filepath_data.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_data)) as t:\n url = DATASETS_CONFIG[scene]['img']['url']\n urlretrieve(url, filename=filepath_data, reporthook=t.update_to)\n\n if not filepath_labels.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_labels)) as t:\n url = DATASETS_CONFIG[scene]['gt']['url']\n urlretrieve(url, filename=filepath_labels, reporthook=t.update_to)\n \n data = loadmat(filepath_data)[DATASETS_CONFIG[scene]['img']['key']]\n labels = loadmat(filepath_labels)[DATASETS_CONFIG[scene]['gt']['key']]\n\n elif filepath_data.suffix == '.tif': # aerorit\n if not filepath_data.is_file(): # download image if necessary\n print(\"Downloading {}\".format(filepath_data))\n url = DATASETS_CONFIG[scene][modality]['img']['url']\n gdown.download(url=url, output=str(filepath_data), quiet=False)\n\n if not filepath_labels.is_file(): # download labels if necessary\n print(\"Downloading {}\".format(filepath_labels))\n url = DATASETS_CONFIG[scene][modality]['gt']['url']\n gdown.download(url=url, output=str(filepath_labels), quiet=False)\n \n # extract part of image as defined in Rangnekar et al.\n base_dir = base_dir.joinpath(modality).joinpath(part)\n base_dir.mkdir(parents=True, exist_ok=True)\n \n # check early if data exists already to avoid unecessarily loading and encoding data\n filepath_hdf = base_dir.joinpath(f'aerorit_{modality}_{part}.h5')\n if filepath_hdf.is_file():\n return filepath_hdf\n\n # extract defined part of dataset\n start_col = DATASETS_CONFIG[scene][part]['start_col']\n end_col = DATASETS_CONFIG[scene][part]['end_col']\n \n data = np.transpose(io.imread(filepath_data), (1,2,0))[53:,7:,:]\n data = data[:, start_col:end_col, :]\n\n labels = encode_labelmap(io.imread(filepath_labels), AERORIT_COLOURLABELMAP)[53:,7:]\n labels = labels[:, start_col:end_col]\n filepath_data = filepath_hdf\n\n filepath_hdf = filepath_data.with_suffix('.h5')\n \n # export data and labels to hdf\n if not filepath_hdf.is_file():\n with h5py.File(filepath_hdf, \"w\") as f:\n f.create_dataset(\"data\", data=data)\n f.create_dataset(\"labels\", data=labels)\n f.attrs['scene'] = scene\n if not modality is None:\n f.attrs['modality'] = modality\n if not part is None:\n f.attrs['part'] = part\n return filepath_hdf\n\n return filepath_hdf",
"def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")",
"def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())",
"def get_data(data_dir, hdf5):\r\n\r\n # Get the filenames of the lists containing image paths and labels.\r\n train_file, val_file = build_dataset_index(data_dir)\r\n\r\n # Check if (creating and) loading from hdf5 database is desired.\r\n if hdf5:\r\n # Create folder to store dataset.\r\n if not os.path.exists('hdf5'):\r\n os.makedirs('hdf5')\r\n # Check if hdf5 databases already exist and create them if not.\r\n if not os.path.exists('hdf5/tiny-imagenet_train.h5'):\r\n from tflearn.data_utils import build_hdf5_image_dataset\r\n print ' Creating hdf5 train dataset.'\r\n build_hdf5_image_dataset(train_file, image_shape=(64, 64), mode='file', output_path='hdf5/tiny-imagenet_train.h5', categorical_labels=True, normalize=True)\r\n\r\n if not os.path.exists('hdf5/tiny-imagenet_val.h5'):\r\n from tflearn.data_utils import build_hdf5_image_dataset\r\n print ' Creating hdf5 val dataset.'\r\n build_hdf5_image_dataset(val_file, image_shape=(64, 64), mode='file', output_path='hdf5/tiny-imagenet_val.h5', categorical_labels=True, normalize=True)\r\n\r\n # Load training data from hdf5 dataset.\r\n h5f = h5py.File('hdf5/tiny-imagenet_train.h5', 'r')\r\n X = h5f['X']\r\n Y = h5f['Y']\r\n\r\n # Load validation data.\r\n h5f = h5py.File('hdf5/tiny-imagenet_val.h5', 'r')\r\n X_test = h5f['X']\r\n Y_test = h5f['Y'] \r\n\r\n # Load images directly from disk when they are required.\r\n else:\r\n from tflearn.data_utils import image_preloader\r\n X, Y = image_preloader(train_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)\r\n X_test, Y_test = image_preloader(val_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)\r\n\r\n # Randomly shuffle the dataset.\r\n X, Y = shuffle(X, Y)\r\n\r\n return X, Y, X_test, Y_test",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def download_and_extract_data(tmp_dir, dataset):\n url = dataset[0]\n print(dataset)\n compressed_filename = os.path.basename(url)\n compressed_file = generator_utils.maybe_download(\n tmp_dir, compressed_filename, url)\n\n for file in dataset[1]:\n tf.logging.info(\"Reading file: %s\" % file)\n filepath = os.path.join(tmp_dir, file)\n\n # Extract from tar if needed.\n if not tf.gfile.Exists(filepath):\n with tarfile.open(compressed_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(tmp_dir)\n\n documents_filename, labels_filename = dataset[1]\n documents_filepath = os.path.join(tmp_dir, documents_filename)\n labels_filepath = os.path.join(tmp_dir, labels_filename)\n return documents_filepath, labels_filepath",
"def maybe_download_and_extract(self, DATA_URL):\n\n print('Will download the pre-trained Inception Model to the same path with this validator!')\n self.Model_Save_Path = os.path.join(\"/\",\n os.getcwd(), 'DownLoaded_Inception/')\n print('Start download to ' + self.Model_Save_Path)\n\n if not os.path.exists(self.Model_Save_Path):\n os.makedirs(self.Model_Save_Path)\n\n dest_directory = self.Model_Save_Path\n\n filename = self.DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(\n DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename,\n statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(self):\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url in self.resources:\n filename = url.rpartition('/')[2]\n download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=None)\n\n print('Processing...')\n\n training_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_train_valid.amat'))\n )\n test_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_test.amat'))\n )\n\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')",
"def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")",
"def fetch_training_data(filename, output, db_url=None):\n r2dt.write_training_data(filename, db_url, output)",
"def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")",
"def get_glove_dataset(dataset):\n md5sums = {'6B.50d': '8e1557d1228decbda7db6dfd81cd9909',\n '6B.100d': 'c92dbbeacde2b0384a43014885a60b2c',\n '6B.200d': 'af271b46c04b0b2e41a84d8cd806178d',\n '6B.300d': '30290210376887dcc6d0a5a6374d8255'}\n glove_path = os.path.abspath('data/glove/results')\n return get_file(dataset,\n 'http://files.fast.ai/models/glove/' + dataset + '.tgz',\n cache_subdir=glove_path,\n md5_hash=md5sums.get(dataset, None),\n untar=True)",
"def download(self, root='./'):\n dir = os.path.join(root, 'tiny-imagenet-200')\n dir_train = os.path.join(dir, 'train')\n if os.path.exists(dir) and os.path.exists(dir_train):\n print('==> Already downloaded.')\n return\n\n path = Path(os.path.join(root, 'tiny-imagenet-200.zip'))\n if not os.path.exists(path):\n os.makedirs(path.parent, exist_ok=True)\n\n print('==> Downloading TinyImagenet200...')\n with urllib.request.urlopen(self.url) as response, \\\n open(str(path), 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n\n print('==> Extracting TinyImagenet200...')\n with zipfile.ZipFile(str(path)) as zf:\n zf.extractall(root)",
"def test_DL_export_create_file(self):\n filepath = '1.txt'\n dl = flow_processing_input.DetectorsLocation(2021)\n dl.detectors_location_dict = createDLDataset(1).dataset\n dl.export_to_file(filepath)\n # Check if file was created at filepath\n self.assertTrue(os.path.exists(filepath))\n os.remove(filepath)",
"def download_data(dev_mode: str, model: word2vec.Word2Vec) -> (np.ndarray, np.ndarray):\n assert dev_mode.lower() == 'false' or dev_mode.lower() == 'true'\n \n if dev_mode.lower() == 'false':\n print('Using Actual Data...')\n data_path = os.path.join(args.data_dir, 'HIV.csv')\n df = pd.read_csv(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(Chem.MolFromSmiles(x['smiles']), 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['HIV_active'].astype(int))\n else:\n # use example data set\n data_path = os.path.join(args.data_dir, 'ames.sdf')\n df = PandasTools.LoadSDF(data_path)\n df['sentence'] = df.apply(lambda x: MolSentence(mol2alt_sentence(x['ROMol'], 1)), axis=1)\n df['mol2vec'] = [DfVec(x) for x in sentences2vec(df['sentence'], model, unseen='UNK')]\n \n # convert dataframe into numpy array for training\n X = np.array([x.vec for x in df['mol2vec']])\n y = np.array(df['class'].astype(int))\n \n return X,y",
"def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)",
"def _download_to_flc(self):\n self.communicator.download_to_flc()",
"def download_dataset(self):\n raise NotImplementedError",
"def initiate_yolo_detect(images_path, save_to_path, detections_file='pickles/bounding_boxes.pickle'):\n for filename in os.listdir(images_path):\n bound_boxes = detect_objects_on_image(\n os.path.join(images_path, filename), detections_file)\n predictions_path = os.path.join(\n save_to_path, 'predictions_' + filename)\n print('predictions path', predictions_path)\n copy2('predictions_' + os.path.basename(image_directory) +\n '.png', predictions_path)",
"def main(args):\n data_transform = transforms.Compose([\n transforms.Scale((256, 256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n dataset = datasets.ImageFolder(root=args.root_dir, transform=data_transform)\n dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, \n shuffle=False, num_workers=0, pin_memory=True)\n net = get_feature_extractor()\n\n if torch.cuda.is_available():\n net = net.cuda()\n\n features_out = np.zeros((len(dataset), 4096))\n labels_out = np.zeros(len(dataset))\n \n p = progressbar.ProgressBar(widgets=[progressbar.ETA(), ' ', progressbar.Percentage()])\n for i, samples in p(enumerate(dataloader)):\n images, labels = samples\n if torch.cuda.is_available():\n images = images.cuda()\n images = Variable(images)\n features = net(images).cpu().data.numpy()\n features_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = features\n labels_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = labels.int().numpy()\n print(i)\n\n with open(os.path.join(args.out, 'features.pickle'),'wb') as f:\n pickle.dump(features_out, f)\n with open(os.path.join(args.out, 'labels.pickle'),'wb') as f:\n pickle.dump(labels_out, f)",
"def save_data(ofile, nsteps, lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity): \n \n edges_grp = ofile.create_group('edges')\n edges_grp.create_dataset('x', data=xedges, compression='gzip')\n edges_grp.create_dataset('y', data=yedges, compression='gzip')\n \n ofile.create_dataset('time', data=time, compression='gzip')\n \n tables_grp = ofile.create_group('tables')\n tables_grp.create_dataset('rho', data=rho_hist, compression='gzip')\n tables_grp.create_dataset('vx', data=vx_hist, compression='gzip')\n tables_grp.create_dataset('vy', data=vy_hist, compression='gzip')\n tables_grp.create_dataset('vorticity', data=vorticity, compression='gzip')\n \n box_grp = ofile.create_group('box')\n box_grp.create_dataset('x', data=lx)\n box_grp.create_dataset('y', data=ly)\n \n ofile.create_dataset('nsteps', data=nsteps)\n \n return",
"def train():\n face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n \n # Load all saved people\n people = PersonModel.select()\n\n # List of face images\n photos = []\n # List of person IDs corresponding to images in photos[]\n labels = []\n\n for person in people:\n person_dataset_path = os.path.join(Constants.PATH_DATASET, \"person_{}\".format(person.id))\n\n if not os.path.exists(person_dataset_path):\n continue\n\n # List of all images for current person\n photo_files = [os.path.join(person_dataset_path, item) for item in os.listdir(person_dataset_path)]\n person.update(photos_count=len(photo_files)).execute()\n\n # Load all photos\n for photo_file in photo_files:\n photos.append(\n np.array(Image.open(photo_file).convert(\"L\"))\n )\n \n labels.append(person.id)\n\n face_recognizer.train(photos, np.array(labels))\n\n if not face_recognizer.write(Constants.FILE_MODEL):\n return False\n\n return True",
"def yolo_test_file(self):\n # Detect objects\n annotatedImage, predictedObjects = self.detect_from_file(\n self.inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(10)\n # Save annotated image\n if self.saveAnnotatedImage:\n cv2.imwrite(self.outputFile, annotatedImage)\n # Save the parameters of detected objects in xml format\n if self.saveAnnotatedXML:\n xmlFileName = os.path.join(\n self.textOutputFolder,\n self.outputFile.split('.')[0] + '.xml')\n self.save_xml(xmlFileName, predictedObjects)",
"def download_dataset(dataset):\n\n if dataset not in URLS:\n print(f\"unknown dataset {dataset}\")\n sys.exit(0)\n\n filename = f'{dataset}.tar.gz'\n url = URLS[dataset]\n\n if not os.path.exists(filename):\n print(f'downloading dataset \"{dataset}\"')\n os.system(f'curl \"{url}\" -o {filename}')\n else:\n print(f'zipfile \"{filename}\" already exists, remove it if you want to re-download.')\n\n if not os.path.exists(dataset):\n print(f'extracting \"{filename}\"')\n os.system(f'tar -xvf {filename}')\n else:\n print(f'folder \"{dataset}\" already exists, remove it if you want to re-create.')\n\n image_chips = f'{dataset}/image-chips'\n label_chips = f'{dataset}/label-chips'\n if not os.path.exists(image_chips) and not os.path.exists(label_chips):\n print(\"creating chips\")\n libs.images2chips.run(dataset)\n else:\n print(f'chip folders \"{image_chips}\" and \"{label_chips}\" already exist, remove them to recreate chips.')",
"def geolife(redownload: bool = False) -> Dataset:\n return Dataset.get(\"geolife\", redownload=redownload)"
] | [
"0.59466165",
"0.58815235",
"0.58095616",
"0.5797585",
"0.57857496",
"0.57724977",
"0.5553984",
"0.55409586",
"0.5527344",
"0.5513376",
"0.54741013",
"0.5404894",
"0.539826",
"0.53865135",
"0.5356633",
"0.5356331",
"0.53492486",
"0.53430045",
"0.5337607",
"0.5328182",
"0.5300041",
"0.5282351",
"0.52793086",
"0.527427",
"0.52572745",
"0.5255849",
"0.52496636",
"0.52242565",
"0.5222116",
"0.5212714"
] | 0.7823156 | 0 |
Perform 1-2 OT for Bob and return Alice's input list m_c without revealing c. | def Bob_OT(c, l, n=100):
# Error handling.
if c != 0 and c != 1:
raise Exception("Input argument c must be either 0 or 1.")
if l > n:
raise Exception("Input argument l cannot be greater than n.")
# (Step 1)
# Bob runs 1-2 ROT.
s_c = Bob_ROT(c, l, n)
# (Step 3)
# Bob receives (m0 XOR s0) and (m1 XOR s1) from Alice.
with CQCConnection("Bob") as Bob:
data0 = Bob.recvClassical()
xor_0 = list(data0)
data1 = Bob.recvClassical()
xor_1 = list(data1)
# Bob computes m_c.
if c == 0:
xor_c = xor_0
else:
xor_c = xor_1
m_c = []
for i in range(l):
        m_c.append((s_c[i] + xor_c[i]) % 2)
print("Bob outputs m_c.")
return m_c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tickets(people):\n people= [100, 50, 25]",
"def getMutation(AA,Codon):\r\n temp_mutationlist = []\r\n '''create a list of possible triplets within hamming distance 1 '''\r\n for item in INI.genetic_code.keys():\r\n isvalid = INI.isvalidtriplet(item,Codon)\r\n ''' Hamming distance 1, AA is not equal to the given AA,forbid mutation to stopcodon '''\r\n if (isvalid == True and AA !=INI.genetic_code[item] and INI.genetic_code[item]!=\"*\"):\r\n temp_mutationlist.append(item)\r\n \r\n \r\n aalist = []\r\n # generate a list of all possible amino acids resulting from the temp_mutationlist \r\n for item in temp_mutationlist:\r\n if (item in INI.genetic_code):\r\n aalist.append(INI.genetic_code[item])\r\n else:\r\n aalist.append(\"n\")\r\n \r\n return(temp_mutationlist,aalist)",
"def test_list(self):\n output, _err = self.executor.prepare('chat', 'mention', person=['emett', 'lucy']).batch()\n self.assertEqual(output, 'mentioning folks')",
"def mutator(mutate):\r\n @functools.wraps(mutate)\r\n def ecspy_mutator(random, candidates, args):\r\n mutants = []\r\n for i, cs in enumerate(candidates):\r\n mutants.append(mutate(random, cs, args))\r\n return mutants\r\n ecspy_mutator.single_mutation = mutate\r\n return ecspy_mutator",
"def hotpotato(names, num):\n circle = Queue()\n for name in names:\n circle.enqueue(name)\n while circle.size() > 1:\n for i in range(num):\n circle.enqueue(circle.dequeue())\n circle.dequeue()\n return circle.dequeue()",
"def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)",
"def test_when_oppenent_all_Cs(self):\n self.responses_test([C, C, C, C], [C, C, C, C], [C, C, C],\n random_seed=5)",
"def main():\n n, m = map(int, raw_input().split(' '))\n knowledge = []\n for i in xrange(n):\n knowledge.append(raw_input())\n print '\\n'.join(map(str, acm_icpc_team(knowledge)))",
"def problem3(bob, message):\n # raise NotImplemented(\"TODO\")\n \n messageLen = len(message)\n allPhotons = []\n allFilters = []\n correctPhotons = 0\n\n\n while correctPhotons < messageLen*5:\n\n #Generate a random string of photons\n photons = generatePhotons(messageLen)\n #Generate a list len = len(photons) where bob should show what photon he recieved (I decided on every third photon)\n tellList = generateTell(photons)\n\n #transmit the photon list and tellList to bob\n bobFilters = bob.quantum_channel(photons, tellList)\n allPhotons += photons\n allFilters += bobFilters\n\n # Here we check for eve evesdropping\n allPhotons, allFilters = checkForEve(bob, allPhotons, allFilters)\n \n # If we get back Nones, eve is evesdropping and we stop the function as she has been reported\n if allPhotons == None and allFilters == None:\n return \n \n # If eve is not evesdropping we can get disposal instructions and check if our key is long enough\n disposalInstructions, correctPhotons = validateFilters(allPhotons, allFilters)\n \n # If the key is long enough, tell bob what photons to drop and calculate the key\n if correctPhotons >= messageLen*5:\n \n bob.dispose(disposalInstructions)\n key = getKey(allPhotons, disposalInstructions, messageLen)\n\n # only send bob the cipher text if eve is not evesdropping\n ciphertext = otp_encrypt(key, message)\n bob.message(ciphertext)",
"def cointoss(self, mess, args):\n return random.choice(['heads', 'tails'])",
"def interpretBiom(bf, mf, c, OTUIds):\n biom_file = parse_biom_table(bf)\n mapping_file = parse_mapping_file_to_dict(mf)\n mapping_file = mapping_file[0]\n\n category_dict = dict( [ ( key, val [ c ] ) for ( key, val ) in mapping_file.iteritems() ] )\n sorted_category_dict = sorted(category_dict.iteritems(), key = operator.itemgetter(1))\n\n print sorted_category_dict\n\n samp_ids = []\n for vals, ids, md in biom_file.iterSamples():\n samp_ids.append(ids)\n \n samples_present = []\n final_list = []\n count = 0\n counter = 0\n\n # This takes in the list of OTU ID's and matches them with ID from\n # the biom file using the getValueByIds. If it isn't 0 then keep track\n # of it (i.e. the OTU is present in that sample) and do an intersection\n # between all of the said OTU's within each sample. Unfortunately, matching\n # is O(n^2) no matter what.\n for j in OTUIds:\n\tfor id in samp_ids:\n\t for k in j:\n\t \tif int(biom_file.getValueByIds(k, id)) != 0:\n count = count + 1\n if count == len(j):\n samples_present.append(id)\n count = 0\n\t if id == \n counter = counter + 1\n final_list.append(counter)\n\t# temporary hack: used the set function to make a unique list \n\t# I should clear the list after each iteration through the OTUIds\n\t# but it somehow clears the entire list even if I append it before.\n final_list.append(set(samples_present))\n\n # this overcomes the temporary hack and converts from set to list\n for i in xrange(len(final_list)):\n\tif i % 2 != 0:\n\t final_list[i] = list(final_list[i])\n\n\n return final_list",
"def get_friends(character, _info):\n return map(get_character, character.friends)",
"def brute_force_cow_transport(cows,limit=10):\n cows_list=list(cows.items())\n ans=sorted(test_comb(cows_list,limit,0,limit),key=lambda x:len(x))\n return ans",
"def gen_donor():\n# <<<<<<< master\n return [donor for donor in donor_data]",
"def exo6(mu,x0, n, m):\r\n liste=exo3_2(mu,x0,n,m)\r\n listem=[]\r\n listem1=[]\r\n \r\n for i in range(0,len(liste)-1):\r\n listem.append(liste[i])\r\n listem1.append(liste[i+1])\r\n \r\n listem.append(exo2_1(n,mu))\r\n listem1.append(exo2_1(n+1,mu))\r\n \r\n return listem,listem1 #\r",
"def diffiehellman_mitm_sim(prime, base):\n alice = {}\n\n #Alice generates their public key an sends to 'bob'\n alice['dh'] = DiffieHellman(prime, base, secret_key=secrets.randbelow(prime))\n alice_pub = alice['dh'].gen_public_key()\n\n (prime, base, key_for_bob) = yield (prime, base, alice_pub)\n\n \n\n #bob recieves 'alice's' public key, generates their own public key and\n #the shared key. Sends their public key ot 'alice'\n bob = {'dh':DiffieHellman(prime, base, secret_key=secrets.randbelow(prime))}\n bob_pup = bob['dh'].gen_public_key()\n bob['dh'].gen_shared_key(key_for_bob)\n\n key_for_alice = yield bob_pup\n\n ### Alice recieves Bob's public key, generates the shared key and encrypts\n ### message for bob\n\n alice['dh'].gen_shared_key(key_for_alice)\n \n alice['sha1'] = SHA1(bso.int_to_bytes(alice['dh'].shared_key))\n alice['cipher'] = AES_CBC(alice['sha1'].digest()[:16], secrets.token_bytes(16))\n alice_ciphertext = alice['cipher'].encrypt(b'Message to Bob')\n alice_ciphertext += alice['cipher'].IV\n\n ciphertext_for_bob = yield alice_ciphertext\n \n #Bob recieves the ciphertext, decrypts it and send a reply.\n\n bob['sha1'] = SHA1(bso.int_to_bytes(bob['dh'].shared_key))\n bob['cipher'] = AES_CBC(bob['sha1'].digest()[:16], secrets.token_bytes(16))\n bob_ciphertext = bob['cipher'].encrypt(b'Message to Alice')\n bob_ciphertext += bob['cipher'].IV\n\n ciphertext_for_alice = yield bob_ciphertext\n\n ### Finally alice decrypts bobs reply\n\n alice['cipher'].decrypt(ciphertext_for_alice[:-16], ciphertext_for_alice[-16:])",
"def names_interaction():\n already_printed = []\n for protocol in protocols:\n for account in protocol.accounts:\n for contact in account.contacts:\n for message in contact.messages:\n if message.name not in already_printed:\n already_printed.append(message.name)\n print(message.name)\n nicks = input(\"Own nicks, comma separated: \")\n nicks = nicks.split(\",\")\n nicks = [nick.strip() for nick in nicks]\n return nicks",
"def groupMutate(o,number,p):\n results = p.map(mutateAndTest,[o]*int(number))\n return results",
"def main():\n\n prime = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\n base = 2\n \n connection = diffiehellman_mitm_sim(prime, base)\n\n # intercept alices public key\n prime, base , _ = next(connection)\n\n # send prime instead of alices public key to bob. Recieve Bobs public key, \n # which we forget as it is not needs. The shared kill will be 0.\n\n connection.send((prime, base, prime))\n\n #Send prime as bob's public key to alice. We have ensured that the shared\n #hared secret key is 0. Recieve Alice's ciphertext for bob\n ciphertext_a2b = connection.send(prime)\n\n # decrypt\n malcolm = AES_CBC(SHA1(bso.int_to_bytes(0)).digest()[:16], b'0'*16)\n messages = []\n messages.append(bso.remove_padding_pkcs7(malcolm.decrypt(ciphertext_a2b[:-16], ciphertext_a2b[-16:])))\n\n #Send the ciphertext to bob. Recieve his response\n ciphertext_b2a = connection.send(ciphertext_a2b)\n\n messages.append(bso.remove_padding_pkcs7(malcolm.decrypt(ciphertext_b2a[:-16], ciphertext_b2a[-16:])))\n\n assert messages[0] == b'Message to Bob'\n assert messages[1] == b'Message to Alice'\n\n \n return",
"def hello_world(input: Input) -> Output:\n #print(sp.candidates(input.message))sp.candidates(input.message)\n return Output(message=\" \".join(buscador(input.message)))",
"def mewe_misspecified(M,N,m,n,target):\r\n\toutput = []\r\n\tfor k in tqdm(range(0,M)):\r\n\t\t# Allocate space for output\r\n\t\tmewe_store = np.zeros((len(n),target['thetadim']))\r\n\t\tmewe_runtimes = np.zeros(len(n))\r\n\t\tmewe_evals = np.zeros(len(n))\r\n\t\t\r\n\t\t# generate all observations and sets of randomness to be used\r\n\t\t\r\n\t\tif target[\"observed_law\"] == \"Gamma\":\r\n\t\t\tobs_all = np.random.gamma(true_theta[0], true_theta[1],np.max(n))\r\n\t\telif target[\"observed_law\"] == \"Cauchy\":\r\n\t\t\tobs_all = np.random.standard_cauchy(np.max(n))\r\n\t\telse : \r\n\t\t\treturn(\"Not implemented law\")\r\n\t\t\tbreak\r\n\t\t# la ligne du dessus est modifiée pour générer un échantillon contaminé\r\n\t\t\r\n\t\t# generate the synthetic randomness, sort.\r\n\t\t\r\n\t\trandomness = [target['generate_randomness'](m) for i in range(N)]\r\n\t\t\r\n\t\tfor i in range(0,len(n)):\r\n\t\t\t# subset observations and sort\r\n\t\t\tobs = obs_all[:n[i]]\r\n\t\t\tsort_obs = np.sort(obs)\r\n\t\t\tsort_obs_mult = np.repeat(sort_obs, m / n[i], axis = 0)\r\n\t\t\t\r\n\t\t\t# Define the objective to be minimized to find the MEWE\r\n\t\t\t\r\n\t\t\tdef obj1(theta):\r\n\t\t\t\tif(theta[1] < 0 ):\r\n\t\t\t\t\tout = 10e6\r\n\t\t\t\telse :\r\n\t\t\t\t\twass_dists = [target['dist'](sort_obs_mult, np.sort(target['simulation'](theta, x))) for x in randomness]\r\n\t\t\t\t\tout = np.mean(wass_dists)\r\n\t\t\t\t\r\n\t\t\t\treturn out\r\n\t\t\t\t\r\n\t\t\t# Optimization\r\n\t\t\t\r\n\t\t\tt_mewe = time.process_time()\r\n\t\t\tmewe = minimize(fun = obj1, x0 = true_theta)\r\n\t\t\tt_mewe = time.process_time() - t_mewe\r\n\t\t\t\r\n\t\t\t# Save the results\r\n\t\t\tmewe_store[i] = mewe.x\r\n\t\t\tmewe_runtimes[i] = t_mewe\r\n\t\t\tmewe_evals[i] = mewe.nit\r\n\t\t\r\n\t\toutput_cbind = np.c_[mewe_store, mewe_runtimes, mewe_evals, n, np.arange(len(n))]\r\n\t\toutput.append(output_cbind)\r\n\t\t\r\n\treturn output",
"def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n #print(list(cows.items()))\n cows_list=list(cows.items())\n curr_list=[[[0]]]\n for i in range(1,len(cows_list)):\n smaller_fun(curr_list,i,limit,cows_list)\n\n ans =sorted(curr_list,key=lambda x:len(x))\n print(ans)\n ansfinal=[]\n for item in ans:\n trip=[]\n for i in range(len(item)):\n trip.append(cows_list[item[i]][0])\n ansfinal.append(trip)\n return ansfinal",
"def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n pass",
"def operationListReturn(self, a, b, c, operation):\n assert len(a) == len(b) == len(c), 'Length mismatch'\n for i in range(len(a)):\n if isinstance(a[i], list): self.operationListReturn(a[i], b[i], c[i], operation)\n else: c[i] = operation(a[i],b[i])\n return c",
"def within_discussion_comment_and_user_anonymization(comment_gen,\r\n extract_comment_name,\r\n extract_user_name,\r\n anonymous_coward_name):\r\n comment_name_set = list()\r\n user_name_set = list()\r\n\r\n append_comment_name = comment_name_set.append\r\n append_user_name = user_name_set.append\r\n\r\n ####################################################################################################################\r\n # Extract comment and user name from the initial post.\r\n ####################################################################################################################\r\n initial_post = next(comment_gen)\r\n\r\n initial_post_name = extract_comment_name(initial_post)\r\n op_name = extract_user_name(initial_post)\r\n\r\n append_comment_name(initial_post_name)\r\n append_user_name(op_name)\r\n\r\n ####################################################################################################################\r\n # Iterate over all comments.\r\n ####################################################################################################################\r\n for comment in comment_gen:\r\n comment_name = extract_comment_name(comment)\r\n commenter_name = extract_user_name(comment)\r\n\r\n append_comment_name(comment_name)\r\n append_user_name(commenter_name)\r\n\r\n ####################################################################################################################\r\n # Perform anonymization.\r\n ####################################################################################################################\r\n # Remove duplicates and then remove initial post name because we want to give it id 0.\r\n comment_name_set = set(comment_name_set)\r\n comment_name_set.remove(initial_post_name)\r\n\r\n # Remove duplicates and then remove OP because we want to give them id 0.\r\n user_name_set = set(user_name_set)\r\n user_name_set.remove(op_name)\r\n\r\n # Anonymize.\r\n within_discussion_comment_anonymize = dict(zip(comment_name_set, range(1, len(comment_name_set) + 1)))\r\n within_discussion_comment_anonymize[initial_post_name] = 0 # Initial Post gets id 0.\r\n\r\n within_discussion_user_anonymize = dict(zip(user_name_set, range(1, len(user_name_set) + 1)))\r\n within_discussion_user_anonymize[op_name] = 0 # Original Poster gets id 0.\r\n\r\n comment_name_set.add(initial_post_name)\r\n user_name_set.add(op_name)\r\n\r\n if anonymous_coward_name is not None:\r\n # if op_name == anonymous_coward_name:\r\n # print(\"The Original Poster is Anonymous.\")\r\n try:\r\n within_discussion_anonymous_coward = within_discussion_user_anonymize[anonymous_coward_name]\r\n except KeyError:\r\n within_discussion_anonymous_coward = None\r\n else:\r\n within_discussion_anonymous_coward = None\r\n\r\n return comment_name_set,\\\r\n user_name_set,\\\r\n within_discussion_comment_anonymize,\\\r\n within_discussion_user_anonymize,\\\r\n within_discussion_anonymous_coward",
"def moove_character(self, case_list):\n\t\tself.actual_hero.start_moove(case_list)",
"def get_m_male_names_in_2_lists(m):\n\n # 2 lists of 100 strings each: full and first names for males.\n # http://listofrandomnames.com/index.cfm?generated\n list_male_full_names = [\n \"Hans Jacobsen\",\"Nathanael Whitehorn\",\"Everett Yarnall\",\"Marcos Kennedy\",\n \"Ike Mees\",\"Josiah Kucera\",\"Cristopher Regalado\",\"Ricky Minyard\",\n \"Lenard Breese\",\"Erwin Cale\",\"Dennis Litten\",\"Ashley Mcgurk\",\n \"Bobbie Michaelson\",\"Monty Levar\",\"Cristobal Cangelosi\",\"Jacinto Shotts\",\n \"Javier Duncan\",\"Mohammad Crays\",\"Leland Batista\",\"Alexander Wilhelm\",\n \"Broderick Fields\",\"Kennith Drees\",\"Mitchel Oelke\",\"Jeremy Bussard\",\n \"Casey Maynez\",\"Karl Kirschbaum\",\"Wilfredo Durkin\",\"Warner Heatherly\",\n \"Enrique Ricken\",\"Brian Wittig\",\"Alexander Chau\",\"Jamal Warden\",\n \"Jerome Copper\",\"Rosendo Voegele\",\"Hassan Tibbles\",\"Earl Dorrance\",\n \"Teddy Organ\",\"Dorian Barile\",\"Devin Pendergraft\",\"Freeman Coulston\",\n \"Booker Mcminn\",\"Joan Brannen\",\"Dallas Messmer\",\"Miguel Bellefeuille\",\n \"Anthony Tillett\",\"Donald Minier\",\"Carson Lacour\",\"Hubert Ellett\", \n \"Rickey Lyon\",\"Isidro Dublin\",\"Enrique Chausse\",\"Cristobal Vancamp\",\n \"Quintin Bramble\",\"Edmundo Pooser\",\"Landon Sells\",\"Cary Allsup\",\n \"Timmy Fudge\",\"Quentin Tay\",\"Freddy Yant\",\"Billie Lipp\",\n \"Shirley Paff\",\"Monte Stetson\",\"Samuel Perham\",\"Kim Amo\",\n \"Jacob Yankey\",\"Riley Lappin\",\"Andy Houlihan\",\"Arturo Remmers\",\n \"Millard Bachmann\",\"Dylan Woodmansee\",\"Neil Mccarter\",\"Boyce Hurt\",\n \"Rickey Hebel\",\"Dave Worthington\",\"Weldon Nees\",\"Jamaal Selman\",\n \"Austin Kuhlman\",\"Val Neale\",\"Titus Mickelson\",\"Dorsey Northrup\",\n \"Roland Priddy\",\"Antwan Conine\",\"Wilburn Haner\",\"Vern Reams\",\n \"Tanner Jacome\",\"Milford Radebaugh\",\"Vance Heap\",\"Bert Carter\",\n \"Sang Brobst\",\"Ellsworth Haws\",\"Willard Cheers\",\"Fredrick Luther\",\n \"Jeremiah Vicario\",\"Bobbie Vanderhoff\",\"Loren Soliday\",\"John Stiverson\",\n \"Barney Hadley\",\"David Wadleigh\",\"Reuben Mccann\",\"Darius Hunter\"\n ]\n \n list_male_first_names = [\n \"Hans\",\"Nathanael\",\"Everett\",\"Marcos\",\"Ike\",\"Josiah\",\"Cristopher\",\"Ricky\",\n \"Lenard\",\"Erwin\",\"Dennis\",\"Ashley\",\"Bobbie\",\"Monty\",\"Cristobal\",\"Jacinto\",\n \"Javier\",\"Mohammad\",\"Leland\",\"Alexander\",\"Broderick\",\"Kennith\",\"Mitchel\",\"Jeremy\",\n \"Casey\",\"Karl\",\"Wilfredo\",\"Warner\",\"Enrique\",\"Brian\",\"Alexander\",\"Jamal\",\n \"Jerome\",\"Rosendo\",\"Hassan\",\"Earl\",\"Teddy\",\"Dorian\",\"Devin\",\"Freeman\",\n \"Booker\",\"Joan\",\"Dallas\",\"Miguel\",\"Anthony\",\"Donald\",\"Carson\",\"Hubert\",\n \"Rickey\",\"Isidro\",\"Enrique\",\"Cristobal\",\"Quintin\",\"Edmundo\",\"Landon\",\"Cary\",\n \"Timmy\",\"Quentin\",\"Freddy\",\"Billie\",\"Shirley\",\"Monte\",\"Samuel\",\"Kim\",\n \"Jacob\",\"Riley\",\"Andy\",\"Arturo\",\"Millard\",\"Dylan\",\"Neil\",\"Boyce\",\n \"Rickey\",\"Dave\",\"Weldon\",\"Jamaal\",\"Austin\",\"Val\",\"Titus\",\"Dorsey\",\n \"Roland\",\"Antwan\",\"Wilburn\",\"Vern\",\"Tanner\",\"Milford\",\"Vance\",\"Bert\",\n \"Sang\",\"Ellsworth\",\"Willard\",\"Fredrick\",\"Jeremiah\",\"Bobbie\",\"Loren\",\"John\",\n \"Barney\",\"David\",\"Reuben\",\"Darius\"\n ]\n\n return {\n \"m_full_names\": list_male_full_names[:m], \n \"m_first_names\": list_male_first_names[:m]\n }",
"def exercise_b2_98():\r\n pass",
"def challenge() : \n\treturn [random.randint(1,9) for i in range(5)]",
"def hot_potato(name_list, num):\n queue = ArrayQueue()\n for i in name_list:\n queue.enqueue(i)\n\n stop = False\n while not stop:\n for i in range(1,num):\n s = queue.dequeue()\n queue.enqueue(s)\n print(s)\n print()\n print(\" Drop it {0}\".format(queue.dequeue()))\n if queue.__len__() == 1:\n return queue.dequeue()"
] | [
"0.50222504",
"0.4935165",
"0.48622358",
"0.48220435",
"0.47618267",
"0.47593555",
"0.47593555",
"0.4643787",
"0.46382758",
"0.46354654",
"0.4608568",
"0.46082112",
"0.45961824",
"0.45707282",
"0.45663276",
"0.45569414",
"0.4548844",
"0.4518503",
"0.44995117",
"0.44929427",
"0.44911963",
"0.44853035",
"0.4436276",
"0.44352788",
"0.44332284",
"0.44247937",
"0.4416471",
"0.44014162",
"0.439421",
"0.43783465"
] | 0.6884154 | 0 |
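The Bob_OT row above only shows the receiver's side; its final step is plain XOR unmasking of whichever masked message matches Bob's choice bit. Below is a minimal, self-contained sketch of just that step, with the quantum/classical transport (CQCConnection, Bob_ROT) deliberately left out and all bit values invented for illustration.

# Sketch of the unmasking step from Bob_OT: Alice sends (m0 XOR s0) and
# (m1 XOR s1); Bob, who knows only s_c, recovers m_c and learns nothing
# about the other message. Values below are made up for illustration.

def unmask(s_c, xor_c):
    # Addition mod 2 is bitwise XOR, exactly as in the snippet above.
    return [(s + x) % 2 for s, x in zip(s_c, xor_c)]

if __name__ == "__main__":
    m0, m1 = [1, 0, 1, 1], [0, 0, 1, 0]
    s0, s1 = [1, 1, 0, 1], [0, 1, 1, 1]
    c = 1
    masked = [[m ^ s for m, s in zip(m0, s0)],
              [m ^ s for m, s in zip(m1, s1)]]
    s_c = s1 if c == 1 else s0
    assert unmask(s_c, masked[c]) == (m1 if c == 1 else m0)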
Start a daemon of the given daemon class. | def run(self, name: str, daemon_class: object, **kwargs) -> None:
if name in self._running_daemons:
raise AlreadyRunningDaemon(
'Daemon with name "{0}" already running'.format(name)
)
logger.info(self, 'Starting daemon with name "{0}" and class "{1}" ...'
.format(name, daemon_class))
daemon = daemon_class(name=name, kwargs=kwargs, daemon=True)
daemon.start()
self._running_daemons[name] = daemon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_daemon(self, *args, **kwargs):\n pass",
"def daemon(self):\n obj = self.subparsers.add_parser(\"daemon\", help=\"Daemon scripts\")\n obj.add_argument(\n \"daemon_type\",\n # default=\"all\",\n # const=\"all\",\n nargs=1,\n choices=[\"ipfs\", \"slurm\"],\n help=\"Select program to run as a daemon on the background\",\n )",
"def __new__(\n cls,\n *args,\n daemon_id : Optional[str] = None,\n **kw\n ):\n instance = super(Daemon, cls).__new__(cls)\n if daemon_id is not None:\n instance.daemon_id = daemon_id\n if instance.pickle_path.exists():\n instance = instance.read_pickle()\n return instance",
"def start_daemon(config):\n interval = config.get(\"REFRESH_INTERVAL\", 600)\n target_language = config.get(\"TARGET_LANGUAGES\", (\"PT\", ))\n source_language = config.get(\"SOURCE_LANGUAGE\", \"EN\")\n story_collection = config.get(\"STORY_COLLECTION\", \"articles\")\n hn_daemon = HackerNewsDaemon(\n interval,\n source_language,\n target_language,\n story_collection\n )\n hn_daemon.run()\n LOGGER.info(\"Started daemon with time interval {}.\".format(interval))",
"def start_daemon(args):\n\n context = daemon.DaemonContext(\n working_directory='/var/tmp',\n umask=0o002,\n pidfile=pidfile.TimeoutPIDLockFile(args.pid_file),\n )\n\n context.signal_map = {\n signal.SIGHUP: receive_signal,\n signal.SIGINT: receive_signal,\n signal.SIGQUIT: receive_signal,\n signal.SIGTERM: receive_signal,\n }\n\n with context:\n start_bme280_sensor(args)",
"def launch_new_pantsd_instance():\n\n options_bootstrapper = OptionsBootstrapper.create(\n env=os.environ, args=sys.argv, allow_pantsrc=True\n )\n daemon = PantsDaemon.create(options_bootstrapper)\n daemon.run_sync()",
"def daemon_main():\n # handle SIGTERM gracefully\n signal.signal(signal.SIGTERM, sigterm)\n\n try:\n dispatcher = dispatcher_type(args.server,\n args.dispatch_uri,\n daemon.logger,\n args.cafile)\n except Exception as e:\n daemon.logger.error(\"Startup error: {}\".format(e))\n sys.exit(1)\n mochad_client = MochadClient(args.server, daemon.logger, dispatcher)\n global loop\n loop = asyncio.get_event_loop()\n # dispatcher.watchdog() runs continuously to monitor the dispatcher's health\n # and act on any problems asyncronously\n asyncio.async(dispatcher.watchdog(loop))\n asyncio.async(mochad_client.worker(loop))\n loop.run_forever()",
"def __init__(\n self, callback=None, daemon_params=None, is_worker=True,\n daemon_class=Daemon, daemon=None, **kwargs):\n daemon_params = daemon_params or {}\n if daemon is None:\n self.daemon = daemon_class(**daemon_params)\n else:\n self.daemon = daemon\n\n self.is_worker = (\n is_worker and callback is not None and callable(callback))\n\n if ((not self.daemon.worker or not callable(self.daemon.worker)) and\n self.is_worker):\n # If the callback is the worker, then don't pass the\n # callback to the parent class so we don't call it twice\n self.daemon.worker = callback\n callback = None\n\n # The context object will be the Daemon object\n context_settings = {'obj': self.daemon}\n\n if not kwargs.get('help'):\n kwargs['help'] = self.daemon.worker.__doc__\n\n super(DaemonCLI, self).__init__(\n callback=callback, context_settings=context_settings, **kwargs)",
"def daemon_thread_builder(target, args=()):\r\n th = threading.Thread(target=target, args=args)\r\n th.setDaemon(True)\r\n return th",
"def daemon_thread_builder(target, args=()) -> threading.Thread:\n th = threading.Thread(target=target, args=args)\n th.setDaemon(True)\n return th",
"def daemon_thread_builder(target, args=()) -> threading.Thread:\n th = threading.Thread(target=target, args=args)\n th.setDaemon(True)\n return th",
"def daemon_thread_builder(target, args=()) -> threading.Thread:\n th = threading.Thread(target=target, args=args)\n th.setDaemon(True)\n return th",
"def daemon_thread_builder(target, args=()) -> threading.Thread:\n th = threading.Thread(target=target, args=args)\n th.setDaemon(True)\n return th",
"def start(self):\n status = is_pidfile_stale(self.pidfile) \n if status == True:\n self.pidfile.break_lock()\n elif status == False:\n ## Allow only one instance of the daemon\n pid = self.pidfile.read_pid()\n logger.info(\"Daemon already running with PID %(pid)r\" % vars())\n return\n \n try:\n self.daemon_context.open()\n except lockfile.AlreadyLocked:\n pidfile_path = self.pidfile.path\n logger.info(\"PID file %(pidfile_path)r already locked\" % vars())\n return\n pid = os.getpid()\n logger.info('Daemon started with pid %(pid)d' % vars())\n\n self.run()",
"def _get_configured_daemon(self, daemon_id='', daemon_prefix=''):\n\n daemon_uri_dict = {}\n pyro_daemon = Pyro4.Daemon()\n\n Pyro4.config.THREADPOOL_SIZE_MIN = 10\n Pyro4.config.THREADPOOL_SIZE = 200\n\n daemon_lib_path = ConfigUtil().get_prefix_lib_path(prefix=daemon_prefix, package='pyswitchlib')\n\n if daemon_lib_path:\n sys.prefix = daemon_prefix\n sys.exec_prefix = daemon_prefix\n sys.path.insert(0, daemon_lib_path)\n\n pyswitchlib_api_create = __import__('pyswitchlib.api.create', fromlist=['*'])\n pyswitchlib_api_update = __import__('pyswitchlib.api.update', fromlist=['*'])\n pyswitchlib_api_delete = __import__('pyswitchlib.api.delete', fromlist=['*'])\n pyswitchlib_api_get = __import__('pyswitchlib.api.get', fromlist=['*'])\n pyswitchlib_api_rpc = __import__('pyswitchlib.api.rpc', fromlist=['*'])\n\n map(lambda filtered_api: setattr(PySwitchLibApiDaemon, filtered_api[0], filtered_api[1]), filter(lambda api: '__' not in api[0], pyswitchlib_api_create.__dict__.items()))\n map(lambda filtered_api: setattr(PySwitchLibApiDaemon, filtered_api[0], filtered_api[1]), filter(lambda api: '__' not in api[0], pyswitchlib_api_update.__dict__.items()))\n map(lambda filtered_api: setattr(PySwitchLibApiDaemon, filtered_api[0], filtered_api[1]), filter(lambda api: '__' not in api[0], pyswitchlib_api_delete.__dict__.items()))\n map(lambda filtered_api: setattr(PySwitchLibApiDaemon, filtered_api[0], filtered_api[1]), filter(lambda api: '__' not in api[0], pyswitchlib_api_get.__dict__.items()))\n map(lambda filtered_api: setattr(PySwitchLibApiDaemon, filtered_api[0], filtered_api[1]), filter(lambda api: '__' not in api[0], pyswitchlib_api_rpc.__dict__.items()))\n\n api_exposed_class = Pyro4.expose(PySwitchLibApiDaemon)\n daemon_obj = api_exposed_class(pyro_daemon=pyro_daemon)\n\n uri = pyro_daemon.register(daemon_obj, force=True)\n\n daemon_uri_dict[daemon_id] = uri\n\n ConfigFileUtil().write(filename=pyswitchlib_ns_daemon_file, conf_dict=daemon_uri_dict)\n\n return pyro_daemon, uri",
"def generateDaemonizer(working_dir=\".\"):\n py_template = \"\"\"#!/usr/bin/python\n\nimport daemon\nimport subprocess\n\nwith daemon.DaemonContext(working_directory=\".\"):\n proc = subprocess.Popen([\"nohup\", \"bash\", \"run.sh\"])\n\"\"\"\n py_sh = open(os.path.join(working_dir, \"daemonize.py\"), \"w\")\n py_sh.write(py_template)\n \n py_sh.close()\n return py_sh",
"def start(self, detach=True):\n\n with daemon.DaemonContext(\n detach_process=detach,\n working_directory=self.root,\n pidfile=daemon.pidfile.PIDLockFile(self.pidfile),\n stdout=(None if detach else sys.stdout),\n stderr=(None if detach else sys.stderr),\n ):\n self.run(detach)",
"def start_daemon(self):\n LOGGER.info(\"starting uploader daemon\")\n\n global RUN_STATE\n RUN_STATE.value = self.STATE_RUNNING\n\n # Record the start time of instantiation, so that we can report uptime\n self._start_time = time.time()\n\n # Create and start all workers\n self._workers = self._create_workers(start=True)\n LOGGER.debug(\"Started workers:\\n\\t%s\",\n \"\\n\\t\".join(sorted([w.name for w in self._workers])))",
"def run():\n\t\tsys.stderr.write(\"Error: Daemon.run() has not been overwritten, exiting...\\n\")\n\t\tself.stop()\n\t\tsys.exit(1)",
"def _start_thread(self, fn, daemon=False):\n daemon = Thread(target=fn, daemon=daemon)\n daemon.start()",
"def getDaemon(self, start, count):\n fakeEnvironment = self.initializeEnvironment(count, os.getpid())\n return ListenFDs.fromEnvironment(environ=fakeEnvironment, start=start)",
"def main():\n try:\n if get_global_option('daemon'):\n daemon = DynamicDynamoDBDaemon(\n '{0}/dynamic-dynamodb.{1}.pid'.format(\n get_global_option('pid_file_dir'),\n get_global_option('instance')))\n\n if get_global_option('daemon') == 'start':\n logger.debug('Starting daemon')\n try:\n daemon.start()\n logger.info('Daemon started')\n except IOError as error:\n logger.error('Could not create pid file: {0}'.format(error))\n logger.error('Daemon not started')\n elif get_global_option('daemon') == 'stop':\n logger.debug('Stopping daemon')\n daemon.stop()\n logger.info('Daemon stopped')\n sys.exit(0)\n\n elif get_global_option('daemon') == 'restart':\n logger.debug('Restarting daemon')\n daemon.restart()\n logger.info('Daemon restarted')\n\n elif get_global_option('daemon') in ['foreground', 'fg']:\n logger.debug('Starting daemon in foreground')\n daemon.run()\n logger.info('Daemon started in foreground')\n\n else:\n print(\n 'Valid options for --daemon are start, '\n 'stop, restart, and foreground')\n sys.exit(1)\n else:\n if get_global_option('run_once'):\n execute()\n else:\n while True:\n execute()\n\n except Exception as error:\n logger.exception(error)",
"def daemon_main(main_func, argv=None, pidfile=None):\n if argv is None:\n argv = sys.argv\n if pidfile is None:\n pidfile = '/tmp/{}.pid'.format(argv[0])\n\n if len(argv) < 2 or argv[1] not in ('start', 'stop'):\n print(\"Usage: {} [start|stop]\".format(argv[0]))\n raise SystemExit(1)\n\n if argv[1] == 'start':\n daemonize(pidfile)\n main_func()\n elif argv[1] == 'stop':\n if os.path.exists(pidfile):\n with open(pidfile) as fobj:\n os.kill(int(fobj.read()), signal.SIGTERM)\n else:\n print(\"Not running\")\n raise SystemExit(1)\n else:\n print(\"Unknown command\")\n raise SystemExit(1)",
"def main(cls):\n parser = argparse.ArgumentParser(\n description='Server for the {} SOA service'.format(cls.service_name),\n )\n parser.add_argument(\n '-d', '--daemon',\n action='store_true',\n help='run the server process as a daemon',\n )\n if not cls.use_django:\n # If Django mode is turned on, we use the Django settings framework\n # to get our settings, so the caller needs to set DJANGO_SETTINGS_MODULE.\n parser.add_argument(\n '-s', '--settings',\n help='The settings file to use',\n required=True,\n )\n cmd_options, _ = parser.parse_known_args(sys.argv[1:])\n\n # Load settings from the given file (or use Django and grab from its settings)\n if cls.use_django:\n # noinspection PyUnresolvedReferences\n from django.conf import settings as django_settings\n try:\n settings = cls.settings_class(django_settings.SOA_SERVER_SETTINGS)\n except AttributeError:\n raise ValueError('Cannot find SOA_SERVER_SETTINGS in the Django settings')\n else:\n try:\n settings_module = importlib.import_module(cmd_options.settings)\n except ImportError as e:\n raise ValueError('Cannot import settings module %s: %s' % (cmd_options.settings, e))\n try:\n settings_dict = getattr(settings_module, 'SOA_SERVER_SETTINGS')\n except AttributeError:\n try:\n settings_dict = getattr(settings_module, 'settings')\n except AttributeError:\n raise ValueError(\n \"Cannot find 'SOA_SERVER_SETTINGS' or 'settings' variable in settings module {}.\".format(\n cmd_options.settings,\n )\n )\n settings = cls.settings_class(settings_dict)\n\n PySOALogContextFilter.set_service_name(cls.service_name)\n\n # Set up logging\n logging.config.dictConfig(settings['logging'])\n\n # Optionally daemonize\n if cmd_options.daemon:\n pid = os.fork()\n if pid > 0:\n print('PID={}'.format(pid))\n sys.exit()\n\n # Set up server and signal handling\n server = cls(settings)\n\n # Start server event loop\n server.run()",
"def startdaemon_command(chat, message, args):\n start_daemon= os.popen(path_to_bin + \"/bitcannad -daemon\").read()\n print(\"Result:\", start_daemon)\n chat.send('Output: \\n' + start_daemon)",
"def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()",
"def main():\n\n\n\n\tdaemon = ORsched(scheduler_config.pidfile, stdout=scheduler_config.outstream, stderr=scheduler_config.outstream)\n\ttry:\n\t\topts, list = getopt.getopt(sys.argv[1:], 'st')\n\texcept getopt.GetoptError, e:\n\t\tprint(\"Bad options provided!\")\n\t\tsys.exit()\n\n\tfor opt, a in opts:\n\t\tif opt == \"-s\":\n\t\t\ttry:\n\t\t\t\tpid_number = open(scheduler_config.pidfile,'r').readline()\n\t\t\t\tif pid_number:\n \t\t\t\tsys.exit('Daemon is already running!')\n \t\texcept Exception, e:\n \t\t\tpass\n\n\t\t\tprint(\"Starting daemon...!\")\n\t\t\tdaemon.start()\n\t\telif opt == \"-t\":\n\t\t\tdaemon.stop()\n\t\t\tprint \"The daemon is stoped!\"\n\t\telse:\n\t\t\tprint(\"Option %s not supported!\" % (opt))",
"def __init__(self, pyswitchlib_conf=None, daemon_id='default'):\n\n self._pyswitchlib_conf = pyswitchlib_conf\n self._daemon_id = daemon_id\n self._daemon_prefix = ConfigUtil().get_prefix_for_daemon_id(daemon_id=self._daemon_id, conf_dict=self._pyswitchlib_conf)\n self._daemon_thread = None\n self._pyro_ns_port = None\n\n if self._pyswitchlib_conf:\n if 'ns_port' in self._pyswitchlib_conf:\n self._pyro_ns_port = int(self._pyswitchlib_conf['ns_port'])\n\n if self._daemon_thread == None:\n self._daemon_thread = threading.Thread(target=self._daemon_loop, kwargs={'daemon_id': self._daemon_id, 'daemon_prefix':self._daemon_prefix, 'pyro_ns_port': self._pyro_ns_port})\n self._daemon_thread.daemon = True\n\n self.stdin_path = os.path.join(os.sep, 'dev', 'null')\n self.stdout_path = os.path.join(os.sep, 'dev', 'null')\n self.stderr_path = os.path.join(os.sep, 'dev', 'null')\n self.pidfile_path = ConfigUtil().get_pidfilename_for_daemon_id(daemon_id=self._daemon_id, conf_dict=self._pyswitchlib_conf)\n self.pidfile_timeout = 1\n\n super(PySwitchLibApiDaemonRunner, self).__init__(self)",
"def set_daemon_running(self, status):\n if status:\n log.debug(\"The DHCP daemon is running\")\n else:\n log.debug(\"The DHCP daemon is NOT running\")\n\n self.daemon_running = status\n\n # XXX: write the network log\n\n return defer.succeed(None)",
"def _start(self):\n if is_pidfile_stale(self.pidfile):\n self.pidfile.break_lock()\n\n try:\n self.daemon_context.open()\n except pidlockfile.AlreadyLocked:\n pidfile_path = self.pidfile.path\n raise DaemonRunnerStartFailureError(\n \"PID file %(pidfile_path)r already locked\" % vars())\n\n pid = os.getpid()\n message = self.start_message % vars()\n emit_message(message)\n\n self.app.run()"
] | [
"0.73497087",
"0.70751816",
"0.66535014",
"0.62650317",
"0.6090769",
"0.6031981",
"0.5863433",
"0.5808442",
"0.5699542",
"0.56858295",
"0.56858295",
"0.56858295",
"0.56858295",
"0.56688225",
"0.5592923",
"0.558033",
"0.54989725",
"0.5492751",
"0.5471203",
"0.54689485",
"0.5437582",
"0.54153264",
"0.5405622",
"0.5403799",
"0.53953683",
"0.53910863",
"0.53448427",
"0.5338578",
"0.5332857",
"0.53287846"
] | 0.77797806 | 0 |
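The run() snippet above only constrains daemon_class implicitly: its constructor must accept name=, kwargs= and daemon=, and it must expose start() plus, for the companion stop rows that follow, stop() and join(). A hypothetical threading-based class meeting that contract is sketched below; the name ExampleDaemon and its idle loop are assumptions for illustration, not taken from the original codebase.

import threading
import time

class ExampleDaemon(threading.Thread):
    # Matches the call daemon_class(name=name, kwargs=kwargs, daemon=True)
    # made by run() above; start() and join() are inherited from Thread.
    def __init__(self, name, kwargs, daemon=True):
        super().__init__(name=name, daemon=daemon)
        self._kwargs = kwargs
        self._stop_event = threading.Event()

    def run(self):
        while not self._stop_event.is_set():
            time.sleep(0.1)  # a real daemon would do its work here

    def stop(self):
        # Cooperative shutdown so the manager's join() returns promptly.
        self._stop_event.set()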
Stop the daemon with the given name and wait for it, where name is the name the daemon was given when started with the run method. | def stop(self, name: str) -> None:
if name in self._running_daemons:
logger.info(self, 'Stopping daemon with name "{0}" ...'
.format(name))
self._running_daemons[name].stop()
self._running_daemons[name].join()
del self._running_daemons[name]
logger.info(self, 'Stopping daemon with name "{0}": OK'
.format(name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop(name):\n __salt__[\"file.touch\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -d {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)",
"def stop(self):\n \n\n if os.path.isfile(self.pidfilename):\n\n with open(self.pidfilename) as f:\n data = json.load(f)\n pid = data['pid']\n os.kill(int(pid), signal.SIGTERM)\n\n # Check that the process has been killed\n # Give up after 15 seconds\n for i in range(15):\n if int(pid) not in psutil.pids():\n\n return True\n time.sleep(1)\n return False\n\n # If the daemon is not currently running, do nothing\n else:\n log(\"The daemon is not currently running\")",
"def stop_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"stop\", service_name])",
"def stop(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.stop_server(server)\n return r",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()",
"def daemonControlStop (self):\n self.stop()",
"def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)",
"def stopProcess(self, name, wait=True):\r\n self._update('stopProcess')\r\n\r\n group, process = self._getGroupAndProcess(name)\r\n\r\n if process is None:\r\n group_name, process_name = split_namespec(name)\r\n return self.stopProcessGroup(group_name, wait)\r\n\r\n stopped = []\r\n called = []\r\n\r\n def killit():\r\n if not called:\r\n if process.get_state() not in RUNNING_STATES:\r\n raise RPCError(Faults.NOT_RUNNING)\r\n # use a mutable for lexical scoping; see startProcess\r\n called.append(1)\r\n\r\n if not stopped:\r\n msg = process.stop()\r\n if msg is not None:\r\n raise RPCError(Faults.FAILED, msg)\r\n stopped.append(1)\r\n\r\n if wait:\r\n return NOT_DONE_YET\r\n else:\r\n return True\r\n\r\n if process.get_state() not in (ProcessStates.STOPPED,\r\n ProcessStates.EXITED):\r\n return NOT_DONE_YET\r\n else:\r\n return True\r\n\r\n killit.delay = 0.2\r\n killit.rpcinterface = self\r\n return killit # deferred\r",
"def stop(self):\n self._listeners = None\n\n try:\n if self._started_daemon:\n logging.info('Stopping Transmission daemon')\n exec_cmd(['transmission-remote', '--exit'], wait_after=2)\n\n except subprocess.CalledProcessError:\n logging.error('Unable to stop daemon')\n logging.debug('Error details', stack_info=True, exc_info=True)\n\n self._done = True",
"def stop(self, exit_status, exit_message=None):\n if self.daemon_state == 'started':\n self.running = 0\n self.daemon_state = 'down'\n self.save_local_dir_state()\n if exit_message:\n print exit_message\n exit(exit_status)",
"def stop_daemon(api_port=None, api_host=None):\n if api_port is not None:\n port = api_port\n else:\n port = _api_port()\n if api_host is not None:\n host = api_host\n else:\n host = _api_host()\n url = f'http://{host}:{port}/halt'\n try:\n x = _http_get_json(url)\n except:\n return False\n return x.get('success')",
"def _stop(self):\n if not self.pidfile.is_locked():\n pidfile_path = self.pidfile.path\n raise DaemonRunnerStopFailureError(\n \"PID file %(pidfile_path)r not locked\" % vars())\n\n if is_pidfile_stale(self.pidfile):\n self.pidfile.break_lock()\n else:\n self._terminate_daemon_process()",
"def stop(self):\n if not self.pidfile.is_locked():\n pidfile_path = self.pidfile.path\n logger.info(\"PID file %(pidfile_path)r not locked\" % vars())\n return\n \n if is_pidfile_stale(self.pidfile):\n self.pidfile.break_lock()\n else:\n self._terminate_daemon_process()\n self.pidfile.break_lock()\n logger.info(\"Daemon stopped\")",
"def stop(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.stop_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def stopProcessGroup(self, name, wait=True):\r\n self._update('stopProcessGroup')\r\n\r\n group = self.supervisord.process_groups.get(name)\r\n\r\n if group is None:\r\n raise RPCError(Faults.BAD_NAME, name)\r\n\r\n processes = list(group.processes.values())\r\n processes.sort()\r\n processes = [ (group, process) for process in processes ]\r\n\r\n killall = make_allfunc(processes, isRunning, self.stopProcess,\r\n wait=wait)\r\n\r\n killall.delay = 0.05\r\n killall.rpcinterface = self\r\n return killall # deferred\r",
"def destroy(name, stop=False, path=None):\n _ensure_exists(name, path=path)\n if not stop and state(name, path=path) != \"stopped\":\n raise CommandExecutionError(f\"Container '{name}' is not stopped\")\n return _change_state(\"lxc-destroy\", name, None, path=path)",
"def stop(name, kill=False, path=None, use_vt=None):\n _ensure_exists(name, path=path)\n orig_state = state(name, path=path)\n if orig_state == \"frozen\" and not kill:\n # Gracefully stopping a frozen container is slower than unfreezing and\n # then stopping it (at least in my testing), so if we're not\n # force-stopping the container, unfreeze it first.\n unfreeze(name, path=path)\n cmd = \"lxc-stop\"\n if kill:\n cmd += \" -k\"\n ret = _change_state(cmd, name, \"stopped\", use_vt=use_vt, path=path)\n ret[\"state\"][\"old\"] = orig_state\n return ret",
"def _terminate_daemon_process(self):\n pid = self.pidfile.read_pid()\n try:\n os.kill(pid, signal.SIGTERM)\n except OSError, exc:\n raise DaemonRunnerStopFailureError(\n \"Failed to terminate %(pid)d: %(exc)s\" % vars())",
"def run():\n\t\tsys.stderr.write(\"Error: Daemon.run() has not been overwritten, exiting...\\n\")\n\t\tself.stop()\n\t\tsys.exit(1)",
"def stop_server(manager):\n if not manager.is_daemon:\n return\n web_server = WebServer()\n if web_server.is_alive():\n web_server.stop()",
"def stop_server(manager):\n if not manager.is_daemon:\n return\n web_server = WebServer()\n if web_server.is_alive():\n web_server.stop()",
"def stop(self):\n self.should_run = False\n if self.is_alive():\n self.join()",
"def _stop(self):\n\n if self._daemon_id:\n pyro_proxy_name = 'PySwitchLib.' + self._daemon_id\n uri = None\n\n try:\n with Pyro4.locateNS(host='localhost', port=self._pyro_ns_port) as ns:\n try:\n uri = ns.lookup(pyro_proxy_name)\n except:\n pass\n\n if uri:\n ns.remove(pyro_proxy_name)\n except:\n pass\n finally:\n ns_daemon_dict = ConfigFileUtil().read(filename=pyswitchlib_ns_daemon_file)\n\n if self._daemon_id in ns_daemon_dict:\n uri = ns_daemon_dict[self._daemon_id]\n del ns_daemon_dict[self._daemon_id]\n\n if len(ns_daemon_dict):\n ConfigFileUtil().write(filename=pyswitchlib_ns_daemon_file, conf_dict=ns_daemon_dict, do_merge=False)\n else:\n try:\n os.unlink(pyswitchlib_ns_daemon_file)\n except:\n pass\n\n if uri:\n try:\n with Pyro4.Proxy(uri) as pyro_proxy:\n pyro_proxy.shutdown()\n pyro_proxy._pyroRelease()\n except:\n pass\n\n super(PySwitchLibApiDaemonRunner, self)._stop()",
"def processEnded(self, name):\n # Cancel the scheduled _forceStopProcess function if the process\n # dies naturally\n if name in self.murder:\n if self.murder[name].active():\n self.murder[name].cancel()\n del self.murder[name]\n\n self.processes[name][0].stopped()\n\n del self.protocols[name]\n\n if self._reactor.seconds() - self.timeStarted[name] < self.threshold:\n # The process died too fast - back off\n nextDelay = self.delay[name]\n self.delay[name] = min(self.delay[name] * 2, self.maxRestartDelay)\n\n else:\n # Process had been running for a significant amount of time\n # restart immediately\n nextDelay = 0\n self.delay[name] = self.minRestartDelay\n\n # Schedule a process restart if the service is running\n if self.running and name in self.processes:\n self.restart[name] = self._reactor.callLater(nextDelay,\n self.startProcess,\n name)\n if self.stopping:\n deferred = self.deferreds.pop(name, None)\n if deferred is not None:\n deferred.callback(None)"
] | [
"0.6745563",
"0.6272498",
"0.61989063",
"0.61097825",
"0.61078",
"0.61078",
"0.61078",
"0.61078",
"0.61078",
"0.61078",
"0.6076316",
"0.60686696",
"0.5986718",
"0.59339917",
"0.59136623",
"0.58186764",
"0.5801948",
"0.5795598",
"0.57658505",
"0.5718274",
"0.56732404",
"0.56692153",
"0.56423384",
"0.56354547",
"0.55906117",
"0.55717087",
"0.55717087",
"0.5571227",
"0.55163604",
"0.55138886"
] | 0.8205817 | 0 |
Stop all started daemons and wait for them. | def stop_all(self) -> None:
logger.info(self, 'Stopping all daemons')
for name, daemon in self._running_daemons.items():
logger.info(self, 'Stopping daemon "{0}" ...'.format(name))
daemon.stop()
for name, daemon in self._running_daemons.items():
logger.info(
self,
'Stopping daemon "{0}" waiting confirmation'.format(name),
)
daemon.join()
logger.info(self, 'Stopping daemon "{0}" OK'.format(name))
self._running_daemons = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopdaemons(self):\n # TODO: we may want to improve this if we had the PIDs from the\n # specific EMANE daemons that we\"ve started\n cmd = [\"killall\", \"-q\", \"emane\"]\n stop_emane_on_host = False\n if emane.VERSION > emane.EMANE091:\n for node in self.getnodes():\n if hasattr(node, \"transport_type\") and \\\n node.transport_type == \"raw\":\n stop_emane_on_host = True\n continue\n if node.up:\n node.cmd(cmd, wait=False)\n # TODO: RJ45 node\n else:\n stop_emane_on_host = True\n if stop_emane_on_host:\n subprocess.call(cmd)\n subprocess.call([\"killall\", \"-q\", \"emanetransportd\"])",
"def stop(self):\r\n for srv in self._servers:\r\n srv.stop()",
"def stop(self):\n for process in self.process:\n process.stop()",
"def stop_daemon(self):\n\n # Cycle through each worker, and change the share object's state\n # value to \"stopping\n for worker, run_state in self._workers.iteritems():\n LOGGER.debug(\"changing %s from %s to %s\", worker.name,\n run_state.value, self.STATE_STOPPING)\n run_state.value = self.STATE_STOPPING\n\n # Join the workers. It's generally good practice to do this.\n # Otherwise the parent process can exit (and return control\n # back to shell) before the child processes exit (creating\n # zombie processes). see here:\n # https://docs.python.org/2/library/multiprocessing.html#all-platforms\n for wrk in self._workers:\n wrk.join()\n\n LOGGER.debug(\"All procs exited:\\n\\t%s\",\n \"\\n\\t\".join(sorted([w.name for w in self._workers])))\n\n # Log out the uptime of the daemon\n self.log_uptime()",
"def stop(self):\n for module in self.asynchronous:\n module.stop()",
"def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)",
"def wait(self):\n [p.join() for p in self._downloaders]\n self._ckq.join()\n [p.terminate() for p in self._checkers]\n [p.join() for p in self._checkers]",
"def cleanup_manager(self) -> None:\n \n for p in self.process_list:\n if p.is_alive():\n p.terminate()\n sleep(1)\n p.close()",
"def stop_all():\n subprocess.check_call(\n ['./run.py --down'], shell=True,\n cwd=orc8_docker_path,\n )\n subprocess.check_call(\n 'docker-compose down', shell=True,\n cwd=feg_docker_integ_test_path,\n )\n subprocess.check_call(\n 'vagrant halt magma', shell=True,\n cwd=agw_path,\n )",
"def Stop(self, wait_for_client=False):\n if wait_for_client:\n status = False\n while not status:\n status = True\n for client in self.client_list:\n status = status and client.host.Poll(client.child_pid)\n for client in self.client_list:\n client.Stop()\n for server in self.server_list:\n server.Stop()",
"def stop(self):\n self._listeners = None\n\n try:\n if self._started_daemon:\n logging.info('Stopping Transmission daemon')\n exec_cmd(['transmission-remote', '--exit'], wait_after=2)\n\n except subprocess.CalledProcessError:\n logging.error('Unable to stop daemon')\n logging.debug('Error details', stack_info=True, exc_info=True)\n\n self._done = True",
"def stopService(self):\n self.stopping = True\n self.deferreds = {}\n for name in self.processes:\n self.deferreds[name] = Deferred()\n super(DelayedStartupProcessMonitor, self).stopService()\n\n # Cancel any outstanding restarts\n for name, delayedCall in self.restart.items():\n if delayedCall.active():\n delayedCall.cancel()\n\n # Stop processes in the reverse order from which they were added and\n # started\n for name in reversed(self.processes):\n self.stopProcess(name)\n return gatherResults(self.deferreds.values())",
"def kill_processes(self):\n for proc in self.processes:\n if proc['proc'].poll() is not None:\n proc['proc'].terminate()",
"def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()",
"async def stop_all(self):\n log.debug(\"Stopping nested services.\")\n for service in reversed(self.started_services):\n try:\n await service.stop()\n except Exception: # noqa\n log.exception(\"Fail to stop %s service.\", service)\n else:\n log.debug(\"There are no services to stop.\")\n log.debug(\"All nested services were stopped.\")",
"def atexit(self):\n self.stop_listen()\n for driver in self.drivers.values():\n driver.stop()\n if hasattr(driver, \"atexit\"):\n driver.atexit()\n try:\n self.processor_thread.join()\n except AttributeError:\n pass",
"def _stopAllWorkers():\n if _workerConfig.get('waitThread'):\n return\n delay = 0\n try:\n delay = float(_workerConfig['idle-time']['all'])\n except Exception:\n delay = 300\n delay -= time.time() - _workerConfig['lastChange']\n if delay > 0:\n _workerConfig['waitThread'] = threading.Timer(delay, _delayStop)\n _workerConfig['waitThread'].daemon = True\n _workerConfig['waitThread'].start()\n return\n for worker in list(_workerConfig['started']):\n _stopWorker(worker)",
"def stop_services(self):\n logger.info(\"Stopping services: %s\", self.services)\n for service in self.services:\n with hide(*fab_quiet):\n sudo('service %s stop' % service)",
"def hard_stop_drivers(self, drivers_to_stop: Set[str]):\n for process in find_processes():\n if process.comm in drivers_to_stop:\n process.kill()",
"def terminate(self):\r\n deferreds = []\r\n\r\n for container in self._containers.copy():\r\n deferreds.append(container.remote_destroy())\r\n\r\n if deferreds:\r\n deferredList = DeferredList(deferreds)\r\n deferredList.addCallback(self._cleanPackageDir)\r\n return deferredList\r\n else:\r\n self._cleanPackageDir()",
"def shutdown(self) -> None:\n for worker in self.remote_workers:\n worker.shutdown.remote()\n worker.__ray_terminate__.remote()",
"def stopAllProcesses(self, wait=True):\r\n self._update('stopAllProcesses')\r\n\r\n processes = self._getAllProcesses()\r\n\r\n killall = make_allfunc(processes, isRunning, self.stopProcess,\r\n wait=wait)\r\n\r\n killall.delay = 0.05\r\n killall.rpcinterface = self\r\n return killall # deferred\r",
"def cleanup(self):\n\n # NOTE(jbresnah) call stop on each of the servers instead of\n # checking the pid file. stop() will wait until the child\n # server is dead. This eliminates the possibility of a race\n # between a child process listening on a port actually dying\n # and a new process being started\n servers = [self.api_server, self.conductor_server, ]\n for s in servers:\n try:\n s.stop()\n except Exception:\n pass\n\n for f in self.files_to_destroy:\n if os.path.exists(f):\n os.unlink(f)",
"def __stop(self):\n\n # send commands\n poller = Poller()\n for (pipe, svc) in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n\n # give services a few seconds to cleanup and exit before checking responses\n sleep(1)\n\n max_attempts = len(self.__services)\n attempts = 0\n\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n\n # poll for any replies\n items = dict(poller.poll(60000)) # wait for messages\n\n # mark responding services as stopped\n alive = dict(self.__services) # make copy\n for (pipe, svc) in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug('received STOPPED control reply from %s service' % svc)\n svc.join(timeout=5) # STOPPED response should be sent right before svc exit\n if svc.is_alive():\n self.logger.error('%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc)\n poller.unregister(pipe)\n pipe.close()\n del (self.__services[pipe])\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n\n # log some useful info\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % (\n [str(s) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)",
"async def _stop_nested_services(self):\n await self._services.stop_all()",
"def kill_manager(self) -> None:\n\n for p in self.process_list:\n p.terminate()\n # NOTE: Seems Python does not appreciate if close is called too quickly.\n sleep(0.5)\n # Release the resources held by the Proess (Python 3.7 and up)\n p.close()",
"def cleanup(self):\n log = logging.getLogger('mailman.runner')\n # Send SIGTERMs to all the child processes and wait for them all to\n # exit.\n for pid in self._kids:\n try:\n os.kill(pid, signal.SIGTERM)\n except OSError as error:\n if error.errno == errno.ESRCH:\n # The child has already exited.\n log.info('ESRCH on pid: %d', pid)\n # Wait for all the children to go away.\n while self._kids:\n try:\n pid, status = os.wait()\n self._kids.drop(pid)\n except OSError as error:\n if error.errno == errno.ECHILD:\n break\n elif error.errno == errno.EINTR:\n continue\n raise",
"def terminate_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service,\n raise_on=['terminated'])\n logger.info(\"Stopping and \"\n \"removing docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'], remove=True)\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": True}\n else:\n self._dirty_service[service][\"terminated\"] = True\n return services",
"def stop(self):\n for worker in self.workers:\n import sys; sys.stdout.flush()\n try: worker.exec_code('import sys;sys.exit(0)')\n except:\n #should really do something here to\n # trap non-SystemExit errors.\n pass",
"def wait(self):\n try:\n self.relay.wait()\n self.responder.wait()\n except KeyboardInterrupt:\n print_notification(\"Stopping\")\n finally:\n self.terminate_processes()"
] | [
"0.6984014",
"0.6787286",
"0.6787142",
"0.67022586",
"0.66824",
"0.66690004",
"0.66061735",
"0.6547983",
"0.64799696",
"0.64767784",
"0.6424291",
"0.6407507",
"0.64050364",
"0.63752985",
"0.63732857",
"0.6344195",
"0.6343308",
"0.63306564",
"0.63221437",
"0.6291567",
"0.6280011",
"0.62601626",
"0.6250911",
"0.62442684",
"0.6218768",
"0.6214795",
"0.620889",
"0.6169303",
"0.61501074",
"0.6112388"
] | 0.75923276 | 0 |
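The negative snippets above all orbit one idiom: signal each worker or service to stop, then wait for it to exit. A minimal, hedged sketch of that idiom using plain `threading.Event` workers; none of the class or attribute names below come from the records themselves.

```python
import threading
import time
from typing import List


class Worker(threading.Thread):
    """Toy worker that loops until asked to stop."""

    def __init__(self) -> None:
        super().__init__(daemon=True)
        self._stop_event = threading.Event()

    def run(self) -> None:
        while not self._stop_event.is_set():
            time.sleep(0.1)  # stand-in for real work

    def stop(self) -> None:
        self._stop_event.set()


def stop_all_workers(workers: List[Worker], timeout: float = 5.0) -> None:
    # Signal every worker first, then join them, so shutdown happens roughly in parallel.
    for worker in workers:
        worker.stop()
    for worker in workers:
        worker.join(timeout=timeout)


workers = [Worker() for _ in range(3)]
for w in workers:
    w.start()
stop_all_workers(workers)
```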
Add a callback to self._daemon_execute_callbacks. See the service_actions function for their usage. | def append_thread_callback(self, callback: collections.Callable) -> None:
self._daemon_execute_callbacks.append(callback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_post_exec_callback(action_logger):\n logging.debug(\"Adding %s to post execution callback\", action_logger)\n __post_exec_callbacks.append(action_logger)",
"def add_done_callback(self, callback):\n with self._done_condition:\n if self._state in [PENDING, RUNNING]:\n self._done_callbacks.append(callback)\n return\n try:\n callback(self)\n except Exception:\n print('exception calling callback')",
"def add_callback(self, callback) -> None:\r\n self._callbacks.append(callback)",
"def on_add(self, callback):\n self._add_callback = callback if callable(callback) else _void",
"def add(self, callback):\n self._callbacks += as_cb_list(callback)",
"def register_command_callback(self, cmd, callback):\n if not self.configured:\n return\n self.bcp_receive_commands[cmd] = callback",
"def add_callback(self, callback):\n\n self._callbacks.append(callback)",
"def register_callback(self, callback):\n self.callbacks.add(callback)",
"def add_notify_handler(self, callback: NotifyHandler) -> None:\n self._notify_handlers.append(callback)",
"def add_callback(self, callback):\n if callback is not None:\n self.callbacks.append(callback)",
"def add_done_callback(self, fn):\n if self.done():\n # self._loop.call_soon(fn,self)\n call_soon(fn, self)\n else:\n self._callbacks.append(fn)",
"def perform_callback(self, *args, **kwargs):\n pass",
"def add_callback(self, on_msg_cb):\n self.callbacks.add(on_msg_cb)",
"def register_callback(self, callback: Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)",
"def register_callback(self, callback):\n self._callbacks.append(callback)",
"def add_callback(self, callback: Callback):\n self._callbacks.add(callback)\n self._stage.add_callback(callback)",
"def add_callback(self, fn):\n self._callbacks.append(fn)\n return self",
"def setRunCallback(self, run_callback):\n self.run_callback = run_callback",
"def add_callback(self, done_cb: Callable[[], None] = None) -> None:\n\n if done_cb is not None:\n self.callbacks.append(done_cb)",
"def add_default_done_callback(self, fn):\n\n self._default_done_callbacks.append(fn)",
"def add_delegate(self, callback):\n\n if callback in self._delegate_methods:\n return\n\n self._delegate_methods.append(callback)",
"def add_child_handler(self, pid, callback, *args):\n h = self._loop.trio_as_future(self._waitpid, pid, callback, *args)\n self._callbacks[pid] = h",
"def register_hook(self, callback: Callable[[BaseEvent], None]) -> None:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f\"Registering callback: {str(callback)}\")\n\n self._registered_hooks.add(callback)",
"def _call_service(self, action):\n conf_service = action.get(CONF_SERVICE, action.get(CONF_SERVICE_OLD))\n self._last_action = action.get(CONF_ALIAS, conf_service)\n _LOGGER.info(\"Executing script %s step %s\", self._name,\n self._last_action)\n domain, service = split_entity_id(conf_service)\n data = action.get(CONF_SERVICE_DATA, {})\n self.hass.services.call(domain, service, data)",
"def append_thread_callback(self, callback: collections.Callable) -> None:\n self._server.append_thread_callback(callback)",
"def add_launch_app_callback(self, callback):\n raise NotImplementedError",
"def addCallback(self,newCallback):\n self.callback.append(newCallback)",
"def addServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...",
"def add_done_callback(self, fn):\n if self.done():\n fn(self)\n else:\n self._callbacks.append(fn)",
"def on_post_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __post_exec_callbacks)\n for callback in __post_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on post-execution callback using %s\", callback)"
] | [
"0.595641",
"0.57329416",
"0.5647649",
"0.56453633",
"0.56191623",
"0.55979604",
"0.5584443",
"0.5554504",
"0.5554059",
"0.5503657",
"0.55009544",
"0.5450373",
"0.54332",
"0.5432897",
"0.5395858",
"0.53488904",
"0.53168035",
"0.53141373",
"0.53129905",
"0.53031254",
"0.52827644",
"0.52826345",
"0.52693844",
"0.5239294",
"0.52176875",
"0.52143675",
"0.5210698",
"0.52061695",
"0.519342",
"0.5189642"
] | 0.71937627 | 0 |
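The record above queues callbacks on a daemon so that they run from the serving thread via `service_actions`. A minimal sketch of that pattern on top of the standard library's `socketserver`, whose `serve_forever()` really does call `service_actions()` on each loop iteration; the class itself, the handler, and the locking are my own illustration, not the project's code.

```python
import socketserver
import threading
import time
from typing import Callable, List


class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self) -> None:
        self.request.sendall(self.request.recv(1024))


class CallbackTCPServer(socketserver.TCPServer):
    """TCP server that runs queued callbacks from its own serving thread (illustrative)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self._daemon_execute_callbacks: List[Callable[[], None]] = []
        self._callbacks_lock = threading.Lock()

    def append_thread_callback(self, callback: Callable[[], None]) -> None:
        # Queue the callback; service_actions() will execute it later.
        with self._callbacks_lock:
            self._daemon_execute_callbacks.append(callback)

    def service_actions(self) -> None:
        # Called by serve_forever() between polls (Python 3.3+).
        with self._callbacks_lock:
            pending, self._daemon_execute_callbacks = self._daemon_execute_callbacks, []
        for callback in pending:
            callback()


server = CallbackTCPServer(("127.0.0.1", 0), EchoHandler)
threading.Thread(target=server.serve_forever, daemon=True).start()
server.append_thread_callback(lambda: print("executed in the serving thread"))
time.sleep(1)  # give the serving loop a chance to run the callback before exit
```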
Give the callback to the running server through tracim.lib.daemons.TracimSocketServerMixin.append_thread_callback | def append_thread_callback(self, callback: collections.Callable) -> None:
self._server.append_thread_callback(callback) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append_thread_callback(self, callback: collections.Callable) -> None:\n raise NotImplementedError()",
"def append_thread_callback(self, callback: collections.Callable) -> None:\n raise NotImplementedError()",
"def append_thread_callback(self, callback: collections.Callable) -> None:\n self._daemon_execute_callbacks.append(callback)",
"def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)",
"def add_accept_handler(sock, callback):\n ...",
"def start_listening(self,callback_function,client_count=1):\n self.sockObj.listen(client_count)\n\n try:\n while True:\n conn,addr = self.sockObj.accept()\n obj = baseIpcClass(conn)\n t = threading.Thread(target=callback_function,args=(obj,))\n t.start()\n except:\n pass # Generated during the server socket closing",
"def create_listen_thread(self):\n self.listen_thread = threading.Thread(target=self.listen, daemon=True)\n self.listen_thread.start()\n print('Started listener thread')",
"def listen(self):\n print \"starting server thread with address \" + str(self.address)\n server_thread = ServerThread(self.address, self.response_queue, self.queue_lock, self.on_message_received)\n server_thread.start()\n self.connected_as_server = True # TODO only if successful",
"def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()",
"def peer_server(self):\n try:\n listener_thread = threading.Thread(target=self.peer_server_listener)\n listener_thread.setDaemon(True)\n\n operations_thread = threading.Thread(target=self.peer_server_host)\n operations_thread.setDaemon(True)\n\n listener_thread.start()\n operations_thread.start()\n\n threads = []\n threads.append(listener_thread)\n threads.append(operations_thread)\n\n for t in threads:\n t.join()\n except Exception as e:\n print \"Peer Server Error, %s\" % e\n sys.exit(1)",
"def run_in_thread(self, callback: Callable, thread: int, **kwargs) -> None:\n self.run_in(callback, 0, pin=False, pin_thread=thread, **kwargs)",
"def pass_message_to_main_thread_fn():\n\n pass",
"def __init__(self, server_address, handler_class, main_server):\n\n self.mainserver = main_server\n logger.info(\"auxiliary server started, listening on: %s\", server_address)\n SocketServer.TCPServer.__init__(self, server_address, handler_class)",
"def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())",
"def serveThread(self):\r\n while True:\r\n try:\r\n client = self.clients.get()\r\n self.serveClient(client)\r\n except Exception, x:\r\n logging.exception(x)",
"def _recv_thread_func(self):\r\n raise NotImplementedError()",
"def on_server_start(self, server):\n pass",
"def onSlave(self):",
"def server_main(args=None):\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind((HOST, PORT))\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n backlog = 0\n # server_socket.listen(backlog)\n\n games = GamesHandler(args)\n global shouldRunning\n threads = []\n # handle links with thread\n t = threading.Thread(target=handle_link_backs, args=(games,)).start()\n threads.append(t)\n\n server_socket.listen(backlog)\n while True: # grand loop of the server\n try:\n client_socket, client_addr = server_socket.accept()\n t = threading.Thread(target=client_thread, args=(client_socket, client_addr, games)).start()\n threads.append(t)\n except KeyboardInterrupt as e:\n shouldRunning = False\n break\n\n # clean-ups\n for thread in threads:\n thread.join()\n server_socket.close()",
"def setUpZServerThread(self):\n\n from ZServer import zhttp_server, zhttp_handler, logger\n from cStringIO import StringIO\n\n zlog = logger.file_logger(StringIO())\n\n zserver = zhttp_server(ip=self.host,\n port=self.port, \n resolver=None,\n logger_object=zlog)\n zhandler = zhttp_handler(module=bobo_app_name, uri_base='')\n zserver.install_handler(zhandler)\n\n self.zserver = zserver\n name = self.__class__.__name__\n self.zthread = ZServerThread(name=\"%s server\" % name)\n self.zthread.start()",
"def thread_serve(self):\n self.threaded_server = StoppableThread(target=self.start)\n self.threaded_server.start()\n\n while not self.threaded_server.stopped():\n time.sleep(1)\n\n # Stop the listeners...\n self.dp.qprint(\"setting b_stopThread on all listeners...\")\n for i in range(0, self.listeners):\n self.dp.qprint(\"b_stopThread on listener %d and executing join()...\" % i)\n self.l_listener[i].b_stopThread = True\n self.l_listener[i].join()\n\n # Stop the fileIO\n self.fileIO.b_stopThread = True\n self.dp.qprint(\"b_stopThread on fileIO executing join()...\")\n self.fileIO.join()\n\n self.dp.qprint(\"Shutting down the zmq infrastructure...\")\n try:\n self.dp.qprint('calling self.socket_back.close()')\n self.socket_back.close()\n except:\n self.dp.qprint('Caught exception in closing back socket')\n\n try:\n self.dp.qprint('calling self.socket_front.close()')\n self.socket_front.close()\n except zmq.error.ZMQError:\n self.dp.qprint('Caught exception in closing front socket...')\n\n self.dp.qprint('calling zmq_context.term()')\n # self.zmq_context.term()\n\n self.dp.qprint(\"calling join() on all this thread...\")\n self.threaded_server.join()\n self.dp.qprint(\"shutdown successful...\")",
"def on_server_start(self):\n raise NotImplementedError",
"def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()",
"def server():",
"def server():",
"def __init__(self, server_address, handler_class, main_server):\n self.mainserver = main_server\n SocketServer.UDPServer.__init__(self, server_address, handler_class)",
"def serve(self,cb):\n self.cb = cb\n self.run()",
"def start_socket_thread(self):\n self.socket_thread = BCPServer(self, self.receive_queue,\n self.sending_queue)\n self.socket_thread.daemon = True\n self.socket_thread.start()",
"def call_in_thread(self, callback):\n reactor.callFromThread(reactor.callInThread, callback)",
"def serve(self):\r\n for i in range(self.threads):\r\n try:\r\n t = threading.Thread(target = self.serveThread)\r\n t.setDaemon(self.daemon)\r\n t.start()\r\n except Exception, x:\r\n logging.exception(x)\r\n\r\n # Pump the socket for clients\r\n self.serverTransport.listen()\r\n while True:\r\n try:\r\n client = self.serverTransport.accept()\r\n self.clients.put(client)\r\n except Exception, x:\r\n logging.exception(x)"
] | [
"0.69699574",
"0.69699574",
"0.6759113",
"0.650974",
"0.6113602",
"0.6105394",
"0.6034728",
"0.5840231",
"0.58131206",
"0.5809989",
"0.58065826",
"0.57807076",
"0.5766478",
"0.57587993",
"0.57118356",
"0.57069206",
"0.5706047",
"0.5696736",
"0.5682783",
"0.56253636",
"0.5603856",
"0.56038266",
"0.5599172",
"0.55927175",
"0.55927175",
"0.5591342",
"0.5543466",
"0.55376005",
"0.5512703",
"0.54945827"
] | 0.7782145 | 0 |
Validate if the price amount does not have too many decimal places. The price amount can't have more decimal places than the currency allows. Works only with a Decimal created from a string. | def validate_price_precision(value: Optional["Decimal"], currency: str = None):
# check no needed when there is no value
if not value:
return
currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)
value = value.normalize()
if abs(value.as_tuple().exponent) > currency_fraction:
raise ValidationError(
f"Value cannot have more than {currency_fraction} decimal places."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_price(price):\n try:\n price = float(price)\n except ValueError:\n raise ValueError('Please provide valid price')\n if price < 1:\n raise ValueError('Price should be positive number')\n return price",
"def monetary_amount_valid(record, field_name='price', min=1, max=10):\n monetary_amount = record[field_name]\n assert isinstance(monetary_amount, float)\n string_price = str(monetary_amount)\n decimal = string_price.split(\".\")[1]\n assert min <= monetary_amount <= max and len(decimal) <= 2",
"def test_decimal_places_validation_errors(self):\n field = DecimalFractionField(max_digits=3, decimal_places=2)\n\n with self.assertRaises(ValidationError):\n # too many non-decimal digits\n field.clean(\"10\")\n\n with self.assertRaises(ValidationError):\n # too many decimal digits\n field.clean(\"1/100\")",
"def is_valid_decimal(string: str) -> bool:\n try:\n float(string)\n except ValueError:\n return False\n else:\n return True",
"def str_to_decimal_price(str_val):\n result = None\n\n try:\n val = Decimal(str_val)\n except (InvalidOperation, TypeError):\n result = None\n else:\n if val >= 0.0:\n result = val.quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)\n\n return result",
"def _validate_decimal(d: decimal.Decimal, precision: int, scale: int) -> None:\n _, digits, exp = d.as_tuple()\n # Precision represents the number of digits that can be stored.\n if len(digits) > precision:\n raise ValueError(\n \"decimal value has more digits than is legal according \"\n + \"to the schema's precision\"\n )\n\n # Scale represents the number of digits held after the decimal point.\n if exp < 0:\n if -exp > scale:\n raise ValueError(\n \"decimal value requires greater decimal scale than is \"\n + \"legal according to the schema\"\n )",
"def pricevalidator(self, price):\n if type(price) != int:\n API.abort(400, error_messages[15]['str_price'])\n\n return True",
"def validate_decimal(v: str, field: Field):\n field_info = field.field_info\n inclusive = field_info.ge is not None or field_info.le is not None\n min_value = field_info.gt if field_info.gt is not None else field_info.ge\n min_value = Decimal(min_value) if min_value is not None else min_value\n max_value = field_info.lt if field_info.lt is not None else field_info.le\n max_value = Decimal(max_value) if max_value is not None else max_value\n ret = validate_decimal(v, min_value, max_value, inclusive)\n if ret is not None:\n raise ValueError(ret)\n return v",
"def test_no_decimals_01(self):\n self.assertEqual(currency(188.01, False), \"$188.01\")",
"def clean_price(self):\n price = self.cleaned_data.get('price')\n if price == \"0\":\n raise forms.ValidationError(\n u'Please insert a price for your product')\n return price",
"def is_amount_valid(amount):\n return isinstance(amount, float) and len(str(amount).split('.')[-1]) <= TERMINAL_DECIMALS_ALLOWED \\\n and TERMINAL_MAXIMAL_AMOUNT_REQUESTABLE >= amount >= TERMINAL_MINIMAL_AMOUNT_REQUESTABLE",
"def test_no_decimals_00(self):\n self.assertEqual(currency(188.00, False), \"$188\")",
"def get_price(str_val):\n return float(str_val.replace('.', '').replace(',', '.'))",
"def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)",
"def is_valid_procent(inString):\r\n if is_float(inString):\r\n procent = float(inString)\r\n return procent >= 0 and procent < 100\r\n #the 0 is acepted, beacuse later it will be modifyed\r\n else:\r\n return False",
"def validate_amount(self, amount):\n try:\n amount = float(amount)\n except ValueError:\n raise IncorrectVariableTypeException(self.__class__.__name__, 'amount: [%s]' % str(amount))\n\n super().validate_amount( amount )",
"def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")",
"def price(self, price: str) -> Money:\n\n if not price:\n return Money(\"0.00\", self.currency)\n elif [x for x in self.currency_sign if x in price]:\n return Money(re.findall(num_pattern, price)[0], self.currency)",
"def validate_entered_amount(amount_entered):\n if len(amount_entered) > 0 and len(amount_entered) <= 15:\n if amount_entered.isdigit:\n if re.match(\"^[0-9]*\\\\.?[0-9]*$\", amount_entered):\n amount = round(float(amount_entered), 2)\n if amount > 0:\n return str(amount)\n return 0",
"def clean(amount):\n # Return empty input immediately.\n if not amount:\n return amount\n\n if re.search(r'[\\. ][0-9]{3},[0-9]{1,2}$', amount):\n # Assume amount is in 1.123,12 or 1 123,12 format (Dutch).\n return amount.replace('.', '').replace(' ', '').replace(',', '.')\n\n if re.search(r'[, ][0-9]{3}\\.[0-9]{1,2}$', amount):\n # Assume amount is in 1,123.12 format (Engels).\n return amount.replace(',', '').replace(' ', '')\n\n if re.search(r'[0-9](,[0-9]{1,2}$)', amount):\n # Assume amount is in 123,12 or in 123,1 format (Dutch).\n return amount.replace(',', '.')\n\n # Format of amount is not recognized. Return amount.\n return amount",
"def validate_price(price_str: str) -> bool:\n\n # if no digit is found, return false\n if not extract_required_data(data_str=price_str, req_type=r'\\d+'):\n return False\n\n # if per('/') is not found, return false\n if '/' not in price_str:\n print(\"Please specify item price per ('/') units\")\n return False\n\n # extract the unit from the price string\n unit = price_str[price_str.index('/') + 1:]\n\n # is unit not found in stored units, return false\n if not StandardUnits.has_value(unit) and unit not in units_mapping:\n return False\n\n return True",
"def test_default_w_decimals(self):\n self.assertEqual(currency(188.00), \"$188.00\")",
"def _isDecimalNumber(strWord):\n return NumberFormula.DECIMALNUMBERREGEX.match(strWord) != None",
"def check_price(URL, headers):\n page = requests.get(URL, headers=headers)\n soup = BeautifulSoup(page.content, 'html.parser')\n price = soup.find(id=\"priceblock_ourprice\").get_text()\n converted_price = price[:-3]# -3 removes the .99 pence value from product\n float_price = ''\n for c in converted_price:\n if c.isdigit():\n float_price = float_price + c\n #loop that removes the £$,. from product so the string can convert to float correctly\n return float(float_price)",
"def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True",
"def normalize_price(price: str) -> float:\n return float(price.strip().replace(',', ''))",
"def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"",
"def ParseAmount(am):\n\n ParseAmount.valid = True\n # filter\n am = re.sub('[^0-9,.-]','',am)\n # integers(any number).fraction(0..2) \n # find decimal point\n frac1 =len(am)-am.find('.')\n frac2 =len(am)-am.find(',')\n # No grouping & No fraction / decimal-point\n if (frac1 == frac2):\n am = '%s.00'% am\n # xxx,xxx,xxx.xx comma-grouping, dot-decimal\n elif (frac1 < 4) and (frac1 > 0): \n am = am.replace(',','')\n # xxx.xxx.xxx,xx dot-grouping, comma-decimal\n elif (frac2 < 4) and (frac2 > 0):\n am = am.replace('.','')\n am = am.replace(',','.') # harmonize decimal-point\n # grouping & No fraction / decimal-point\n else:\n am = am.replace(',','')\n am = am.replace('.','')\n am = '%s.00'% am\n # check validity result\n if (len(am) - am.find('.')) != 3:\n ParseAmount.valid = False\n return am",
"def test_normal_decimal_input(self):\r\n ws_leader = \"S. O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9",
"def test_collect_money_handles_value_error(self):\n # Params\n f_max_value = 100.00\n f_quarters = 'k'\n f_dimes = 1\n f_nickels = 5\n\n # Returns\n return_1 = 'Please enter valid currency.\\n'\n\n # Calls\n string_1 = collect_money(f_max_value, f_quarters, f_dimes, f_nickels)\n\n # Asserts\n self.assertEqual(string_1, return_1)"
] | [
"0.7102188",
"0.6969148",
"0.67373794",
"0.66555464",
"0.6602999",
"0.65736985",
"0.64711094",
"0.64241284",
"0.64125",
"0.6275703",
"0.61710167",
"0.6137798",
"0.6098253",
"0.6075076",
"0.6048671",
"0.6011906",
"0.5996882",
"0.5977033",
"0.5968822",
"0.5943398",
"0.5926788",
"0.592012",
"0.58631754",
"0.58593607",
"0.5857771",
"0.5770855",
"0.575695",
"0.5732306",
"0.571673",
"0.5712215"
] | 0.79515773 | 0 |
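The price-precision record leans on a Decimal subtlety worth spelling out: only a Decimal built from a string keeps the exponent you wrote, which is why the docstring restricts the check to string-created values. Below is a small sketch of a similar exponent check; the real get_currency_fraction comes from the surrounding project and is replaced here by a hypothetical minor-unit table.

```python
from decimal import Decimal

# Hypothetical minor-unit table standing in for get_currency_fraction().
CURRENCY_FRACTIONS = {"USD": 2, "JPY": 0, "BHD": 3}


def decimal_places_ok(value: Decimal, currency: str) -> bool:
    """Return True if `value` uses no more decimal places than the currency allows."""
    fraction = CURRENCY_FRACTIONS.get(currency, 2)
    value = value.normalize()  # strip trailing zeros so "5.10" counts as one place
    exponent = value.as_tuple().exponent
    return exponent >= 0 or -exponent <= fraction


assert decimal_places_ok(Decimal("5.11"), "USD")
assert not decimal_places_ok(Decimal("5.111"), "USD")
assert decimal_places_ok(Decimal("100"), "JPY")
# Why strings matter: a float-built Decimal drags in dozens of decimal places.
assert not decimal_places_ok(Decimal(5.11), "USD")
```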
Function to handle the initialization of the class. Creates an [x, y] sample for each timestep. Parameters: std, sequenceLength. | def __init__(self, std, sequenceLength, device):
#create data steps from 2 to 10 with the given sequence length
xTimeSteps = np.linspace(2, 10, sequenceLength + 1)
#create numpy array with sin(x) input
yNp = np.zeros((2, sequenceLength + 1))
yNp[1,:] = np.sin(xTimeSteps) + np.random.normal(0, std, xTimeSteps.size)
yNp[0,:] = xTimeSteps
#yNp.resize((sequenceLength + 1, 1))
#yInput = Variable(torch.Tensor(Yt[:, :-1]).type(dtype), requires_grad = False).to(device)
self.yInput = torch.tensor(yNp[:, :-1], dtype=torch.float, device=device, requires_grad=False)
# create the target or ground truth data
#yTarget = Variable(torch.Tensor(Yt[:, 1:]).type(dtype), requires_grad = False).to(device)
self.yTarget = torch.tensor(yNp[:, 1:], dtype=torch.float, device=device, requires_grad=False)
# Normalizes values
self.yInput = self.yInput / torch.max(self.yInput)
self.yTarget = self.yTarget / torch.max(self.yTarget) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))",
"def __init__(self, samples):\n self.samples = samples",
"def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)",
"def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time",
"def __init__(self, L, T_range):\n self.L = L\n self.spins = np.ones((L, L, len(T_range)))\n self.InitializeSpins(T_range[0])",
"def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def __init__(self,\n data,\n num_steps,\n seed=None):\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever graph level seed is returned\n np.random.seed(seed1 if seed is None else seed2)\n \n inps, outs = slide_window(data, num_steps)\n # inps = data[:,:num_steps,:]\n # outs = data[:,1:num_steps+1,:]\n\n assert inps.shape[0] == outs.shape[0], (\n 'inps.shape: %s outs.shape: %s' % (inps.shape, outs.shape))\n\n\n self._num_examples = inps.shape[0]\n self._inps = inps\n self._outs = outs\n self._epochs_completed = 0\n self._index_in_epoch = 0",
"def __init__(self, data_dir, seq_id):\n super().__init__()\n self.data_dir = data_dir\n self.seq_id = seq_id\n self.ys = np.load(data_dir + 'samples.npy')[seq_id].reshape(-1, 1, 1)\n self.ys = cuda_move(torch.tensor(self.ys).float())",
"def __init__(self,\n data,\n num_steps,\n num_test_steps=None,\n seed=None):\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever graph level seed is returned\n np.random.seed(seed1 if seed is None else seed2)\n \n #inps, outs = slide_window(data, num_steps)\n #inps = data[:,:num_steps,:]\n #outs = data[:,1:num_steps+1,:]\n \n time_len = data.shape[1]\n if num_test_steps is None:\n num_test_steps= time_len-num_steps \n enc_inps = data[:,:num_steps, :]\n dec_inps = np.insert(data[:,num_steps:num_steps+num_test_steps-1,:], 0, SOS, axis=1)\n #dec_outs = np.insert(data[:,num_steps:num_steps+num_test_steps,:], num_test_steps, EOS, axis=1)\n dec_outs = data[:,num_steps:num_steps+num_test_steps,:]\n\n assert enc_inps.shape[0] == dec_outs.shape[0], (\n 'inps.shape: %s outs.shape: %s' % (inps.shape, outs.shape))\n\n\n self._num_examples = enc_inps.shape[0]\n self._enc_inps = enc_inps\n self._dec_inps = dec_inps\n self._dec_outs = dec_outs\n self._epochs_completed = 0\n self._index_in_epoch = 0",
"def __init__(self, step_time, saw_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time\n self.saw_time = saw_time",
"def new_sample(self):\n\n self.u_seq.append([])\n self.r_exp.append(0)",
"def __init__(self, num_samples):\n self._num_samples = num_samples\n self._current = 0\n self._data = None",
"def __init__(self, len_x, len_y):\n self._gen_window(len_x, len_y)",
"def __init__(self, step_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time",
"def _init_sample(self, PB_X):\n sample = Sample(self)\n self._set_sample(sample, PB_X, -1)\n return sample",
"def __init__(self, time_constant: float, sampling_time: float):\n self.alpha = sampling_time / (time_constant + sampling_time)\n self.state = None",
"def __init__(self, lengths):\n self.lengths = lengths\n\n # weights: index 1 = layer index, index 2 = feature index\n # last index in each layer is bias weight\n self.weights = np.array()\n self.activations = np.array()\n for i,length in enumerate(self.lengths):\n self.weights[i] = np.random.rand(1,length+1)\n self.activations[i] = np.random.rand(1,length+1)",
"def __init__(self, samples, analysis):\r\n self.samples = samples\r\n self.analysis = analysis",
"def __init__(self):\n\n self.sequence = []",
"def __init__(self, n_per_sample, title, unit):\n self.min_sample = 0xffffffffffffffff\n self.max_sample = 0\n self.n_per_sample = n_per_sample\n self.title = title\n self.unit = unit\n self.sum_ = 0\n self.count = 0\n self.sample_lock = threading.Lock()",
"def __init__(self, X):\n self._T = X\n self._step = X[1] - X[0]\n X = np.array(range(len(X)))\n super().__init__(X)",
"def generate_seq(self):\n\n # Variable initialization\n eos = False\n c_s = 99\n x = []\n y = []\n\n while not eos:\n\n # Start of sequence\n if c_s == 99:\n # Sample from initial\n c_s = self.sample_p(self.proba[\"initial\"])\n\n # Consecutive iterations\n\n # We generate until we get length of self length\n elif len(x) < self.length:\n # Sample from transition of last state\n c_s = self.sample_p(self.proba[\"transition\"][c_s])\n\n # Generate emission\n\n # Note that we append the states as labels and observations as input\n y.append(c_s)\n x.append(self.sample_p(self.proba[\"emission\"][c_s]))\n\n else:\n eos = True\n\n # We get the state ID by offseting their idx by the length of observations\n ofs = len(self.obs)\n y = [i + ofs for i in y]\n return (x, y)",
"def __init__(self, sequence):\n self._seq = sequence # Copy of the given data.\n # Reference to the underlying data, will increment to 0 on first call\n # to next element.\n self._k = -1",
"def __init__(self, center_words, context_words, neg_samples): \n self.center_words = center_words\n self.context_words = context_words\n self.neg_samples = neg_samples\n # The index of the data the batch should start from. \n self.data_index = 0",
"def __init__(self, length, ntraj, hop_dist='gaussian', dwell_dist='power', hop_sigma=1, alpha=0.5, lamb=0.5,\n padding=10, dt=1, nt=1, H=0.5):\n\n self.nsteps = length\n self.ntraj = ntraj\n self.hop_distribution = hop_dist\n self.hop_sigma = hop_sigma\n self.dwell_distribution = dwell_dist\n self.lamb = lamb\n self.alpha = alpha\n self.padding = padding\n self.dt = dt\n self.nt = nt\n self.H = H\n\n self.trajectories = np.zeros([self.ntraj, self.nsteps, 2])\n self.trajectory_hops = np.zeros([self.ntraj, 2 * self.nsteps - 1, 2]) # for visualization\n self.time_uniform = None\n self.z_interpolated = np.zeros([self.ntraj, self.nsteps*self.padding]) # separate from time_uniform to save memory\n\n self.msd = None\n self.fit_parameters = None\n self.bootstraps = None\n self.fit_cut = 1\n self.fit_start = 0\n self.acf = None # autocorrelation function\n self.final_msd = None\n self.steps = []\n\n # Initialize multi-threading\n self.pbar = None",
"def generate(self):\n # --------------------------------\n # Initializing Variables\n # --------------------------------\n\n # Set NumPy Random Seed\n np.random.seed(self._seed)\n\n # Create Active List\n active = []\n\n # Clear previously generated examples\n if len(self._samples) > 0:\n self._clear_previous_samples()\n\n # --------------------------------\n # Begin Generating Samples\n # --------------------------------\n\n # Create the first sample\n self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)),\n high=self._extent, size=(self._dim,)))\n active.append(self._samples[0])\n self._grid[self._get_grid_coord(self._samples[0])] = 0\n\n while active:\n # Choose Random Active Sample\n idx = np.random.choice(len(active))\n\n # Make new point & confirm it is valid\n new_point = self._make_point(active[idx])\n if new_point is None:\n active.pop(idx)\n else:\n # Add sample to listings and store in grid for neighboring locations.\n self._samples.append(new_point)\n active.append(new_point)\n self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1\n\n # Return samples as numpy array\n self._samples = np.array(self._samples)\n return self._samples",
"def __init__(self, xs, ys, gauge_length, sample_width, sample_thickness, name=None):\n assert len(xs) == len(ys)\n\n self.xs = np.array(xs)\n self.ys = np.array(ys)\n self.gauge_length = gauge_length # m\n self.sample_width = sample_width # m\n self.sample_thickness = sample_thickness # m\n self.name = name",
"def __init__(self, name=\"untitled\", pulseGenerator=None, start1=0, stop1=10000, start2=None, stop2=None):\n self.name = name\n self._pulseGenerator = pulseGenerator\n self._numberOfPoints = self._pulseGenerator.numberOfPoints()\n self._samplingTime = self._pulseGenerator.samplingTime()\n self._shape1 = numpy.zeros(self._numberOfPoints)\n self._shape2 = numpy.zeros(self._numberOfPoints)\n self._shape1[:] = getattr(generatorFunctionLib, 'square')(\n self._numberOfPoints, self._samplingTime, start1, stop1, amplitude=1)\n if start2 is None:\n start2 = start1\n if stop2 is None:\n stop2 = stop1\n self._shape2[:] = getattr(generatorFunctionLib, 'square')(\n self._numberOfPoints, self._samplingTime, start2, stop2, amplitude=2)\n self.markerArray = [sum(i)for i in zip(self._shape1, self._shape2)]"
] | [
"0.7491473",
"0.6757173",
"0.6536982",
"0.645627",
"0.6443957",
"0.63328993",
"0.63073266",
"0.63073266",
"0.6270523",
"0.622384",
"0.6216968",
"0.6200106",
"0.61991644",
"0.61531895",
"0.6119851",
"0.6113955",
"0.6103908",
"0.6098615",
"0.609861",
"0.60970324",
"0.6093548",
"0.60912615",
"0.6058444",
"0.60332084",
"0.6027203",
"0.6021962",
"0.601366",
"0.59962183",
"0.5974717",
"0.595862"
] | 0.67708 | 1 |
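The record above turns a noisy sine curve into an (input, target) tensor pair shifted by one time step and normalized by its maximum; the Elman-network record below appears to belong to the same exercise. A compact NumPy/PyTorch sketch of the same construction, with the device argument and the class wrapper dropped for brevity.

```python
import numpy as np
import torch


def make_sine_sample(std: float, sequence_length: int):
    """Build (input, target) tensors of shape [2, sequence_length], target shifted by one step."""
    x_steps = np.linspace(2, 10, sequence_length + 1)
    y = np.zeros((2, sequence_length + 1))
    y[0, :] = x_steps
    y[1, :] = np.sin(x_steps) + np.random.normal(0, std, x_steps.size)

    y_input = torch.tensor(y[:, :-1], dtype=torch.float)
    y_target = torch.tensor(y[:, 1:], dtype=torch.float)

    # Same normalization as in the record: divide each tensor by its own maximum.
    return y_input / torch.max(y_input), y_target / torch.max(y_target)


inp, tgt = make_sine_sample(std=0.05, sequence_length=100)
assert inp.shape == tgt.shape == (2, 100)
```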
Creates the matrices for the Elman model, in this case W1 and V. Parameters: contextConcatInputLayerSize, hiddenLayerSize, outputLayerSize. | def __init__(self, contextConcatInputLayerSize, hiddenLayerSize, outputLayerSize, device):
super(ElmanNet, self).__init__()
self.hidden_layer_size = hiddenLayerSize
# Initializes the W1 matrix
W1 = torch.zeros((contextConcatInputLayerSize, hiddenLayerSize), dtype=torch.float, device=device)
self.W1 = Parameter(W1, requires_grad=True)
#randomly init W1 parameter matrix with mean 0 and std 0.4
nn.init.normal_(self.W1, 0.0, 0.4)
# Initializes the V matrix
V = torch.zeros((hiddenLayerSize, outputLayerSize), dtype=torch.float, device=device)
self.V = Parameter(V, requires_grad=True)
# randomly init V parameter matrix with mean 0 and std 0.3
nn.init.normal_(self.V, 0.0, 0.3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_variables(self):\n self.create_weight_variable(self.input_size + [self.hidden_size[0]], name=\"W1\")\n\n self.create_bias_variable((1, self.hidden_size[0]), name=\"b1\")\n\n for i in range(self.n_hidden-1):\n self.create_weight_variable([self.hidden_size[i], self.hidden_size[i+1]], \n name=\"W\"+str(i+2))\n\n self.create_bias_variable((1, self.hidden_size[i+1]), name=\"b\"+str(i+2))\n\n for i in range(len(self.output_size)):\n self.create_weight_variable([self.hidden_size[-1], self.output_size[i]], name=\"Wo_%s\"%i)\n\n self.create_bias_variable((1, self.output_size[i]), name=\"bo_%s\"%i)",
"def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b",
"def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]",
"def create_weight_matrices(self):\n rad = 1 / np.sqrt(self.no_of_in_nodes)\n X = truncated_normal(mean=0, \n sd=1, \n low=-rad, \n upp=rad)\n self.wih = X.rvs((self.no_of_hidden_nodes, \n self.no_of_in_nodes))\n rad = 1 / np.sqrt(self.no_of_hidden_nodes)\n X = truncated_normal(mean=0, sd=1, low=-rad, upp=rad)\n self.who = X.rvs((self.no_of_out_nodes, \n self.no_of_hidden_nodes))",
"def initParams(self):\n sizes = [self.inputDim]+self.layerSizes+[self.outputDim]\n scales = [np.sqrt(6)/np.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]\n self.stack = [[np.random.rand(m,n)*2*s-s,np.zeros((m,1))] \\\n for n,m,s in zip(sizes[:-1],sizes[1:],scales)]\n self.hActs_M = [cm.empty((s,self.maxBatch)) for s in sizes]\n\n if self.train:\n # Now assuming that all layers are the same size\n self.grad = [[cm.empty(w.shape),cm.empty(b.shape)] for w,b in self.stack]\n self.deltasC_M = cm.empty((self.outputDim,self.maxBatch))\n self.deltasOut_M = cm.empty((sizes[1],self.maxBatch)) \n self.deltasIn_M = cm.empty((sizes[1],self.maxBatch)) \n self.tmpGrad_M = cm.empty((self.layerSize,self.maxBatch))\n \n # Allocate memory once here and reuse\n # Store probs\n self.probs_M = cm.empty((self.outputDim,self.maxBatch))\n # Store col max\n self.rowVec_M = cm.empty((1,self.maxBatch))\n \n self.stack = [[cm.CUDAMatrix(w),cm.CUDAMatrix(b)]\n for w,b in self.stack]\n\n if self.temporalLayer > 0:\n # dummy bias used for temporal layer\n dummy = cm.empty((1,1))\n dummy.assign(0.0)\n\n scale = np.sqrt(6)/np.sqrt(self.layerSize*2)\n wtf = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n wtb = cm.CUDAMatrix(2*scale*np.random.rand(self.layerSize,\n self.layerSize)-scale)\n self.stack.append([wtf,dummy])\n self.stack.append([wtb,dummy])\n\n # forward and backward activations for temporal layer\n self.hActsFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.hActsBack_M = cm.empty((self.layerSize,self.maxBatch))\n\n if self.train:\n dwtf = cm.empty(wtf.shape)\n self.grad.append([dwtf,dummy])\n dwtb = cm.empty(wtb.shape)\n self.grad.append([dwtb,dummy])\n\n self.tmpGradBack_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasFor_M = cm.empty((self.layerSize,self.maxBatch))\n self.deltasBack_M = cm.empty((self.layerSize,self.maxBatch))",
"def __init__(self, inputSize, hiddenSize, outputSize, epochs = 100, debug = False):\n self.inputSize = inputSize\n self.hiddenSize = hiddenSize\n self.outputSize = outputSize\n self.epochs = epochs\n self.debug = debug\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize) \n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)",
"def prepare_word_emb_matrices(experiment):\r\n\r\n with open(\"public_data/stats/stats_train.pkl\", 'rb') as stats:\r\n stats = pickle.load(stats)\r\n vocab = stats[\"VOCAB\"]\r\n stops = [word.lower() for word in set(stopwords.words('english'))]\r\n vocab = vocab + stops\r\n\r\n if experiment == \"RANDOM\":\r\n word_embs = np.random.uniform(low=-1.0, high=1.0, size=(len(vocab), PARAMS[\"SIZE\"])).astype(\"float32\")\r\n\r\n else:\r\n word_embs = []\r\n count_unk = 0\r\n count_kn = 0\r\n\r\n if experiment == \"5\":\r\n emb_model = KeyedVectors.load_word2vec_format(\"public_data/models/experiment_5/embeddings_5.bin\",\r\n binary=True)\r\n elif experiment == \"6\":\r\n emb_model = Word2Vec.load(\"public_data/models/experiment_6/embeddings_6\")\r\n\r\n elif experiment in [\"7\", \"8\"]:\r\n emb_model = FastText.load_fasttext_format(\"public_data/models/experiment_%s/embeddings_%s.bin\"\r\n %(experiment, experiment))\r\n for word in vocab:\r\n if word in emb_model:\r\n word_embs.append(emb_model[word])\r\n count_kn += 1\r\n else:\r\n word_embs.append(np.random.uniform(low=-1.0, high=1.0, size=PARAMS[\"SIZE\"]))\r\n count_unk += 1\r\n\r\n word_embs = np.array(word_embs).astype(\"float32\")\r\n print(count_unk / (count_kn + count_unk))\r\n\r\n pad = np.zeros(shape=PARAMS[\"SIZE\"]).astype(\"float32\")\r\n unk = np.random.uniform(low=-1.0, high=1.0, size=PARAMS[\"SIZE\"]).astype(\"float32\")\r\n word_embs = np.insert(word_embs, 0, unk, axis=0) #id 1\r\n word_embs = np.insert(word_embs, 0, pad, axis=0) #id 0\r\n\r\n with open(\"public_data/embeddings/word_embeddings_%s.pkl\" %experiment, 'wb') as out:\r\n pickle.dump(word_embs, out, protocol=4)\r\n\r\n return word_embs",
"def _generate_embeddings(self, config): \n tr_parts = []\n te_parts = []\n all_columns = []\n for comp in self.components:\n tr_tmp, te_tmp, cols = comp.generate(config)\n if cols != None:\n print(tr_tmp.shape,te_tmp.shape)\n tr_parts.append(tr_tmp)\n te_parts.append(te_tmp)\n all_columns += cols\n X_train = np.concatenate(tr_parts, axis=1)\n X_test = np.concatenate(te_parts, axis=1)\n print(\"Concatenated size:\", X_train.shape, X_test.shape)\n self.feature_columns = all_columns\n return X_train, X_test",
"def build_model(allidx,MAX_LENGTH,onlyArg):\n wordidx, labelidx, featuresidx, extraidx=allidx\n posidx, neridx, depidx, distanceidx, chnkidx, wikineridx, dbpedianeridx, subneridx = featuresidx\n\n main_input = Input(shape=(MAX_LENGTH,), name='main_input', dtype='int32')\n inputNodes=[main_input]\n\n w2vmodel=\"../embeddings/Domain-Word2vec.model\"\n\n embedding_matrix,EMBEDDING_DIM,vocabulary_size=prepare.wv_embedded(wordidx,w2vmodel)\n \n x = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, input_length=MAX_LENGTH, mask_zero=False)(main_input)\n numnode=int(EMBEDDING_DIM/2)\n\n # pos embedding\n inputNodes,pos_layer=layers.embedlayer(inputNodes,\"pos_input\",posidx,MAX_LENGTH)\n x=Concatenate()([x,pos_layer])\n numnode+=int(len(posidx)/2)\n\n # ner embedding\n inputNodes,ner_layer=layers.embedlayer(inputNodes,\"ner_input\",neridx,MAX_LENGTH)\n x=Concatenate()([x,ner_layer])\n numnode+=int(len(neridx)/2)\n\n inputNodes,wikiner_layer=layers.embedlayer(inputNodes,\"wikiner_input\",wikineridx,MAX_LENGTH)\n x=Concatenate()([x,wikiner_layer])\n numnode+=int(len(wikineridx)/2)\n\n inputNodes,dbpedianer_layer=layers.embedlayer(inputNodes,\"dbpedianer_input\",dbpedianeridx,MAX_LENGTH)\n x=Concatenate()([x,dbpedianer_layer])\n numnode+=int(len(dbpedianeridx)/2)\n\n # dep embedding\n inputNodes,dep0_layer=layers.embedlayer(inputNodes,\"dep0_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep0_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep1_layer=layers.embedlayer(inputNodes,\"dep1_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep1_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep2_layer=layers.embedlayer(inputNodes,\"dep2_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep2_layer])\n numnode+=int(len(depidx)/2)\n\n # chnk embedding\n inputNodes,lvl_layer=layers.embedlayer(inputNodes,\"lvl_input\",distanceidx,MAX_LENGTH)\n x=Concatenate()([x,lvl_layer])\n numnode+=int(len(distanceidx)/2)\n\n inputNodes,chnk_layer=layers.embedlayer(inputNodes,\"chnk_input\",chnkidx,MAX_LENGTH)\n x=Concatenate()([x,chnk_layer])\n numnode+=int(len(chnkidx)/2)\n\n # wikiclass embedding\n inputNodes,subner_layer=layers.embedlayer(inputNodes,\"subner_input\",subneridx,MAX_LENGTH)\n x=Concatenate()([x,subner_layer])\n numnode+=int(len(subneridx)/2)\n\n if onlyArg:\n neartrigger_input = Input(shape=(MAX_LENGTH,), name='neartrigger_input', dtype='int32')\n inputNodes.append(neartrigger_input)\n neartrigger_layer = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, \\\n input_length=MAX_LENGTH, mask_zero=False)(neartrigger_input)\n x=Concatenate()([x,neartrigger_layer])\n numnode+=50\n inputNodes,x,numnode=layers.extralayer(inputNodes,x,numnode,extraidx,featuresidx,MAX_LENGTH)\n\n lstm_out = Bidirectional(LSTM(numnode, dropout=0.2,return_sequences=True))(x)\n numnode=int((numnode+len(labelidx))*2/3)\n\n if onlyArg:\n lstm_out = SeqSelfAttention(attention_activation='tanh', attention_width=5)(lstm_out)\n\n lstm_out = Dropout(0.2)(lstm_out)\n out = Dense(numnode)(lstm_out)\n\n crf = CRF(len(labelidx), sparse_target=False) # CRF layer\n main_output=crf(out)\n loss=crf_loss #crf.loss_function\n acc=[crf_accuracy]\n\n model = Model(inputs=inputNodes, outputs=main_output) \n model.compile(loss=loss,optimizer=Adam(0.001),metrics=acc)\n model.summary()\n\n return model",
"def build_computation_graph(self, num_words, num_chars):\n # initialize the word embeddings and the parameters\n cembeds = None\n if self.embeds_file:\n print(\"loading embeddings\", file=sys.stderr)\n embeddings, emb_dim = load_embeddings_file(self.embeds_file)\n assert(emb_dim==self.in_dim)\n num_words=len(set(embeddings.keys()).union(set(self.w2i.keys()))) # initialize all with embeddings\n # init model parameters and initialize them\n wembeds = self.model.add_lookup_parameters((num_words, self.in_dim),init=dynet.ConstInitializer(0.01))\n\n if self.c_in_dim > 0:\n cembeds = self.model.add_lookup_parameters((num_chars, self.c_in_dim),init=dynet.ConstInitializer(0.01))\n \n init=0\n l = len(embeddings.keys())\n for word in embeddings.keys():\n # for those words we have already in w2i, update vector, otherwise add to w2i (since we keep data as integers)\n if word in self.w2i:\n wembeds.init_row(self.w2i[word], embeddings[word])\n else:\n self.w2i[word]=len(self.w2i.keys()) # add new word\n wembeds.init_row(self.w2i[word], embeddings[word])\n init+=1\n print(\"initialized: {}\".format(init), file=sys.stderr)\n\n else:\n wembeds = self.model.add_lookup_parameters((num_words, self.in_dim),init=dynet.ConstInitializer(0.01))\n if self.c_in_dim > 0:\n cembeds = self.model.add_lookup_parameters((num_chars, self.c_in_dim),init=dynet.ConstInitializer(0.01))\n\n #make it more flexible to add number of layers as specified by parameter\n layers = [] # inner layers\n\n for layer_num in range(0,self.h_layers):\n\n if layer_num == 0:\n if self.c_in_dim > 0:\n f_builder = dynet.CoupledLSTMBuilder(1, self.in_dim+self.c_in_dim*2, self.h_dim, self.model) # in_dim: size of each layer\n b_builder = dynet.CoupledLSTMBuilder(1, self.in_dim+self.c_in_dim*2, self.h_dim, self.model) \n else:\n f_builder = dynet.CoupledLSTMBuilder(1, self.in_dim, self.h_dim, self.model)\n b_builder = dynet.CoupledLSTMBuilder(1, self.in_dim, self.h_dim, self.model)\n layers.append(BiRNNSequencePredictor(f_builder, b_builder)) #returns forward and backward sequence\n else:\n # add inner layers (if h_layers >1)\n f_builder = dynet.LSTMBuilder(1, self.h_dim, self.h_dim, self.model)\n b_builder = dynet.LSTMBuilder(1, self.h_dim, self.h_dim, self.model)\n layers.append(BiRNNSequencePredictor(f_builder,b_builder))\n\n # store at which layer to predict task\n\n task_num_labels= len(self.tag2idx)\n output_layer = FFSequencePredictor(Layer(self.model, self.h_dim*2, task_num_labels, dynet.softmax))\n\n if self.c_in_dim > 0:\n char_rnn = BiRNNSequencePredictor(dynet.CoupledLSTMBuilder(1, self.c_in_dim, self.c_in_dim, self.model), dynet.CoupledLSTMBuilder(1, self.c_in_dim, self.c_in_dim, self.model))\n else:\n char_rnn = None\n\n predictors = {}\n predictors[\"inner\"] = layers\n predictors[\"output_layers_dict\"] = output_layer\n predictors[\"task_expected_at\"] = self.h_layers\n\n return predictors, char_rnn, wembeds, cembeds",
"def xmoe_2d_88():\n hparams = xmoe_2d()\n hparams.mesh_shape = \"b0:4;b1:8\"\n hparams.batch_size = 512\n hparams.outer_batch_size = 4\n hparams.moe_num_experts = [8, 8]\n return hparams",
"def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])",
"def _init_model(self):\n self.A_inv = np.zeros(shape=(self.numUsers, self.d, self.d))\n self.b = np.zeros(shape=(self.numUsers, self.d))\n self.w = np.zeros(shape=(self.numUsers, self.d))\n for i, mat in enumerate(self.A_inv):\n self.A_inv[i] = np.eye(self.d)",
"def __init__(self, inputSize, outputSize, hiddenSize): \n\n self.inputSize = inputSize\n self.outputSize = outputSize\n self.hiddenSize = hiddenSize \n \n # Initialize random weight with range [-0.5, 0.5]\n self.weight = np.matrix(np.random.uniform(-0.5, 0.5, (self.hiddenSize, self.inputSize)))\n\n # Initialize random bias with range [0, 1]\n self.bias = np.matrix(np.random.uniform(0, 1, (1, self.hiddenSize)))\n \n self.H = 0\n self.beta = 0",
"def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()",
"def build_matrix(self):\n self.lb_make = LabelEncoder()\n self.lb_make.fit(self.Y_train)\n tokenizer = Tokenizer(num_words=2000)\n x_array_train = numpy.asarray(self.train['text'])\n x_array_test = numpy.asarray(self.test['text'])\n tokenizer.fit_on_texts(x_array_train)\n x_train_matrix = tokenizer.texts_to_matrix(x_array_train, mode='count')\n x_test_matrix = tokenizer.texts_to_matrix(x_array_test, mode='count')\n y_train_numbers = self.lb_make.transform(self.Y_train)\n y_test_numbers = self.lb_make.transform(self.Y_test)\n y_train_matrix = keras.utils.to_categorical(y_train_numbers, 3)\n y_test_matrix = keras.utils.to_categorical(y_test_numbers, 3)\n self.tokenizer = tokenizer\n return x_train_matrix, x_test_matrix, y_train_matrix, y_test_matrix",
"def __init__(self, E, U, height, width, filter_hs, conv_non_linear,\n hidden_units, batch_size, non_static, dropout_rates,subspace_size=None,\n activations=[Iden]):\n rng = np.random.RandomState(3435)\n feature_maps = hidden_units[0]\n self.batch_size = batch_size\n\n # define model architecture\n self.index = T.lscalar()\n self.x = T.matrix('x') \n self.y = T.ivector('y') \n self.Words = theano.shared(value=E, name=\"Words\") \n self.Users = None \n self.u = None\n self.subspace_size = subspace_size\n zero_vec_tensor = T.vector()\n self.zero_vec = np.zeros(width)\n # reset Words to 0?\n self.set_zero = theano.function([zero_vec_tensor],\n updates=[(self.Words, T.set_subtensor(self.Words[0,:],zero_vec_tensor))],\n allow_input_downcast=True)\n # inputs to the ConvNet go to all convolutional filters:\n layer0_input = self.Words[T.cast(self.x.flatten(), dtype=\"int32\")].reshape(\n (self.x.shape[0], 1, self.x.shape[1], self.Words.shape[1]))\n self.conv_layers = [] \n \n # outputs of convolutional filters\n layer1_inputs = []\n image_shape = (batch_size, 1, height, width)\n filter_w = width \n for filter_h in filter_hs: \n filter_shape = (feature_maps, 1, filter_h, filter_w)\n pool_size = (height-filter_h+1, width-filter_w+1)\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,\n image_shape=image_shape,\n filter_shape=filter_shape,\n poolsize=pool_size,\n non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n self.conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n # inputs to the MLP\n layer1_input = T.concatenate(layer1_inputs, 1)\n if U is not None:\n print \"Will use user embeddings\"\n self.u = T.ivector('u')\n self.Users = theano.shared(value=U, name=\"Users\")\n them_users = self.Users[self.u]\n if self.subspace_size:\n print \"and subspace\"\n # set_trace()\n self.subspace = HiddenLayer(rng, them_users, U.shape[1], subspace_size, Sigmoid)\n self.peep = theano.function([self.x, self.u],[self.subspace.output,layer1_input],allow_input_downcast=True)\n\n layer1_input = T.concatenate((layer1_input,T.nnet.sigmoid(self.subspace.output)),1)\n layer_sizes = [feature_maps*len(filter_hs)+subspace_size] \n # layer1_input = T.concatenate((layer1_input,them_users),1)\n # layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n layer1_input = T.concatenate((layer1_input,them_users),1)\n layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n print \"NO user embeddings\"\n layer_sizes = [feature_maps*len(filter_hs)]\n layer_sizes += hidden_units[1:]\n \n super(ConvNet, self).__init__(rng, input=layer1_input,\n layer_sizes=layer_sizes,\n activations=activations,\n dropout_rates=dropout_rates)\n\n # add parameters from convolutional layers\n for conv_layer in self.conv_layers:\n self.params += conv_layer.params\n if non_static:\n # if word vectors are allowed to change, add them as model parameters\n self.params += [self.Words]\n if U is not None:\n # if self.subspace_size is None:\n self.params += [self.Users]",
"def L2X(train = True):\n print('Loading dataset...') \n x_train, y_train, x_val, y_val, id_to_word = load_data()\n #pred_train = np.load('data/pred_train.npy')\n #pred_val = np.load('data/pred_val.npy') \n print('Creating model...')\n\n # P(S|X)\n with tf.variable_scope('selection_model'):\n X_ph = Input(shape=(maxlen,), dtype='int32')\n\n logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups\n tau = 0.5 \n T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp)\n\n T = Reshape((maxlen, num_groups))(T)\n T = Permute((2, 1))(T) # bs, num_groups, max_len\n\n # q(X_S)\n with tf.variable_scope('prediction_model'):\n emb2 = Embedding(max_features, embedding_dims, \n input_length=maxlen)(X_ph)\n # emb2 bs, max_len, 50\n # apply the matrix trick as before\n # here the output size of matmul layer is different from before\n net = matmul_layer([T, emb2]) # bs, num_groups, 50\n #print(net.shape)\n net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1\n\n # net = Mean(net) # bs, 50\n input_group = Flatten()(net) # bs, num_groups\n # num_groups = K.int_shape(input_group)[1]\n # here we add instance wise f-s again!!!!\n net = Dense(100, activation='relu', name = 's/dense1',\n kernel_regularizer=regularizers.l2(1e-3))(input_group)\n net = Dense(100, activation='relu', name = 's/dense2',\n kernel_regularizer=regularizers.l2(1e-3))(net)\n logits = Dense(num_groups)(net)\n\n\n\n\n # A tensor of shape, [batch_size, max_sents, 100]\n samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits)\n new_input_group = Multiply()([input_group, samples]) \n\n\n\n net = Dense(hidden_dims, activation='relu')(new_input_group)\n preds = Dense(2, activation='softmax', \n name = 'new_dense')(net)\n\n\n model = Model(inputs=X_ph, \n outputs=preds)\n model.summary()\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',#optimizer,\n metrics=['acc']) \n #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1))\n #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1))\n #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc))\n\n if train:\n filepath=\"models/l2x.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint] \n st = time.time()\n model.fit(x_train, y_train, \n validation_data=(x_val, y_val), \n callbacks = callbacks_list,\n epochs=epochs, batch_size=batch_size)\n duration = time.time() - st\n print('Training time is {}'.format(duration)) \n\n model.load_weights('models/l2x.hdf5', by_name=True) \n\n pred_model = Model(X_ph, [T, samples]) \n pred_model.summary()\n pred_model.compile(loss='categorical_crossentropy', \n optimizer='adam', metrics=['acc']) \n\n st = time.time()\n #scores = pred_model.predict(x_val, \n # verbose = 1, batch_size = batch_size)[:,:,0] \n #scores = np.reshape(scores, [scores.shape[0], maxlen])\n scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size)\n scores_v, group_importances_v = pred_model.predict(x_val, verbose = 1, batch_size = batch_size)\n return scores_t, group_importances_t, scores_v, group_importances_v, x_val",
"def build_model(self):\n num_layers, num_units, input_window, output_window, encoder_exog_size, decoder_exog_size, dropout_rate, l2_regu =\\\n self.num_layers, self.num_units, self.input_window, self.output_window, self.encoder_exog_size, self.decoder_exog_size, self.dropout_rate, self.l2_regu\n \n #Define embedding layers (item_id, event_name), in case the embedding layers are applied to both encoder and decoder.\n event_embed = Embedding(input_dim=31, output_dim=8, mask_zero=False, name='event_embed')\n \n #Define encoder model\n encoder_input = Input(shape=(input_window, 1)) #endog input for encoder\n encoder_exog_input = Input(shape=(input_window, encoder_exog_size))\n \n encoder_concat_input = Concatenate()([encoder_input, encoder_exog_input])\n \n encoder_lstm_res = {}\n for i in range(num_layers):\n encoder_lstm = LSTM(num_units[i], kernel_regularizer=l2_regu, recurrent_regularizer=l2_regu, dropout=dropout_rate, recurrent_dropout=0,\n return_sequences=True, return_state=True, name='encoder_lstm_{}'.format(i))\n if (i == 0):\n encoder_lstm_outputs, encoder_lstm_state_h, encoder_lstm_state_c = encoder_lstm(encoder_concat_input)\n else:\n encoder_lstm_outputs, encoder_lstm_state_h, encoder_lstm_state_c = encoder_lstm(encoder_lstm_res[(i-1, 'outputs')])\n\n encoder_lstm_res[(i, 'model')] = encoder_lstm\n encoder_lstm_res[(i, 'outputs')] = encoder_lstm_outputs\n encoder_lstm_res[(i, 'states')] = [encoder_lstm_state_h, encoder_lstm_state_c]\n\n #Define decoder model\n #endog input for decoder. It is always a vector of 0s, meaning that model is trained unconditionally without using any forecast information.\n decoder_input = Input(shape=(output_window, 1))\n decoder_exog_input = Input(shape=(output_window, decoder_exog_size))\n \n decoder_event_input = Input(shape=(output_window,))\n decoder_event_embed = event_embed(decoder_event_input)\n \n decoder_concat_input = Concatenate()([decoder_input, decoder_exog_input, decoder_event_embed])\n \n decoder_lstm_res = {}\n for i in range(num_layers):\n decoder_lstm = LSTM(num_units[i], kernel_regularizer=l2_regu, recurrent_regularizer=l2_regu, dropout=dropout_rate, recurrent_dropout=0,\n return_sequences=True, return_state=True, name='decoder_lstm_{}'.format(i))\n if (i == 0):\n decoder_lstm_outputs, _, _ = decoder_lstm(decoder_concat_input, initial_state=encoder_lstm_res[(i, 'states')])\n else:\n decoder_lstm_outputs, _, _ = decoder_lstm(decoder_lstm_res[(i-1, 'outputs')], initial_state=encoder_lstm_res[(i, 'states')])\n\n decoder_lstm_res[(i, 'model')] = decoder_lstm\n decoder_lstm_res[(i, 'outputs')] = decoder_lstm_outputs\n\n decoder_output = Dense(1, activation=None, kernel_regularizer=l2_regu, name='decoder_output')(decoder_lstm_outputs)\n\n #training mode of model\n model = Model(inputs = [encoder_input, encoder_exog_input, decoder_input, decoder_exog_input, decoder_event_input], outputs = decoder_output)\n adam = Adam(learning_rate=self.lr)\n model.compile(optimizer=adam, loss='mse')\n print(model.summary())\n \n self.model = model\n \n return(model)",
"def build_model(input_classes,output_classes):\n dimensions = 20\n inputs = []\n embedded_outputs = []\n for i in input_classes:\n input_layer = Input((1,))\n inputs.append(input_layer)\n embedder = Embedding(input_dim=i,output_dim=dimensions,input_length=1,embeddings_constraint=UnitNorm(axis=0))\n embedded_layer = embedder(input_layer)\n embedded_outputs.append(embedded_layer)\n\n embedded_concats = Concatenate()(embedded_outputs)\n flatten_layer = Flatten()\n\n dense_layer = Dense(output_classes)\n\n flattened_output = flatten_layer(embedded_concats)\n dense_output = dense_layer(flattened_output)\n\n # dense_output = dense_layer(embedded_concats)\n\n model = Model(inputs,dense_output)\n print(model.summary())\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n\n return model",
"def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2, num_code_units, filter_size, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n print(\"Input shape: \",lasagne.layers.get_output_shape(l_in))\n\n # print(shaped_units)\n # shaped_units = shaped_units[0]\n shaped_units = 2800\n\n # print(shape)\n\n l_conv2D_1 = lasagne.layers.Conv2DLayer(\n l_in, \n num_filters=8,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n print(\"Conv 2D shape: \",lasagne.layers.get_output_shape(l_conv2D_1))\n\n l_reshape_1 = lasagne.layers.ReshapeLayer(\n l_conv2D_1,\n shape=(([0], -1))\n )\n\n print(\"Reshape 1 shape: \", lasagne.layers.get_output_shape(l_reshape_1))\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_reshape_1,\n num_units= num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 1 shape: \", lasagne.layers.get_output_shape(l_hidden_1))\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Code layer shape: \",lasagne.layers.get_output_shape(l_code_layer))\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 2 shape: \",lasagne.layers.get_output_shape(l_hidden_2))\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=shaped_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 3 shape: \",lasagne.layers.get_output_shape(l_hidden_3))\n\n l_reshape_2 = lasagne.layers.ReshapeLayer(\n l_hidden_3,\n shape=(([0],8,7,50))\n )\n\n print(\"Reshape 2 shape: \",lasagne.layers.get_output_shape(l_reshape_2))\n\n l_out = lasagne.layers.Conv2DLayer(\n l_reshape_2, \n num_filters=1,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n # print(\"Deconv shape: \",lasagne.layers.get_output_shape(l_deconv2D_1))\n\n print(\"Output shape: \",lasagne.layers.get_output_shape(l_out))\n\n return l_out",
"def instantiate_weights(self):\n self.product_embeddings = tf.get_variable(\n name='product_embeddings',\n shape=[50000, 300],\n dtype=tf.float32\n )\n self.aisle_embeddings = tf.get_variable(\n name='aisle_embeddings',\n shape=[250, 50],\n dtype=tf.float32\n )\n self.department_embeddings = tf.get_variable(\n name='department_embeddings',\n shape=[50, 10],\n dtype=tf.float32\n )\n self.W_relu = tf.get_variable(\"W_relu\",shape=[670, 30]) #这个参数后续需要自适应\n self.b_relu = tf.get_variable(\"bias_relu\",shape=[30]) \n self.W_projection = tf.get_variable(\"W_projection\",shape=[30, 1]) \n self.b_projection = tf.get_variable(\"bias_projection\",shape=[1])",
"def build(self, input_shape):\n\n # get shape of input matrix x\n self.num_of_vertices = input_shape[1]\n self.num_of_features = input_shape[2]\n self.num_of_timesteps = input_shape[3]\n #self.W_1.shape = (num_of_timesteps,)\n #self.W_2.shape = (num_of_features, num_of_timesteps)\n #self.W_3.shape = (num_of_features,)\n #self.b_s.shape = (1, num_of_vertices, num_of_vertices)\n #self.V_s.shape = (num_of_vertices, num_of_vertices)\n\n self.w1 = self.add_weight(name='w1', shape=(self.num_of_timesteps, ),\n initializer='glorot_normal', trainable=True)\n self.w2 = self.add_weight(name='w2', shape=(self.num_of_features, self.num_of_timesteps),\n initializer='glorot_normal', trainable=True)\n self.w3 = self.add_weight(name='w3', shape=(self.num_of_features,),\n initializer='glorot_normal', trainable=True)\n self.b = self.add_weight(name='b', shape=(1, self.num_of_vertices, self.num_of_vertices),\n initializer='zeros', trainable=True)\n self.v = self.add_weight(name='v', shape=(self.num_of_vertices, self.num_of_vertices),\n initializer='glorot_normal', trainable=True)\n\n super(Spatial_Attention_layer, self).build(input_shape)",
"def optimize(self):\n self.u = np.random.uniform(-1, 1, (self.batchsize, 288, 1, 1))\n self.l2 = torch.from_numpy(self.u).float()\n self.n = torch.randn(self.batchsize, 1, 28, 28)\n self.l1 = self.enc(self.input + self.n)\n print(self.l1.shape,99999999999999999999999999999999999)\n self.del1=self.dec(self.l1)\n self.del2=self.dec(self.l2)\n self.update_netc()\n self.update_netd()\n\n self.update_l2()\n self.update_netg()",
"def xmoe_2d():\n hparams = xmoe_top_2()\n hparams.decoder_layers = [\"att\", \"hmoe\"] * 4\n hparams.mesh_shape = \"b0:2;b1:4\"\n hparams.outer_batch_size = 4\n hparams.layout = \"outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0\"\n hparams.moe_num_experts = [4, 4]\n return hparams",
"def instantiate_weights(self):\n with tf.name_scope(\"decoder_init_state\"):\n self.W_initial_state = tf.get_variable(\"W_initial_state\", shape=[self.hidden_size, self.hidden_size*2], initializer=self.initializer)\n self.b_initial_state = tf.get_variable(\"b_initial_state\", shape=[self.hidden_size*2])\n with tf.name_scope(\"embedding_projection\"): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],initializer=self.initializer) # [vocab_size,embed_size] tf.random_uniform([self.vocab_size, self.embed_size],-1.0,1.0)\n self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size*2],dtype=tf.float32) #,initializer=self.initializer\n self.W_projection = tf.get_variable(\"W_projection\", shape=[self.hidden_size*2, self.num_classes],\n initializer=self.initializer) # [embed_size,label_size]\n self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])\n\n # GRU parameters:update gate related\n with tf.name_scope(\"gru_weights_encoder\"):\n self.W_z = tf.get_variable(\"W_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_z = tf.get_variable(\"U_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_z = tf.get_variable(\"b_z\", shape=[self.hidden_size])\n # GRU parameters:reset gate related\n self.W_r = tf.get_variable(\"W_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_r = tf.get_variable(\"U_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_r = tf.get_variable(\"b_r\", shape=[self.hidden_size])\n\n self.W_h = tf.get_variable(\"W_h\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_h = tf.get_variable(\"U_h\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_h = tf.get_variable(\"b_h\", shape=[self.hidden_size])\n\n with tf.name_scope(\"gru_weights_decoder\"):\n self.W_z_decoder = tf.get_variable(\"W_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_z_decoder = tf.get_variable(\"U_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_z_decoder = tf.get_variable(\"C_z_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_z_decoder = tf.get_variable(\"b_z_decoder\", shape=[self.hidden_size*2])\n # GRU parameters:reset gate related\n self.W_r_decoder = tf.get_variable(\"W_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_r_decoder = tf.get_variable(\"U_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_r_decoder = tf.get_variable(\"C_r_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_r_decoder = tf.get_variable(\"b_r_decoder\", shape=[self.hidden_size*2])\n\n self.W_h_decoder = tf.get_variable(\"W_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_h_decoder = tf.get_variable(\"U_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer) #TODO\n self.C_h_decoder = tf.get_variable(\"C_h_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer)\n self.b_h_decoder = tf.get_variable(\"b_h_decoder\", shape=[self.hidden_size*2])\n\n with tf.name_scope(\"full_connected\"):\n 
self.W_fc=tf.get_variable(\"W_fc\",shape=[self.hidden_size*2,self.hidden_size])\n self.a_fc=tf.get_variable(\"a_fc\",shape=[self.hidden_size])",
"def long_answer_small():\n hparams = long_answer_base()\n hparams.num_hidden_layers = 4\n hparams.hidden_size = 512\n hparams.filter_size = 2048\n hparams.moe_n1 = 128\n hparams.moe_layers = \"2\"\n hparams.moe_hidden_size = 2048\n return hparams",
"def __init__(self, vocab_size, embedding_size, context_size, hid_dim, out_dim):\n super(Net, self).__init__()\n self.E = nn.Embedding(vocab_size, embedding_size) # Embedding matrix\n self.after_embed_size = embedding_size * context_size\n self.lin = nn.Linear(self.after_embed_size, hid_dim)\n self.lin2 = nn.Linear(hid_dim, out_dim)",
"def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2,num_hidden_units_3, num_code_units, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_in,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.softmax,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_4 = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_5 = lasagne.layers.DenseLayer(\n l_hidden_4,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_out = lasagne.layers.DenseLayer(\n l_hidden_5,\n num_units=output_dim,\n nonlinearity=None,\n )\n\n return l_out",
"def __init__(self, input_size, hidden_size, en_bias):\n self.en_bias = en_bias\n # weight matrix and bias vector\n self.u = self.random(-np.sqrt(1.0/input_size),\n np.sqrt(1.0/input_size), (hidden_size, input_size))\n self.w = self.random(-np.sqrt(1.0/hidden_size),\n np.sqrt(1.0/hidden_size), (hidden_size, hidden_size))\n self.v = self.random(-np.sqrt(1.0/hidden_size),\n np.sqrt(1.0/hidden_size), (hidden_size, hidden_size))\n if en_bias:\n self.b = self.random(-0.1, 0.1, (hidden_size,))\n else:\n self.b = np.zeros(hidden_size)\n # error gradient for weight matrix and bias vector\n self.dLdu = np.zeros(self.u.shape)\n self.dLdw = np.zeros(self.w.shape)\n self.dLdv = np.zeros(self.v.shape)\n self.dLdb = np.zeros(self.b.shape)"
] | [
"0.60445243",
"0.5994819",
"0.59890795",
"0.5960874",
"0.59296095",
"0.59175307",
"0.59017277",
"0.58717036",
"0.5868812",
"0.5800571",
"0.56979",
"0.56773806",
"0.5673088",
"0.56546074",
"0.56416607",
"0.5627796",
"0.5624328",
"0.55904293",
"0.55746734",
"0.5568759",
"0.555732",
"0.5541044",
"0.5534183",
"0.5531511",
"0.5530943",
"0.55239993",
"0.55236226",
"0.5515297",
"0.5480723",
"0.547687"
] | 0.73267615 | 0 |
Function that retrieves the size of the hidden layer | def get_hidden_layer_size(self):
return self.hidden_layer_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def layer_size(self, layer_id): # -> int:\n ...",
"def hidden_size(self):\n return self._internal.get_hidden_size()",
"def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size",
"def get_size(self):\n return self._surf.get_size()",
"def layers_compressed_size(self):\n # don't have this information at this point\n return None",
"def layers_compressed_size(self):\n # don't have this information at this point\n return None",
"def hidden_dim(self):\n\n return self.__hidden_dim",
"def get_layer_size(self, layer_ind):\n assert(layer_ind < self.num_layers)\n return self._layer_sizes[layer_ind]",
"def hidden_size(self) ->int:\n return self._cell.hidden_size",
"def get_num_hidden(self) -> int:\n return self.output_dim",
"def size(self):\n\t\treturn self.dims",
"def get_layer_shape(self,layer_id):\n return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size",
"def dimension_size(self):\n return self._dim",
"def get_num_hidden(self) -> int:\n return self.config.model_size",
"def upperLayersSize(self):\n return sys.getsizeof(self.segment)",
"def layers_sizes(self):\n return iter([self.delta_h*l for l in range(int(self.h/self.delta_h)-1)])",
"def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height",
"def num_layers(self):\n\n return 2 + self.num_hidden_layers",
"def encoded_display_shape(hidden_size):\n width = math.sqrt(hidden_size)\n height = width\n if not width.is_integer():\n width = hidden_size\n height = 1\n else:\n width = int(width)\n height = int(height)\n return width, height",
"def __len__(self):\n return self.flat_image.size",
"def size_out(self):\n return self.dimensions",
"def get_dimension_length(self):\n pass",
"def get_visual_size(self):\n print(self.my_name)\n print(self.my_distance)\n print(self.my_size)\n pass # do some fancyness here",
"def dimensions():",
"def nHiddenLayers(self):\n\n\t\treturn self._nHiddenLayers",
"def size(self) -> tf.Tensor:",
"def getSize(self):\n\n return self.size",
"def getSize(self):\r\n return self.size",
"def size(img):\n\treturn img.size",
"def get_image_size(self):"
] | [
"0.7838551",
"0.7757203",
"0.765589",
"0.7248255",
"0.72339445",
"0.72339445",
"0.71614826",
"0.71602",
"0.71266425",
"0.7090172",
"0.70477694",
"0.70440054",
"0.6969208",
"0.69381",
"0.69150704",
"0.687971",
"0.68564427",
"0.6826497",
"0.6812965",
"0.6804558",
"0.67893684",
"0.6738063",
"0.6736313",
"0.6727528",
"0.67225057",
"0.67118394",
"0.67111933",
"0.6710462",
"0.6701995",
"0.6668482"
] | 0.88835496 | 0 |
Model forward pass. input: current input in t; contextState: previous output in (t-1), the sequence of hidden states | def forward(self, x, contextState):
#concatenate input and context state
#x = x.t()
xAndContext = torch.cat((x, contextState), 1)
#calculate next context state (hidden output for current t) with tanh(xAndContext * W1)
contextState = torch.tanh(xAndContext.mm(self.W1))
# Calculates final output
output = contextState.mm(self.V)
return (output, contextState) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, prev_state, obs_t):\r\n # Use your network to compute qvalues for given state\r\n #print(state_t.shape)\r\n h = self.conv(obs_t)\r\n\r\n h = h.view(h.size(0), -1)\r\n\r\n new_state = h_new, c_new = self.lstm(h, prev_state)\r\n advantage = self.adv(h_new)\r\n value = self.val(h_new)\r\n\r\n\r\n adv_mean = torch.mean(advantage, dim=1, keepdim=True)\r\n qvalues = value + advantage - adv_mean\r\n\r\n return new_state, qvalues",
"def forward(self, inp, state_0):\n next_inp = inp.view(1, -1)\n next_state = state_0\n outputs = []\n for i in range(self.seq_len):\n out, next_state = self.model(next_inp, next_state)\n outputs.append(out)\n next_inp = torch.argmax(out, dim=-1)\n\n return torch.cat(outputs, dim=0), next_state",
"def forward(self, input, context, state):\n raise NotImplementedError",
"def forward(self, input, hidden, ctx):\n def recurrence(input, hidden, ctx):\n \"\"\"Recurrence helper.\"\"\"\n i_r = self.input_weights_r(input)\n i_i = self.input_weights_i(input)\n i_n = self.input_weights_n(input)\n\n h_r = self.hidden_weights_r(hidden)\n h_i = self.hidden_weights_i(hidden)\n\n p_r = self.peep_weights_r(ctx)\n p_i = self.peep_weights_i(ctx)\n p_n = self.peep_weights_n(ctx)\n\n\n\n resetgate = self.reset(i_r + h_r + p_r)\n inputgate = self.input(i_i + h_i + p_i)\n newgate = self.new(i_n + self.hidden_weights_n(resetgate * hidden) + p_n)\n hy = (1 - inputgate) * hidden + inputgate * newgate\n return hy\n\n input = input.transpose(0, 1)\n ctx = ctx.transpose(0, 1)\n\n output = []\n steps = range(input.size(0))\n for i in steps:\n hidden = recurrence(input[i], hidden, ctx[i])\n if isinstance(hidden, tuple):\n output.append(hidden[0])\n else:\n output.append(hidden)\n\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n output = output.transpose(0, 1)\n return output, hidden",
"def forward(self, input, states):\n (hidden, cell) = states\n\n input = input + self.transform(hidden)\n\n forget_gate = torch.sigmoid(self.forget(input))\n input_gate = torch.sigmoid(self.input(input))\n output_gate = torch.sigmoid(self.output(input))\n state_gate = torch.tanh(self.state(input))\n\n # Update internal cell state\n cell = forget_gate * cell + input_gate * state_gate\n hidden = output_gate * torch.tanh(cell)\n\n return hidden, cell",
"def forward(self, input, hidden):\r\n output, hidden = self.rnn(input, hidden)\r\n output = f.log_softmax(self.out(output.squeeze(1)), 1)\r\n return output, hidden",
"def forward(self, state):\n #pass\n #forward through each layer in \"hidden layer\", with ReLU activation unit between them\n for linear in self.hidden_layers:\n state = F.relu(linear(state))\n \n state = self.output(state)\n return state#F.log_softmax(state, dim=1)",
"def forward(self, input, hidden, ctx):\n def recurrence(input, hidden, ctx):\n \"\"\"Recurrence helper.\"\"\"\n input_gate = self.input_weights(input)\n hidden_gate = self.hidden_weights(hidden)\n peep_gate = self.peep_weights(ctx)\n i_r, i_i, i_n = input_gate.chunk(3, 1)\n h_r, h_i, h_n = hidden_gate.chunk(3, 1)\n p_r, p_i, p_n = peep_gate.chunk(3, 1)\n resetgate = self.reset(i_r + h_r + p_r)\n inputgate = self.input(i_i + h_i + p_i)\n newgate = self.new(i_n + resetgate * h_n + p_n)\n hy = newgate + inputgate * (hidden - newgate)\n\n return hy\n\n input = input.transpose(0, 1)\n ctx = ctx.transpose(0, 1)\n\n output = []\n steps = range(input.size(0))\n for i in steps:\n hidden = recurrence(input[i], hidden, ctx[i])\n if isinstance(hidden, tuple):\n output.append(hidden[0])\n else:\n output.append(hidden)\n\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n output = output.transpose(0, 1)\n return output, hidden",
"def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan\n\n # Convert vector-tensor form into matrix-tensor form\n x_t = tf.reshape(x_t, shape=[1, -1])\n h_tm1 = tf.reshape(h_tm1, shape=[1, -1])\n\n # Definitions of z_t and r_t\n z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)\n\n # Definition of h~_t\n h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)\n\n return tf.squeeze(h_t)",
"def forward(self, inputs):\n _, state = self.core(inputs)\n return state",
"def forward(self, inp, state):\n emb = self.drop(self.encoder(inp))\n y, state_next = self.rnn(emb, state)\n y = self.drop(y)\n y = self.decoder(y)\n return y, state_next",
"def forward(self, state):\n x = self.nonlin(self.fc1(self.in_fn(state)))\n x = self.drop_layer(x)\n x = self.nonlin(self.fc2(x))\n x = self.drop_layer(x)\n return self.fc3(x)",
"def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n #return self.fc3(x)\n return F.softmax(self.fc3(x), dim=1)",
"def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r",
"def forward(self, x):\n x, self.hidden = self.lstm(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x",
"def forward(self, x, hidden):\n batch_size=x.shape[0]\n\n x = self.embed(x)\n\n x,hidden = self.lstm(x,hidden)\n\n x = x.reshape(-1,self.hidden_dim)\n\n x = self.drp(x)\n\n x = self.fc(x)\n\n sig_out = self.sigmoid(x)\n\n # return last sigmoid output and hidden state\n sig_out = sig_out.reshape(batch_size,-1)\n sig_out = sig_out[:,-1]\n\n return sig_out, hidden",
"def forward_step(self, layer: int, hidden: AmbiguousHidden, input_: Tensor) -> AmbiguousHidden:\n hx, cx = hidden\n\n # Forget gate\n f_g = torch.sigmoid(self.gates[layer]['if'](input_) + self.gates[layer]['hf'](hx))\n\n # Input gate\n i_g = torch.sigmoid(self.gates[layer]['ii'](input_) + self.gates[layer]['hi'](hx))\n\n # Output gate\n o_g = torch.sigmoid(self.gates[layer]['io'](input_) + self.gates[layer]['ho'](hx))\n\n # Intermediate cell state\n c_tilde_g = torch.tanh(self.gates[layer]['ig'](input_) + self.gates[layer]['hg'](hx))\n\n # New cell state\n cx = f_g * cx + i_g * c_tilde_g\n\n # New hidden state\n hx = o_g * torch.tanh(cx)\n\n return hx, cx",
"def forward(self, inputs, prev_state):\n output = []\n state = {\n 'controller_state': prev_state['controller_state'],\n 'memory_state': prev_state['memory_state']\n }\n steps = inputs.shape[1]\n batch_size = inputs.shape[0]\n batch_history_read = torch.zeros((batch_size, steps, self.memory.num_read_heads, self.memory.num_rows))\n batch_history_write = torch.zeros((batch_size, steps, self.memory.num_write_heads, self.memory.num_rows))\n\n for i in range(steps):\n controller_state = self.controller(inputs[:, i, :], state['controller_state'])\n\n controller_output = controller_state[0]\n\n read_vector, memory_state = self.memory(self.layer_norm(self._clip_if_enabled(controller_output)), state['memory_state'])\n state = {\n 'controller_state': controller_state,\n 'memory_state': memory_state\n }\n\n for batch in range(batch_size):\n batch_history_read[batch][i] = memory_state['read_weights'][batch]\n batch_history_write[batch][i] = memory_state['write_weights'][batch]\n\n dropped_controller_output = self.dropout(controller_output)\n read_vector = torch.flatten(read_vector, start_dim=1)\n input_final_layer = torch.cat((dropped_controller_output, read_vector), 1)\n final_output = self.linear(input_final_layer)\n output.append(final_output)\n \n # we are interested only on the last output of the sequence\n out = output[-1]\n return out, state, batch_history_read, batch_history_write",
"def forward(self, input, hidden_states):\n h, c = self.ih2h(input, hidden_states[0])\n next_hiddens = [(h, c)]\n h, c = self.h2h(h, hidden_states[1])\n next_hiddens.append((h, c))\n output = self.log_softmax(self.h2o(h))\n return output, next_hiddens",
"def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)",
"def forward(self, input, last_hidden):\n \n self._W_i = torch.cat((self._W_x2i, self._W_h2i), 0)\n self._W_f = torch.cat((self._W_x2f, self._W_h2f), 0)\n self._W_o = torch.cat((self._W_x2o, self._W_h2o), 0)\n self._W_c = torch.cat((self._W_x2c, self._W_h2c), 0)\n \n c_input = torch.cat((input, last_hidden[\"h\"]), 1)\n\n pre_i = torch.mm(c_input, self._W_i) + self._b_i + last_hidden[\"c\"] * self._W_c2i\n if self._layer_norm:\n pre_i = self._ln_i(pre_i)\n i = torch.sigmoid(pre_i)\n\n pre_f = torch.mm(c_input, self._W_f) + self._b_f + last_hidden[\"c\"] * self._W_c2f\n if self._layer_norm:\n pre_f = self._ln_f(pre_f)\n f = torch.sigmoid(pre_f)\n\n cp_input = torch.cat((input, last_hidden[\"h\"]), 1)\n pre_cp = torch.mm(cp_input, self._W_c) + self._b_c\n if self._layer_norm:\n pre_cp = self._ln_g(pre_cp)\n cp = torch.tanh(pre_cp)\n\n c = f * last_hidden[\"c\"] + i * cp\n\n o_input = torch.cat((input, last_hidden[\"h\"]), 1)\n pre_o = torch.mm(o_input, self._W_o) + self._b_o + c * self._W_c2o\n if self._layer_norm:\n pre_o = self._ln_o(pre_o)\n o = torch.sigmoid(pre_o)\n\n if self._layer_norm:\n c = self._ln_c(c)\n h = o*torch.tanh(c)\n \n hidden = {}\n hidden[\"h\"] = h\n hidden[\"c\"] = c \n return hidden",
"def forward(self, h_prev, x_t):\n m, _ = h_prev.shape\n # our input uses h and x together\n h = np.concatenate((h_prev, x_t), axis=1)\n # calculate update gate\n in1 = h @ self.Wz + self.bz\n z = 1 / (1 + np.exp(-1 * in1))\n # and reset gate\n in2 = h @ self.Wr + self.br\n r = 1 / (1 + np.exp(-1 * in2))\n # then new hidden state\n coef = np.concatenate((r * h_prev, x_t), axis=1)\n h_temp = np.tanh((coef) @ self.Wh + self.bh)\n # finally new output\n # print(z.shape, (1-z).shape)\n h_next = (1 - z) * h_prev + z * h_temp\n output = h_next @ self.Wy + self.by\n # softmax of the output\n y = np.exp(output - np.max(output))\n y = y / y.sum(axis=1)[:, np.newaxis]\n return h_next, y\n # code for vanilla RNN if you want to compare\n h_next = np.tanh(h @ self.Wh + self.bh)\n output = h_next @ self.Wy + self.by\n # softmax the output to get y\n y = np.exp(output - np.max(output))\n y = y / y.sum(axis=1)[:, np.newaxis]\n return h_next, y",
"def forward(self, state):\n x = self.conv(state).view(-1, self.hid_size)\n x = self.fc1(x)\n x = F.relu(x)\n return self.fc2(x)",
"def forward(self, state):\n x = F.relu(self.input(state))\n for layer in self.layers:\n x = F.relu(layer(x))\n if self.duel:\n # Value function estimator\n val = F.relu(self.val_fc_input(x))\n val = self.val_fc_output(val)\n # Advantage function estimator\n adv = F.relu(self.adv_fc_input(x))\n adv = self.adv_fc_output(adv)\n # Subtract mean so that V and A are uniquely identifiable for a given Q\n return val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.action_size)\n else:\n return self.output(x)",
"def forward(self, state):\n x = self.fc(state)\n return x",
"def forward(self, input):\n\n x = self.conv(input)\n x = self.bn(x)\n out = self.act(x)\n return out",
"def forward(self, x, init_state, give_gates=False):\n seq_sz, bs, _ = x.size()\n\n update_gates = torch.empty(seq_sz, bs, self.hidden_size)\n reset_gates = torch.empty(seq_sz, bs, self.hidden_size)\n hidden_states = torch.empty(seq_sz, bs, self.hidden_size)\n\n h_t = init_state\n\n for t in range(seq_sz): # iterate over the time steps\n x_t = x[t, :, :]\n\n gi = F.linear(x_t, self.weight_ih_l0, self.bias_ih_l0) # do the matmul all together\n gh = F.linear(h_t, self.weight_hh_l0, self.bias_hh_l0)\n\n i_r, i_z, i_n = gi.chunk(3,1) # input currents\n h_r, h_z, h_n = gh.chunk(3,2) # hidden currents\n\n r_t = torch.sigmoid(i_r + h_r)\n z_t = torch.sigmoid(i_z + h_z)\n n = self.f(i_n + r_t*h_n)\n h_t = n + z_t*(h_t - n)\n\n update_gates[t,:,:] = z_t\n reset_gates[t,:,:] = r_t\n hidden_states[t,:,:] = h_t\n\n output = hidden_states\n\n if give_gates:\n return output, h_t, (update_gates, reset_gates)\n else:\n return output, h_t",
"def forward(self, inputs, mode):\n self.hidden = self.init_hidden() \n\n if mode == 'a':\n\n embedding_word = self.embedding(inputs).view(self.embedding.shape.size())\n lstm_out, self.hidden = self.lstm_words(embedding_word.view(len(inputs), 1, -1), self.hidden)\n softmax_out = self.softmax(self.fc(lstm_out)) \n\n if mode == 'b':\n \n embed_chars = self.embedding_chars(inputs).view(self.embedding_chars.shape.size()) \n lstm_out_chars, self.hidden = self.lstm_chars(embed_chars.view(len(inputs), 1, -1),self.hidden) \n softmax_out = self.softmax(self.fc(lstm_out_chars))\n\n if mode == 'c': \n\n embedding_prefix = self.embeds_prefix(inputs[0]).view(self.embeds_prefix.shape.size())\n lstm_out_prefix, self.hidden = self.lstm_prefix(embedding_prefix.view(len(inputs[0]), 1, -1), self.hidden)\n embedding_suffix = self.embeds_suffix(inputs[1]).view(self.embeds_suffix.shape.size())\n lstm_out_suffix, self.hidden = self.lstm_suffix(embedding_suffix.view(len(inputs[1]), 1, -1), self.hidden)\n lstm_out = lstm_out_prefix+lstm_out_suffix\n softmax_out = self.softmax(self.fc(lstm_out)) \n\n if mode == 'd': \n\n embedding_c_words = self.embedding_concat_words(inputs[0]).view(self.embedding_concat_words.shape.size())\n embedding_c_chars = self.embedding_concat_chars(inputs[1]).view(self.embedding_concat_chars.shape.size())\n concat_input = torch.cat((embedding_c_words, embedding_c_chars),1)\n lstm_out, self.hidden = self.lstm_concat(concat_input.view(100 , 1, -1), self.hidden)\n softmax_out = self.softmax(self.fc(lstm_out)) \n\n return softmax_out, self.hidden",
"def feedforward(self, inputs):\n # hidden activations\n # a_hidden = self.transfer(np.dot(self.w_input, inputs))\n a_hidden = self.transfer(np.dot(inputs, self.w_input))\n \n #a_output = self.transfer(np.dot(self.w_output, a_hidden))\n dots = (np.dot(a_hidden, self.w_output))\n a_output = self.transfer(np.asarray(dots))\n\n return (a_hidden, a_output)",
"def forward(self, x, hidden):\n emb_x = self.emb_layer(x)\n lstm_out, hidden = self.lstm(emb_x, hidden)\n if self.bidirectional:\n # separate to forward and backward\n # following code reshapes LSTM output to:\n # (batch size, seq length, num directions, hidden dimensions)\n # where direction '0' is forward and direction '1' is backward\n lstm_out = lstm_out.contiguous().view(-1, self.seq_len, 2, self.hidden_dim)\n # get backward output in first node\n lstm_out_bw = lstm_out[:, 0, 1, :]\n # get forward output in last node\n lstm_out_fw = lstm_out[:, -1, 0, :]\n # we may simple concatenate forward & backward outputs,\n # or add them, multiply or average; in this case i used average\n lstm_out = torch.add(input=lstm_out_bw, alpha=1, other=lstm_out_fw)\n lstm_out = torch.div(lstm_out, 2)\n else:\n lstm_out = lstm_out[:, -1]\n \n assert lstm_out.shape[-1] == self.hidden_dim, (lstm_out.shape, self.hidden_dim)\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n d_out = self.dropout(lstm_out)\n fc_out = self.output_layer(d_out)\n sig_out = torch.sigmoid(fc_out)\n \n # return last sigmoid output and hidden state\n return sig_out, hidden"
] | [
"0.70666903",
"0.6960144",
"0.6944148",
"0.6924527",
"0.68692386",
"0.68370396",
"0.68172926",
"0.6813111",
"0.68120724",
"0.68052375",
"0.67952406",
"0.6781737",
"0.67794245",
"0.6764326",
"0.6733078",
"0.6679583",
"0.66273844",
"0.6616958",
"0.65847284",
"0.657578",
"0.65555036",
"0.65545714",
"0.65495396",
"0.6548636",
"0.6543853",
"0.6526107",
"0.6521318",
"0.65166944",
"0.6513832",
"0.65136766"
] | 0.7525371 | 0 |
Check if two Elongation objects are equivalent. | def __eq__(self, other):
return isinstance(other, Elongation)\
and len(self.xs) == len(other.xs)\
and all(self.xs == other.xs) and all(self.ys == other.ys)\
and self.gauge_length == other.gauge_length\
and self.sample_width == other.sample_width\
and self.sample_thickness == other.sample_thickness\
and self.name == other.name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def areEquivalent(*args):\n return _libsbml.Unit_areEquivalent(*args)",
"def equivalent(self, other):\n return id(self) == id(other)",
"def almost_equals(self, other):\n if self.__class__ is other.__class__ and len(self) == len(other):\n for a, b in zip(self, other):\n if not a.almost_equals(b):\n return False\n return True\n else:\n return False",
"def check_equivalent(self, a, b):\n assert len(a) == len(b)\n for x, y in zip(a, b):\n assert self.is_equal(x, y)",
"def is_equal(self, a, b):\n return a.X[0] == b.X[0]",
"def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)",
"def is_equivalence(self) -> bool:",
"def areEquivalent(*args):\n return _libsbml.UnitDefinition_areEquivalent(*args)",
"def __eq__(self, other):\n return self._dna == other._dna and \\\n self._exons == other._exons",
"def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c",
"def __eq__(self, other):\n\n same_ae = True\n\n if (self.date_start != other.date_start) or \\\n (self.date_end != other.date_end) or \\\n (self.type_event != other.type_event):\n\n same_ae = False\n\n return same_ae",
"def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == other.nonderived_directions",
"def is_equal(self, other):\n return (other.__class__ == self.__class__\n and other.subscript == self.subscript\n and other.swept_inames == self.swept_inames)",
"def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False",
"def __eq__(self, other):\n \n if not tools.data_are_equal(self.attrs, other.attrs):\n print('here')\n return False\n \n return tools.data_are_equal(self.components, other.components)",
"def __eq__(self, other):\n return self.position.data == other.position.data and \\\n self.velocity.data == other.velocity.data",
"def Unit_areEquivalent(*args):\n return _libsbml.Unit_areEquivalent(*args)",
"def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b",
"def __eq__(self, other):\n if not type(other) == type(self):\n return False\n sedges, oedges = self.edges, other.edges\n return ((len(sedges) == len(oedges)) and\n all(numpy.all(se == oe) for (se, oe) in zip(sedges, oedges)))",
"def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True",
"def testEquality(self):\n pass",
"def __eq__(self, other):\n return isinstance(other, self.__class__) and \\\n self.content == other.content and self.justification == other.justification",
"def test_equality_check_against_other_object_doesnt_raise_exception(self):\n test_object = Vec3(1, 2, 3)\n self.assertFalse(test_object == Quat(1, 2, 3, 4))\n self.assertFalse(Quat(1, 2, 3, 4) == test_object)\n self.assertTrue(test_object != Quat(1, 2, 3, 4))\n self.assertTrue(Quat(1, 2, 3, 4) != test_object)",
"def isEquivalent(self, oth: 'StateNode') -> bool:\n a = [self.table[i][j] for i in self.state[0] for j in self.state[1]]\n b = [oth.table[i][j] for i in oth.state[0] for j in oth.state[1]]\n if len(a) != len(b):\n return False\n if len(a) < 1 or len(b) < 1 or len(a[0]) != len(b[0]):\n return False\n for i in range(len(a)):\n for j in range(len(a[0])):\n if a[i][j] != b[i][j]:\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, PantsMappingClass):\n # print(\"A\")\n return False\n # if other._pants_decomposition != self._pants_decomposition:\n # print(\"B\")\n # return False\n # print(\"C\")\n return (self * other.inverse()).is_identity()",
"def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True",
"def __eq__(self, other) -> bool:\n return (\n self._start is other._start\n and self._end is other._end\n and self._label == other._label\n and self._has_direction is other._has_direction\n )",
"def is_same_as(self, other) -> bool:\n return self.x == other.x and self.y == other.y",
"def is_equal(self, a, b):\n return a is b",
"def are_equal(self, sp1, sp2):\n return True"
] | [
"0.7179371",
"0.71698356",
"0.70256376",
"0.69242305",
"0.6891672",
"0.6889412",
"0.6868289",
"0.6853487",
"0.6802602",
"0.6787559",
"0.6784947",
"0.6781069",
"0.6712332",
"0.6677166",
"0.6673854",
"0.6671227",
"0.66428155",
"0.66405296",
"0.66279215",
"0.66225433",
"0.6601957",
"0.6598766",
"0.65949863",
"0.65932983",
"0.6592679",
"0.65721464",
"0.6563296",
"0.6556987",
"0.6553797",
"0.65511143"
] | 0.7408706 | 0 |
Generate a smoothed version of the Elongation. | def smoothed(self, box_pts=True):
elong = self.copy()
elong.ys = smooth_curve(self.ys, box_pts)
return elong | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _smooth(self):\n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n self._extrapolate()",
"def fingauss_smoothing(engine, smoothed, R, d_k):\n code = CodeSegment(engine)\n def tf(k):\n k2 = sum(((2*kny/numpy.pi)*numpy.sin(ki*numpy.pi/(2*kny)))**2 for ki in k)\n wts = numpy.exp(-0.5*k2* R**2)\n return wts\n \n kny = numpy.pi*engine.pm.Nmesh[0]/engine.pm.BoxSize[0]\n code.assign(x='d_k', y='tmp')\n code.transfer(complex='tmp', tf=tf)\n code.c2r(real=smoothed, complex='tmp')\n return code",
"def interpolate_smooth(self, transect):\n\n # Get data from object\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n u[self.valid_data[0, :] == False] = np.nan\n v[self.valid_data[0, :] == False] = np.nan\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply smooth to each component\n u_smooth = rloess(ens_time, u, 10)\n v_smooth = rloess(ens_time, v, 10)\n\n # Save data in object\n self.u_processed_mps = u\n self.v_processed_mps = v\n self.u_processed_mps[np.isnan(u)] = u_smooth[np.isnan(u)]\n self.v_processed_mps[np.isnan(v)] = v_smooth[np.isnan(v)]",
"def smooth(self):\n self.te=self._spline(self.te_in[0,:], self.te_in[1,:], self.rho)\n self.ne=self._spline(self.ne_in[0,:], self.ne_in[1,:], self.rho)\n self.ti=self._spline(self.ti_in[0,:], self.ti_in[1,:], self.rho)\n self.vt=self._spline(self.vt_in[0,:], self.vt_in[1,:], self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.ni_in[i,0,:], self.ni_in[i,1,:], self.rho)\n self._extrapolate()",
"def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n\n #self.zeff = self._spline(self.rho_in, self.zeff_in, self.rho)\n\n self._extrapolate()",
"def gauss_smoothing(engine, smoothed, R, d_k):\n code = CodeSegment(engine)\n def tf(k):\n k2 = sum(ki**2 for ki in k)\n wts = numpy.exp(-0.5*k2* R**2)\n return wts\n \n code.assign(x='d_k', y='tmp')\n code.transfer(complex='tmp', tf=tf)\n code.c2r(real=smoothed, complex='tmp')\n return code",
"def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n\n self.zeff = self._spline(self.rho_in, self.zeff_in, self.rho)\n\n self._extrapolate()",
"def _lidstone_smooth(prob, smoothing, observations, outcomes):\n return (prob + smoothing) / (observations + (smoothing * outcomes))",
"def new_scaled_energy(run, smoother=\"pol2\"):\n get_from_ccdb(run)\n endpoint_calib = ROOT.pstags().endpoint_calib\n endpoint_energy = ROOT.pstags().endpoint_energy\n fout = open(f\"new_scaled_energy.{run}\", \"w\")\n Eps_tagm = ROOT.gROOT.FindObject(\"Epair_Etagm_fit\")\n if not Eps_tagm:\n Eps_tagm = ROOT.gROOT.FindObject(\"Epair_Etagm\")\n if not Eps_tagm:\n Eps_tagm = plot_Etagm_Epair(run)[0]\n Eps_tagm.Fit(smoother)\n for func in Eps_tagm.GetListOfFunctions():\n ntagm = Eps_tagm.GetNbinsX()\n for i in range(ntagm):\n Elow = Eps_tagm.GetXaxis().GetBinLowEdge(102-i)\n Ehigh = Eps_tagm.GetXaxis().GetBinUpEdge(102-i)\n f = [(endpoint_calib - endpoint_energy + func.Eval(E)) /\n endpoint_calib for E in (Elow, Ehigh)]\n fout.write(f\"{i+1} {f[0]} {f[1]}\\n\")\n break",
"def smooth(*args, numiter=1) -> core.Smooth:\n X, Y, kws = util.parseargs(*args)\n return core.Smooth(X, Y, numiter=numiter)",
"def smooth(self):\n \n self.te = self._spline(self.rho_in, self.te_in, self.rho)\n self.ne = self._spline(self.rho_in, self.ne_in, self.rho)\n self.ti = self._spline(self.rho_in, self.ti_in, self.rho)\n self.vt = self._spline(self.rho_in, self.vt_in, self.rho)\n for i in range(self.nion):\n self.ni[i,:]=self._spline(self.rho_in, self.ni_in[i,:], self.rho)\n self._extrapolate()",
"def _apply_smooth_update(self):\n self.print(\"SGD with Momentum: Applying smooth update...\", line_above=True)\n\n raw_update = self.get_h5_data(self.raw_update_path)\n update = self.get_h5_data(self.smooth_update_path)\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the smoothed update.\"\n \"Check the raw update and smoothing process.\"\n )\n\n max_upd = np.max(np.abs(update))\n print(f\"Max smooth model update: {max_upd}\")\n\n update_scaling_fac_alpha = self.alpha / max_upd\n\n self.print(\n f\"Recaling based on alpha: {update_scaling_fac_alpha},\"\n f\"New maximum update is: {max_upd * update_scaling_fac_alpha}\"\n )\n\n update *= update_scaling_fac_alpha\n\n # normalise theta and apply update\n theta_0 = self.get_h5_data(self._get_path_for_iteration(0, self.model_path))\n\n # Update parameters\n if max(self.roughness_decay_smoothing_length) > 0.0:\n theta_prev = self.get_h5_data(self.smoothed_model_path)\n\n # If relative perturbations are smoothed, make model physical\n if self.roughness_decay_type == \"relative_perturbation\":\n theta_prev = (theta_prev + 1) * theta_0\n else:\n theta_prev = self.get_h5_data(self.model_path)\n\n # Normalize the model and prevent division by zero in the outer core.\n theta_prev[theta_0 != 0] = theta_prev[theta_0 != 0] / theta_0[theta_0 != 0] - 1\n\n # Make sure that the model is only updated where theta is non_zero\n theta_new = np.zeros_like(theta_0)\n theta_new[theta_0 != 0] = (\n theta_prev[theta_0 != 0]\n - update[theta_0 != 0]\n - (1 - self.beta) * self.perturbation_decay * theta_prev[theta_0 != 0]\n )\n\n # Remove normalization from updated model and write physical model\n theta_physical = (theta_new + 1) * theta_0\n shutil.copy(\n self.model_path,\n self.tmp_model_path,\n )\n self.set_h5_data(\n self.tmp_model_path,\n theta_physical,\n )",
"def _calculate(self,*args,**kwargs):\n\n like = self.like\n name = self.name\n\n init_energes = like.energies[[0,-1]]\n\n # Freeze all sources except one to make sed of.\n all_sources = like.sourceNames()\n\n if name not in all_sources:\n raise Exception(\"Cannot find source %s in list of sources\" % name)\n\n # make copy of parameter values + free parameters\n \n saved_state = SuperState(like)\n\n if self.verbosity: print 'Freezing background sources'\n for other_name in get_background(like):\n if self.freeze_bg_diffuse:\n if self.verbosity: print ' * Freezing diffuse source %s' % other_name\n modify(like, other_name, free=False)\n else:\n if self.verbosity: print ' * Freezing spectral shape for diffuse source %s' % other_name\n modify(like, other_name, freeze_spectral_shape=True)\n for other_name in get_sources(like):\n if self.freeze_bg_sources:\n if self.verbosity: print ' * Freezing bg source %s' % other_name\n modify(like, other_name, free=False)\n else:\n if self.verbosity: print ' * Freezing spectral shape for bg source %s' % other_name\n modify(like, other_name, freeze_spectral_shape=True)\n\n self.raw_results = []\n for i,(lower,upper) in enumerate(zip(self.lower,self.upper)):\n\n like.setEnergyRange(float(lower)+1, float(upper)-1)\n\n e = np.sqrt(lower*upper)\n\n if self.verbosity: print 'Calculating SED from %.0dMeV to %.0dMeV' % (lower,upper)\n\n \"\"\" Note, the most robust method I have found for computing SEDs in gtlike is:\n (a) Create a generic spectral model with a fixed spectral index.\n (b) Set the 'Scale' to sqrt(emin*emax) so the prefactor is dNdE in the middle\n of the sed bin.\n (b) Set the limits to go from norm/fit_range to norm*fit_range and set the scale to 'norm'\n \"\"\" \n old_flux = self.init_model.i_flux(emin=lower,emax=upper)\n model = PowerLaw(index=self.powerlaw_index, e0=e)\n model.set_flux(old_flux, emin=lower, emax=upper)\n norm = model['norm']\n model.set_limits('norm',norm/float(self.fit_range),norm*self.fit_range, scale=norm)\n model.set_limits('index',-5,5)\n model.freeze('index')\n spectrum = build_gtlike_spectrum(model)\n\n like.setSpectrum(name,spectrum)\n like.syncSrcParams(name)\n\n if self.verbosity:\n print 'Before fitting SED from %.0dMeV to %.0dMeV' % (lower,upper)\n print summary(like)\n\n paranoid_gtlike_fit(like, verbosity=self.verbosity)\n\n if self.verbosity:\n print 'After fitting SED from %.0dMeV to %.0dMeV' % (lower,upper)\n print summary(like)\n\n d = dict()\n self.raw_results.append(d)\n\n d['energy'] = energy_dict(emin=lower, emax=upper, energy_units=self.energy_units)\n d['flux'] = flux_dict(like, name, emin=lower,emax=upper, flux_units=self.flux_units, \n errors=True, include_prefactor=True, prefactor_energy=e)\n d['prefactor'] = powerlaw_prefactor_dict(like, name, errors=self.save_hesse_errors, minos_errors=True,\n flux_units=self.flux_units)\n d['TS'] = ts_dict(like, name, verbosity=self.verbosity)\n\n if self.verbosity: print 'Calculating SED upper limit from %.0dMeV to %.0dMeV' % (lower,upper)\n\n if self.always_upper_limit or d['TS']['reoptimize'] < self.min_ts:\n ul = GtlikePowerLawUpperLimit(like, name,\n cl=self.ul_confidence,\n emin=lower,emax=upper,\n flux_units=self.flux_units,\n energy_units=self.energy_units,\n upper_limit_kwargs=self.upper_limit_kwargs,\n include_prefactor=True,\n prefactor_energy=e,\n verbosity=self.verbosity,\n )\n d['upper_limit'] = ul.todict()\n\n # revert to old model\n like.setEnergyRange(*init_energes)\n saved_state.restore()\n\n self._condense_results()",
"def office_add_smoothed_kernels(parser, args, params):\n parser.add_argument('--max_perturbation', type=str,\n help='Amount by which to scale the velocity updates',\n metavar='', required=True)\n local_args = parser.parse_known_args(args)\n max_perturbation = local_args[0].max_perturbation\n\n control.add_smoothed_kernels(params, max_perturbation)",
"def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights, kernel, mode='same')\n\n return smoothed_values / smoothed_weights",
"def do_smooth(d, WT, sample_rate):\n d_smooth = np.zeros(len(d))\n Wt = int(np.ceil(sample_rate*WT))\n for i in range(len(d)-Wt):\n d_smooth[i] = np.mean(d[i: i+Wt])\n d_smooth[0:Wt+100] = np.nan # +100 removes \"edge effects\" at start of f4\n return(d_smooth)",
"def Psmooth(self, k):\n om = self.om\n ol = self.ol\n omz = self.cosmology.Om(self.z) # Omega matter at z\n olz = ol/np.square(self.cosmology.efunc(self.z)) # MBW Eqn 3.77\n g0 = 5/2*om/(np.power(om, 4/7) - ol + ((1+om/2)*(1+ol/70))) # Eqn 4.76\n gz = 5/2*omz/(np.power(omz, 4/7) - olz + ((1+omz/2)*(1+olz/70)))\n Dlin_ratio = gz / (1+self.z) / g0\n Psmooth = self.P0smooth * np.square(self.T0(k)) * \\\n np.power(k, self.ns) * np.square(Dlin_ratio)\n return Psmooth",
"def kinetics_eo_smooth(data):\n new_data = {}\n # Generate new_data\n for kw in data:\n if kw.startswith('t') == False:\n sigma = int(len(data[kw]) / 15) + 1\n new_data[kw] = gaussian_filter1d(data[kw], sigma)\n# new_data[kw] = uniform_filter1d(data[kw], sigma) \n else:\n new_data[kw] = data[kw]\n \n # plot new_data\n fig, ax1 = plt.subplots(dpi=300)\n \n color = 'black'\n ax1.set_xlabel('$t$ [s]')\n ax1.set_ylabel('$\\\\alpha$', color=color)\n ax1.plot(new_data['t0'], new_data['alpha'], color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n \n color = wowcolor(2)\n ax2 = ax1.twinx()\n ax2.set_ylabel('$E$ [$\\mu$m$^2$/s$^2$]', color=color)\n ax2.plot(new_data['t2'], new_data['E'], color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n \n color = wowcolor(8)\n ax3 = ax1.twinx()\n ax3.set_ylabel('$OP$', color=color)\n ax3.plot(new_data['t2'], new_data['OP'], color=color)\n ax3.tick_params(axis='y', labelcolor=color)\n ax3.spines[\"right\"].set_position((\"axes\", 1.2))\n \n ax = [ax1, ax2, ax3]\n \n return new_data, fig, ax",
"def smooth_linestring(linestring, smooth_sigma):\n smooth_x = np.array(filters.gaussian_filter1d(\n linestring.xy[0],\n smooth_sigma)\n )\n smooth_y = np.array(filters.gaussian_filter1d(\n linestring.xy[1],\n smooth_sigma)\n )\n smoothed_coords = np.hstack((smooth_x, smooth_y))\n smoothed_coords = zip(smooth_x, smooth_y)\n linestring_smoothed = LineString(smoothed_coords)\n return linestring_smoothed",
"def test_linear_2d_merwe():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def __smooth_emission_params(self):\n params_count = {}\n unique_symbols = []\n for key, value in self.emission_dict.items():\n if key[0] not in unique_symbols:\n unique_symbols.append(key[0])\n \n n = len(unique_symbols)\n # n refers to the number of observations/symbols \n\n for state in self.states:\n params_count[state] = [0,0,0]\n # print(params_count[state])\n # key is the state, value is list [total no. of symbols, total no. of non-zero probability, probability p]\n # i.e. [Ts, v, p]\n for key, value in self.emission_dict.items():\n if state in key:\n params_count[state][0] += 1\n if value != 0:\n params_count[state][1] += 1\n else:\n continue\n params_count[state][2] += 1/(params_count[state][0] + params_count[state][1])\n # p = 1/(Ts+v)\n \n for state in self.states:\n for key, value in self.emission_dict.items():\n if state in key:\n if value != 0:\n self.emission_dict[key] = value - params_count[state][2]\n else:\n self.emission_dict[key] = (params_count[state][2]*params_count[state][2])/n-params_count[state][2]\n # v*p/n-v",
"def eeg_smooth(array,window,window_len):\t\n\tarray_smooth = np.zeros(array.shape)\n\tif not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'kaiser']:\n\t\traise ValueError, \"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'kaiser'\"\n\t\t\n\tif window == 'flat':\n\t\tw = np.ones(window_len)\n\telif window == 'kaiser':\n\t\tw = eval('np.'+window+'(window_len,4)')\t\t\n\telse:\n\t\tw = eval('np.'+window+'(window_len)')\t\t\n\t\t\n\t\n\tif len(array.shape) == 1:\n\t\tntpts = len(array)\n\t\tarray_smooth = np.convolve(array, w/w.sum(), mode='same')\n\t\n\telif len(array.shape) == 2:\n\t\t[nchan,ntpts] = array.shape\n\t\tfor i in range(0,nchan):\n\t\t\tarray_smooth[i,:] = np.convolve(array[i,:], w/w.sum(), mode='same')\n\t\n\telif len(array.shape) > 2:\n\t\tprint 'Error: only works with 1 or 2 dimensions'\n\t\t\n\treturn array_smooth",
"def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y",
"def smooth_al(data):\n wd = 5\n optimize = True\n DW_min = 5\n while optimize == True:\n smooth = savgol_filter(data, wd, 2)\n DW = DW_cal(data, smooth)\n if abs(2 - DW) < DW_min:\n wd = wd + 2\n DW_min = abs(2 - DW)\n else:\n wd = wd - 2\n smooth = savgol_filter(data, wd, 2)\n DW = DW_cal(data, smooth)\n break\n return smooth, wd",
"def smooth(self):\n minimal = np.median(self.pheromone_matrix[self.pheromone_matrix > -inf])\n maxim = np.max(self.pheromone_matrix)\n self.pheromone_matrix[self.pheromone_matrix == maxim] = minimal * np.log(maxim/minimal)",
"def compute_smoothed_traj(path, V_des, alpha, dt):\n ########## Code starts here ##########\n #convert path to numpy array because otherwise it's a pain!\n path = np.array(path)\n #create the time vector buy finding the distance from each point and dividing by the straight line velocity\n N = len(path)\n t = np.zeros(N)\n for i in range(1, N):\n #get the distance between the points\n distance = np.linalg.norm(path[i, :] - path[i-1, :])\n #calc the time based on distance and velocity\n t[i] = distance/V_des + t[i-1]\n t_smoothed = np.arange(t[0], t[-1], dt);\n print(t_smoothed.size)\n \n #interpolate over the given path \n x_tck = scipy.interpolate.splrep(t, path[:,0], s=alpha)\n y_tck = scipy.interpolate.splrep(t, path[:,1], s=alpha)\n \n #allocate for the trajectory\n traj_smoothed = np.zeros([len(t_smoothed),7])\n \n #generate the states\n traj_smoothed[:,0] = scipy.interpolate.splev(t_smoothed, x_tck)\n traj_smoothed[:,1] = scipy.interpolate.splev(t_smoothed, y_tck)\n traj_smoothed[:,3] = scipy.interpolate.splev(t_smoothed, x_tck, der=1)\n traj_smoothed[:,4] = scipy.interpolate.splev(t_smoothed, y_tck, der=1)\n traj_smoothed[:,2] = np.arctan2(traj_smoothed[:,4], traj_smoothed[:,3])\n traj_smoothed[:,5] = scipy.interpolate.splev(t_smoothed, x_tck, der=2)\n traj_smoothed[:,6] = scipy.interpolate.splev(t_smoothed, y_tck, der=2) \n ########## Code ends here ##########\n\n return traj_smoothed, t_smoothed",
"def apply_smoothstep(image):\n image_out = 3 * image**2 - 2 * image**3\n return image_out",
"def smooth(o_l, o_r, c_l, c_r, AMT):\n l = o_l * AMT + (1-AMT) * c_l\n r = o_r * AMT + (1-AMT) * c_r\n return (l, r)",
"def eulers_spring(dt=.05,p=1,s0=1,v0=0,t0=0,damp=0,plotit = True,trials = 1000):\n \n delta_t = dt\n \n\n s = np.zeros([trials+1])\n v = np.zeros([trials+1])\n t = np.zeros([trials+1])\n s[0] = s0\n v[0] = v0\n t[0] = t0\n \n for i in range(trials):\n s[i + 1] = updater(s[i],v[i], delta_t)\n v[i + 1] = updater(v[i],acceleration(p,s[i],v=v[i],damp=damp),delta_t)\n t[i + 1] = updater(t[i],1,delta_t)\n \n #s[i + 1] = s[i] + v[i] *delta_t #non functional implementation I saw online\n #v[i + 1] = v[i] + -p*s[i] * delta_t\n #t[i + 1] = t[i] + delta_t\n \n if plotit == True:\n plt.plot(t,s)\n plt.plot(t,v)\n plt.show()",
"def create_Ey(self, MESH):\n self.Ey = MESH.z/MESH.z.sum()*1.0e4"
] | [
"0.6047224",
"0.5958558",
"0.5911852",
"0.58532757",
"0.58519554",
"0.5808192",
"0.5806572",
"0.5766009",
"0.57573235",
"0.565053",
"0.56344795",
"0.5537342",
"0.55034465",
"0.5393578",
"0.5387983",
"0.538715",
"0.53834933",
"0.5319106",
"0.5317269",
"0.5313368",
"0.53031206",
"0.52856064",
"0.5261988",
"0.5246921",
"0.52460146",
"0.52314097",
"0.5228087",
"0.5210223",
"0.5182448",
"0.5164322"
] | 0.6461423 | 0 |
Crop the Elongation by index. | def cropped_index(self, start_i=None, end_i=None, shifted=True):
xs = self.xs[start_i:end_i]
ys = self.ys[start_i:end_i]
if shifted:
xs = xs - xs[0]
return self.__class__(xs, ys, self.gauge_length, self.sample_width, self.sample_thickness, self.name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def crop(self, N):\n self.data = self.data[:,:N]",
"def crop(self):\n return np.array([f.crop() for f in self])",
"def crop(self, timerange):\n\n begin = self.bisect(timerange.begin())\n end = self.bisect(timerange.end(), begin)\n return self.slice(begin, end)",
"def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)",
"def reset(self, index):\n return self.behead(index)[0]",
"def _get_item(self, index):\n data, label = self.data[index], self.label[index]\n zmax, zmin = data.max(axis=0)[2], data.min(axis=0)[2]\n for ind in range(10):\n center_idx = random.randint(0, data.shape[0]-1) # randomly select a crop center, then check if it is a valid choice\n center = data[center_idx]\n crop_min = np.array([center[0]-0.75, center[1]-0.75, zmin])\n crop_max = np.array([center[0]+0.75, center[1]+0.75, zmax])\n crop_ids = np.sum((data>=(crop_min-0.2)) * (data<=(crop_max+0.2)), axis=1) == 3\n if crop_ids.size == 0: continue\n crop_data, crop_label = data[crop_ids], label[crop_ids]\n if np.sum(crop_label>0)/crop_label.size < 0.7 and ind < 9:\n continue\n mask = np.sum((crop_data>=(crop_min-0.01)) * (crop_data<=(crop_max+0.01)), axis=1) == 3\n vidx = np.ceil((crop_data[mask]-crop_min) / (crop_max-crop_min) * [31,31,62])\n vidx = np.unique(vidx[:,0]*31*62 + vidx[:,1]*62 + vidx[:,2])\n # check if large portion of points are annotated, and the points occupy enough spaces\n if vidx.size*1./31/31/62 >= 0.02:\n break\n ids = np.random.choice(crop_label.size, self.npoints, replace=True)\n data = crop_data[ids]\n label = crop_label[ids]\n mask = mask[ids]\n if self.normalize == \"ball\":\n data = utils.normalize_point_cloud(data)\n if self.normalize == \"square\":\n data = utils.normalize_point_cloud_square(data)\n if self.use_weights:\n weight = self.label_weights[label] * mask\n return data, label, weight\n else:\n return data, label * mask",
"def carve_slice(\n self, x_index=0, width=config()[\"panel\"][\"width\"],\n ):\n piece = []\n for row in self.grid:\n piece.append(row[x_index : x_index + width])\n\n return piece",
"def remove(self, idx):\n indices = range(len(self))\n indices.remove(idx)\n return self.take(indices, axis=0).take(indices, axis=1)",
"def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]",
"def truncate(self, index, chain):\n self._trace[chain] = self._trace[chain][:index]",
"def crop_image(image):\r\n return image[40:-20, :]",
"def crop(arr, target_shape):\n arr_shape = arr.shape\n ncrop = ()\n for dim in range(len(arr_shape)):\n diff = arr_shape[dim] - target_shape[dim]\n if diff > 0:\n start = int(diff / 2)\n stop = start + target_shape[dim]\n ncrop += np.index_exp[start:stop]\n else:\n ncrop += np.index_exp[:]\n cropped = arr[ncrop]\n return cropped",
"def set_truncation_mask(self):\n N = self.truncation_size\n self.manifold_indices = []\n for i in range(len(self.energies)):\n num_excitations = len(self.energies[i])\n total_occ_num = np.kron(np.ones(num_excitations),self.vibrational_total_occupation_number)\n inds_to_keep = np.where(total_occ_num < N)\n self.manifold_indices.append(inds_to_keep)",
"def clip_or_fit_solutions(self, pop, idx):\r\n for k in idx:\r\n self.repair_genotype(pop[k])",
"def removeFromIndex(self, index):\n if self.size <= 0:\n raise IndexError(\"the array is empty\")\n super().removeFromIndex(index)\n self._shrinkCheck()",
"def take_item_at(self, index):\r\n self._set_item(index, None)\r\n return self.get_switched_item()",
"def pop(self, index: int) -> Viewable:\n new_objects = list(self)\n obj = new_objects.pop(index)\n self.objects = new_objects\n return obj",
"def pick(self,i):\n x_i = self.all[i,:]\n return x_i",
"def remove(self, index):\n raise NotImplementedError()",
"def __getitem__( self, index ):\n \n # check dimensions - this rules out the ellisis (...) for the moment\n if len( index ) != 3:\n raise ValueError(\"This is a three-dimensional object, please index accordingly.\")\n\n # get involved file numbers and steps\n # if index[0] is a single number (not iterable, not a slice), make a list of it\n if not hasattr( index[0], '__iter__') and not isinstance( index[0], slice ):\n valid_steps = self._valid_steps[ [index[0]], :2 ]\n else:\n valid_steps = self._valid_steps[ index[0], :2 ]\n \n # if image should be cropped make sure that slices stay slices (is about 30% faster)\n if self._cropped: \n if isinstance( index[1], slice ):\n a = self._ymin if index[1].start is None else index[1].start+self._ymin\n b = self._ymax if index[1].stop is None else index[1].stop+self._ymin \n internal_index1 = slice( a, b, index[1].step )\n \n else:\n internal_index1 = np.arange( self._ymin, self._ymax )[ index[1] ]\n \n if isinstance( index[2], slice ):\n a = self._xmin if index[2].start is None else index[2].start+self._xmin\n b = self._xmax if index[2].stop is None else index[2].stop+self._xmin \n internal_index2 = slice( a, b, index[2].step )\n \n else:\n internal_index2 = np.arange( self._xmin, self._xmax )[ index[2] ]\n\n else:\n internal_index1 = index[1]\n internal_index2 = index[2]\n\n # get all data slices and concatenate them (always use data interface here, regardless of whether memory mapping is used or not)\n slices = []\n for file_no in np.unique( valid_steps[:,0] ):\n file = ir.file_hub.open( self._files[file_no] )\n slices.append( file[ self._selected_ext ].data[ valid_steps[ valid_steps[:,0] == file_no, 1 ], internal_index1, internal_index2 ] )\n slices = np.vstack( slices )\n\n # remove first dimension if there is only one slice\n if slices.shape[0] == 1 and slices.ndim == 3:\n slices = slices[0,:,:]\n \n return slices",
"def test_removing_index(self):",
"def _raveled_index(self):\n return np.r_[:self.size]",
"def _raveled_index(self):\n return np.r_[:self.size]",
"def clear_index(self):\n self.index = None",
"def _index_select_nd(source: torch.Tensor, index: torch.Tensor) -> torch.Tensor:\n index_size = index.size() # (num_atoms/num_bonds, max_num_bonds)\n suffix_dim = source.size()[1:] # (hidden_size,)\n final_size = index_size + suffix_dim # (num_atoms/num_bonds, max_num_bonds, hidden_size)\n\n target = source.index_select(dim=0, index=index.view(\n -1)) # (num_atoms/num_bonds * max_num_bonds, hidden_size)\n target = target.view(\n final_size) # (num_atoms/num_bonds, max_num_bonds, hidden_size)\n\n return target",
"def remain(self):\n return self.source[self.cur :]",
"def pop(self, index: int = -1) -> Transform:\r\n return self.transforms.pop(index=-1)",
"def crop(self, coords):\n pass",
"def __delitem__(self, index):\n def _removeBlock(blockIndex):\n block = self._doc.findBlockByNumber(blockIndex)\n if block.next().isValid(): # not the last\n cursor = QTextCursor(block)\n cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)\n elif block.previous().isValid(): # the last, not the first\n cursor = QTextCursor(block.previous())\n cursor.movePosition(QTextCursor.EndOfBlock)\n cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)\n cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)\n else: # only one block\n cursor = QTextCursor(block)\n cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)\n cursor.removeSelectedText()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n _removeBlock(index)\n elif isinstance(index, slice):\n \"\"\"List of indexes is reversed for make sure \n not processed indexes are not shifted during document modification\n \"\"\"\n start, stop, step = index.indices(self._doc.blockCount())\n if step > 0:\n start, stop, step = stop - 1, start - 1, step * -1\n\n for blockIndex in range(start, stop, step):\n _removeBlock(blockIndex)",
"def unstacked_index(size, index):\n return index % size, index // size"
] | [
"0.61602694",
"0.5879224",
"0.5870638",
"0.581509",
"0.56682354",
"0.56376",
"0.56352115",
"0.5629972",
"0.5623761",
"0.55894375",
"0.5587319",
"0.5582777",
"0.55521005",
"0.5534629",
"0.5528097",
"0.55064076",
"0.5488465",
"0.54711306",
"0.5463051",
"0.54613936",
"0.5452928",
"0.54375345",
"0.54375345",
"0.5425676",
"0.54183304",
"0.5386166",
"0.53778094",
"0.5350985",
"0.5327305",
"0.5323004"
] | 0.60998905 | 1 |
Determine the strain index of break. Break is defined herein as the last peak in the stress/strain curve. | def break_index(self, **kwargs):
return self.peak_indices(**kwargs)[0][-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return",
"def damping_index(self):\n start_index = self.kp_index[1]\n end_index = start_index + self.control_dim\n\n if self.impedance_flag:\n return (start_index, end_index)\n else:\n return None",
"def getLimbIndex(self):\n\n data = self.name.split('-')\n return int(data[1]) - 1",
"def _get_breaking_point(x, y):\n # select threshold where curve break\n slope = (y[-1] - y[0]) / len(y)\n y_grad = np.gradient(y)\n m = list(y_grad >= slope)\n j = m.index(False)\n m = m[j:]\n x = x[j:]\n y = y[j:]\n if True in m:\n i = m.index(True)\n else:\n i = -1\n breaking_point = float(x[i])\n\n return breaking_point, x, y",
"def top_of_descent_index(self):\n tod = self.altitudes.argmax()\n # if the altitude profile is not just a climb\n # and there is a cruising section\n if (tod < len(self.altitudes) - 1) and \\\n (self.altitudes[tod] == self.altitudes[tod + 1]):\n tod += 1\n\n return tod",
"def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1",
"def get_block(self):\r\n current_value = self.get_value(self.current_point)\r\n endpoint_value = self.get_value(self.end_point)\r\n worker_start = self.current_point\r\n if (endpoint_value - current_value) > self.searchwidth:\r\n worker_end = self.get_word_from_value(current_value + self.searchwidth - 1)\r\n self.current_point = self.get_word_from_value(current_value + self.searchwidth)\r\n else:\r\n worker_end = self.end_point\r\n self.current_point = worker_end\r\n\r\n return worker_start, worker_end",
"def get_wstart(ref, wave_ref, wave_per_pixel):\n\n return wave_ref - ((ref-1) * wave_per_pixel)",
"def kp_index(self):\n start_index = self.control_dim\n end_index = start_index + self.control_dim\n\n if self.impedance_flag:\n return (start_index, end_index)\n else:\n return None",
"def get_index_from_well(self, well):\n pass",
"def get_vehicle_end_index(self):\n return [len(self.matrix) - 1 for i in range(len(self.vehicles))]",
"def stop(self):\n try:\n return self.index[-1]\n except:\n pass",
"def span(self):\n if self.vcount == 0:\n return None\n else:\n return self.vmax-self.vmin",
"def _get_lback_index(self, model, last) -> int:\n assert last > 0\n # last state cannot be loop-back.\n assert model.get_value(self.totime(self._in_loop, last)).is_true()\n assert model.get_value(self.totime(self._in_loop, 0)).is_false()\n idx = last - 1\n while model.get_value(self.totime(self._in_loop, idx)).is_true():\n idx -= 1\n assert idx >= 0\n assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true()\n assert model.get_value(self.totime(self._in_loop, idx)).is_false()\n assert model.get_value(self.totime(self.start_loop, idx)).is_true()\n return idx",
"def findspan(self, u):\n #if u >= self.kv[-self.p-1]:\n # return self.kv.size - self.p - 2 # last interval\n #else:\n # return self.kv.searchsorted(u, side='right') - 1\n return pyx_findspan(self.kv, self.p, u)",
"def season_breaks(in_mjd, ra):\n\n season = np.floor(calcSeason(ra, in_mjd))\n\n\n di = np.diff(season)\n break_indx = np.where(di > 0)[0]\n #breaks = (in_mjd[break_indx] + in_mjd[break_indx+1])/2.\n\n return break_indx",
"def find_exit_time(self):\n \n # check when last bead starts moving\n vel_last_bead = self.get_bead_velocity(self.get_N()-1)\n nonzero = np.flatnonzero( vel_last_bead )\n \n if nonzero.size == 0:\n warnings.warn('The wave did not exit the chain or it could not be determined.')\n return np.nan\n else:\n exit_index = np.min( nonzero )\n time = self.get_time()\n exit_time = time[exit_index]\n return exit_time",
"def _get_end_index(self):\n return max(self.index + self.source_window,\n self._get_target_index() + self.target_window)",
"def indexOfMin(lyst):\n\tminIndex =0\n\tcurrentIndex=1\n\twhile currentIndex< len(lyst):\n\t\tif lyst(currentIndex)<lyst(minIndex):\n\t\t\tminIndex=currentIndex\n\t\tcurrentIndex+=1\n\treturn minIndex",
"def locus_stop(self):\n return int(open(self.locus_file).read().split('\\t')[4])",
"def spark_index(n):\n return int(round((clamp(n) - minimum) * coefficient))",
"def findStressIdx(self, stress2find, cleanedData=True):\n if stress2find == 0:\n idx = 1\n elif stress2find > self.raw['stress'].max():\n idx = None\n else:\n data4finding = self.cleaned if cleanedData else self.raw\n idx = data4finding.query(f'stress >= {stress2find}').index[0]\n return idx",
"def boatLocation(self):\n if self.operator is None:\n return 0\n else:\n if self.operator[0] == 's':\n return 1\n elif self.operator[0] == 'b':\n return 0\n else:\n return -1",
"def get_current_challenge_part(self):\n dist = max(self.get_global_total_distance_on_foot(), self.get_global_total_distance_on_bike())\n checkpoints = self._get_challenge_parts()\n\n result = 0\n for d in checkpoints.keys():\n if d <= dist:\n result = result + 1\n\n return '%02d' % result",
"def _get_start_render_index(self):\n\n for ol in self.overlay_list[-1::-1]:\n # FIXME There may be even no 'label' in a label's name, so we need use other method to recognize a label.\n if \"label\" not in ol.get_name() and ol.get_alpha() == 1. and ol.is_visible()\\\n and ol.get_min() <= np.min(ol.get_data()):\n return self.overlay_list.index(ol)\n\n # 0 means that the render will start with the bottom overlay.\n return 0",
"def find_start_index(self,pulse_width_list):\n for i in range(len(pulse_width_list)):\n if abs(pulse_width_list[i] - _Const.NEC_HDR_MARK) < _Const.NEC_HDR_MARK * _Const.TOLERANCE:\n return i\n return",
"def _spin_index(self, sz: float) -> int:\n if self.spin is None:\n if sz is not None or not np.isclose(sz, 0):\n raise Exception(\"cannot request spin index of spinless fermions\")\n return 0\n else:\n return round(sz + self.spin)",
"def get_envelope_end(env):\n denv = np.diff(env)\n i = np.where(np.abs(denv) > 0)[0]\n true_stop_index = np.max(i)+1\n return true_stop_index",
"def get_sound_index(self):\n # Return difference between the two last compared elements\n lhs = ThreadManagment.sort_data_by_thread[self.thread.ident].last_cmp_left\n #rhs = ThreadManagment.last_cmp_right_by_thread.get(self.thread.ident, 0)\n #return round((lhs + rhs) / 2)\n return lhs",
"def symbolic_start(self):\n return self.symbolic_bounds[0]"
] | [
"0.63903123",
"0.5647532",
"0.5634",
"0.54937726",
"0.544508",
"0.5438141",
"0.54027975",
"0.53570503",
"0.5290587",
"0.5280061",
"0.5275725",
"0.5270513",
"0.5268599",
"0.52369666",
"0.52302957",
"0.52163225",
"0.5215555",
"0.5207446",
"0.52039963",
"0.51773316",
"0.5164221",
"0.51483715",
"0.5129155",
"0.5123716",
"0.51124525",
"0.51123804",
"0.5106179",
"0.50962514",
"0.5079094",
"0.50790787"
] | 0.6041352 | 1 |
Write Elongation object to a csv file. | def write_csv(elongation, file_name):
e = elongation
with open(file_name, 'w') as f:
f.write(f"""\
Break Load, {e.break_load()}
Break Strength, {e.break_strength()}
Break Elongation, {e.break_elongation()}
Yield Load, {e.yield_load()}
Yield Strength, {e.yield_strength()}
Yield Elongation, {e.yield_elongation()}
Gauge Length, {e.gauge_length}
Sample Width, {e.sample_width}
Sample Thickness, {e.sample_thickness}
Points
%, N""")
for x, y in zip(e.xs, e.ys):
f.write(f'\n{x:>8.4f}, {y:>8.4f}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])",
"def writeToCSV(self, filepath):\r\n\t\twith open(filepath, 'w') as outputFile:\r\n\t\t\toutputFile.write(str(self))",
"def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()",
"def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()",
"def save_to_csv(self):\r\n # Save the read values to a csv file\r\n with open(self.fname, \"a\") as f:\r\n wr = csv.writer(f, dialect='excel')\r\n wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,\r\n self.soc0, self.set_val, self.P_ac, self.P_bat])",
"def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())",
"def write_csv(self, outfile, collapse_orders=False, show_age=False):\r\n # Write header row\r\n outfile.write(self.get_csv_header(collapse_orders, show_age).encode())\r\n\r\n # Write content\r\n for x in self.records:\r\n x.write_csv(outfile, collapse_orders, show_age)",
"def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])",
"def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()",
"def write_to_csv(self, data):\n with open(\"out.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(self.column_names)\n writer.writerows(data)\n print(\" Updated succesfully \")",
"def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])",
"def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)",
"def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output",
"def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return",
"def export_csv(self, csvfileobject):\n for index, track in enumerate(self._tracks):\n csvfileobject.writerow(track.properties)\n for delta in track.periods: \n csvfileobject.writerow(delta.properties)",
"def write_csv(settings, row, mode):\n with open(settings.output_file_path, mode=mode) as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(row)",
"def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')",
"def _csvWriter(self):\r\n # Initialize Header\r\n table = []\r\n voltageRow = []\r\n for i in range(len(self._voltages)):\r\n voltageRow.append(self._voltages[i][0])\r\n voltageRow.append(\" \")\r\n if self._vna.isTwoComponents():\r\n voltageRow.append(\" \")\r\n table.append(voltageRow)\r\n \r\n # Fill table with data\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._frequency[0])):\r\n # row = []\r\n # for j in range(len(self._frequency)):\r\n # row.append(self._frequency[j][i])\r\n # row.append(self._intensity[j][2*i])\r\n # row.append(self._intensity[j][2*i + 1])\r\n # table.append(row)\r\n # else: \r\n for i in range(len(self._frequency[0])):\r\n row = []\r\n for j in range(len(self._frequency)):\r\n row.append(self._frequency[j][i])\r\n row.append(self._intensity[j][i])\r\n table.append(row)\r\n\r\n # Write to CSV\r\n filename = 'CSVs/' + self._vna.getDateFormatted() + '.csv'\r\n with open(filename, 'w', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\r\n for i in range(len(table)):\r\n dataWriter.writerow(table[i])",
"def write_as_csv(self,destination=sys.stdout):\n # write sorted\n the_destination=None\n if isinstance(destination,types.FileType):\n the_destination=destination\n elif isinstance(destination,types.StringTypes):\n the_destination=file(destination,\"w\")\n else:\n raise Exception(\"sorry destination %s is not valid\"%(repr(destination)))\n\n the_destination.write(\"# quantity:\"+str(self.quantity_name))\n the_destination.write(\"# x y ysigma n\\n\")\n for x in self.get_xdata():\n y=UserDict.UserDict.__getitem__(self,x)\n if type(y) is types.FloatType:\n the_destination.write(\"%g %g 0 1\\n\"%(x,y)) \n else:\n the_destination.write(\"%g %g %g %d\\n\"%(x,y.mean(),y.mean_sigma(),y.n))\n\n the_destination=None",
"def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")",
"def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')",
"def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)",
"def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')",
"def write_to_file(data, method, delimiter):\r\n output_file = 'data.csv'\r\n with open(output_file, method, newline='', encoding='utf-8') as file:\r\n writer = csv.writer(file, delimiter=delimiter)\r\n writer.writerows([data])",
"def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')",
"def write_to_file(self, time):\n if Parameters.instance().use_ages:\n nb_age_groups = len(Parameters.instance().age_proportions)\n else:\n nb_age_groups = 1\n if Parameters.instance().use_ages:\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n data = {s: 0 for s in list(InfectionStatus)}\n for inf_status in data:\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n # Age groups are numbered from 1 to the total number\n # of age groups (thus the +1):\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for age_i in range(0, nb_age_groups):\n for inf_status in list(InfectionStatus):\n data_per_inf_status =\\\n cell.compartment_counter.retrieve()[inf_status]\n data[inf_status] += data_per_inf_status[age_i]\n data[\"age_group\"] = age_i+1\n data[\"time\"] = time\n self.writer.write(data)\n else: # If age not considered, age_group not written in csv\n if self.spatial_output: # Separate output line for each cell\n for cell in self.population.cells:\n data = {s: 0 for s in list(InfectionStatus)}\n for k in data:\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n data[\"cell\"] = cell.id\n data[\"location_x\"] = cell.location[0]\n data[\"location_y\"] = cell.location[1]\n self.writer.write(data)\n else: # Summed output across all cells in population\n data = {s: 0 for s in list(InfectionStatus)}\n for cell in self.population.cells:\n for k in data:\n # Sum across age compartments\n data[k] += sum(cell.compartment_counter.retrieve()[k])\n data[\"time\"] = time\n self.writer.write(data)"
] | [
"0.71679187",
"0.7032536",
"0.70268303",
"0.69770074",
"0.6973022",
"0.693288",
"0.6858325",
"0.68417126",
"0.6832732",
"0.68016165",
"0.67204237",
"0.6718283",
"0.671795",
"0.67077386",
"0.6678489",
"0.66233027",
"0.6594838",
"0.65919703",
"0.65797895",
"0.6558252",
"0.6532758",
"0.65254396",
"0.65226173",
"0.6517087",
"0.65157276",
"0.6485437",
"0.6473108",
"0.64685166",
"0.64545226",
"0.645136"
] | 0.7691217 | 0 |
Read an iterable of elongation files. | def read_elongations(file_names):
return list(itertools.chain(*(read_elongation(f) for f in file_names))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\r\n example = []\r\n for line in open(self.fullpath):\r\n if line != '\\n':\r\n example.append(line.rstrip()) # remove newline\r\n else:\r\n yield example\r\n example = []",
"def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))",
"def ReadFilesGenerator(self):\n\n for file in self._file_names:\n file_list = []\n\n # TODO see further into yielding one line at a time\n with open(file, 'r', encoding='mbcs') as sped:\n file_list = sped.read().splitlines()\n\n if not self.isSigned(file_list):\n file_list = self.stripSignature(file_list)\n\n yield file, file_list",
"def open_files(self, listed_files):\n for file_ in listed_files:\n try: \n with codecs.open(file_, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n yield f\n except Exception as error:\n print(error, file=sys.stderr)\n exit(0)",
"def read_iter_from_file(path_to_file_read):\n with open(path_to_file_read, \"r\") as fichero:\n line = fichero.readline().strip()\n while line:\n yield line\n line = fichero.readline().strip()",
"def raw_feature_iterator(file_paths):\n for path in file_paths:\n with open(path, \"r\") as fin:\n for line in fin:\n yield line",
"def raw_feature_iterator(file_paths):\n for path in file_paths:\n with open(path, \"r\") as fin:\n for line in fin:\n yield line",
"def _get_load_iterators(options):\n \n #declare delimiters and field/line iterators\n field_delimiter = ','\n line_delimiter = '\\n'\n escape_char = '\\\\'\n \n line_pattern = re.compile('(?s)^(.*?)' + re.escape(line_delimiter) + '(.*)$')\n field_pattern = re.compile('(?s)^(.*?)' + re.escape(field_delimiter) + '(.*)$')\n \n def _line_iter(f):\n buffer = ''\n while True:\n next = f.read(1024 * 4)\n if next == '':\n if buffer:\n yield buffer\n \n return\n \n buffer += next\n while buffer:\n m = re.match(line_pattern, buffer)\n if m:\n yield m.group(1)\n buffer = m.group(2)\n else:\n break\n\n unescape_map = dict()\n if options['escape_eol_chars']:\n unescape_map['n'] = '\\n'\n unescape_map['t'] = '\\t'\n unescape_map['r'] = '\\r'\n\n def _unescape(s):\n in_escape = False\n for c in s:\n if in_escape:\n yield unescape_map.get(c, c)\n in_escape = False\n elif c == escape_char:\n in_escape = True\n else:\n yield c\n \n def _field_iter(s):\n while s:\n m = re.match(field_pattern, s)\n if m:\n yield ''.join(m.group(1))\n s = m.group(2)\n else:\n yield ''.join(_unescape(s))\n return\n \n return _line_iter, _field_iter",
"def read(self, filenames, encoding=None):\n if isinstance(filenames, str):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n with open(filename, encoding=encoding) as f:\n self.read_file(f)\n except OSError:\n continue\n read_ok.append(filename)\n return read_ok",
"def _read_eeg(eeg_file):\r\n pass",
"def read_files(filenames, gram_size=1):\n assert isinstance(filenames, list), \"filenames argument must be a list\"\n parser = MorParser()\n for fn in filenames:\n for uid, speaker, ngram in generate_chunks(parser.parse(fn), gram_size):\n yield fn, uid, speaker, ngram",
"def read_data_files(filenames, datapath, ids=None):\n filenames = np.array(filenames) # make sure it's array\n if ids is None:\n ids = range(0, len(filenames))\n\n for i in [filenames[k] for k in ids]:\n yield str(open(datapath+i, 'r').read())",
"def iterator(dataset_fn, sections=None, lang=None, field_indices=None):\n files = filelist(lang=lang, sections=sections)\n\n with TarFile.open(dataset_fn, 'r:gz') as f:\n for member in f:\n if member.isfile() and os.path.basename(member.name) in files:\n logging.info('parsing %s ...' % member.name)\n m_f = f.extractfile(member)\n\n for sentence in parse_conll(m_f, field_indices=field_indices):\n yield sentence\n\n m_f.close()",
"def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return",
"def read_lines(files):\n for file in files:\n for line in file.readlines():\n try:\n line = line.decode('utf-8')\n except UnicodeDecodeError:\n line = line.decode('latin-1')\n yield line.strip()",
"def flow_from_files(self, filenames=None, batch_size=32):\n\n if filenames:\n self.filenames = filenames\n\n for i in range(0, len(self.filenames), batch_size):\n yield np.concatenate([np.load(self.path / f) \\\n for f in self.filenames.iloc[i:i+batch_size]])",
"def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()",
"def read(self, filenames):\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp, filename)\n fp.close()\n read_ok.append(filename)\n return read_ok",
"def io_files(self, iterable, ext=None, func=None):\n for input_path in iterable:\n output_path, temp_file = self.check_output_path(input_path, ext)\n\n try:\n func(input_path, temp_file)\n except Exception as e:\n if self._force_continue is True:\n self.handle_error(e, input_path)\n else:\n raise e\n\n self.overwrite_output_path(input_path, output_path, temp_file)",
"def read_all(self, prog:progress=None):\t\t\t\n\t\tself.__output_status(\"Read & compare all files\")\n\t\tself.__read_files('all', prog)",
"def readEEGepoch(eegfilename, mainDir):\n # subject = 'ES9007' \n datapath = os.path.join(mainDir)\n os.chdir(datapath)\n \n folders = os.listdir(datapath)\n \n for dir in folders:\n \n os.chdir(os.path.join(datapath, dir))\n file = glob.glob(eegfilename)\n \n if file:\n print('>>>>>>>>>>>>> file loaded from >>>>>>>>>>>>>>>>>:', os.getcwd())\n filepath = os.path.join(os.getcwd(), eegfilename) \n dat = mne.read_epochs(filepath, preload=True) \n break \n return dat",
"def concatenate_files(self, files):\n entries = []\n for f in files:\n tree = ETree.parse(f)\n\n root = tree.getroot()\n entries = root.findall(tags[\"entry\"])\n\n for entry in entries:\n yield entry",
"def read_concat_file(self):\n\n file_list = []\n for i in self.IDs[0:3]:\n with open(i, 'r') as cf:\n cf = cf.read()\n file_list.append(cf)\n return file_list",
"def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files",
"def read(self,filenames):\n\n if isinstance(filenames, basestring):\n filenames = [filenames]\n read_ok = []\n for filename in filenames:\n try:\n fp = open(filename)\n except IOError:\n continue\n self._read(fp)\n fp.close()\n read_ok.append(filename)\n return read_ok",
"def open_file(self) -> Iterator[NamedIO]:\n with open(self.filename) as f:\n yield cast(NamedIO, f)",
"def iterload(filename, chunk=100, **kwargs):\n stride = kwargs.get('stride', 1)\n atom_indices = cast_indices(kwargs.get('atom_indices', None))\n if chunk % stride != 0 and filename.endswith('.dcd'):\n raise ValueError('Stride must be a divisor of chunk. stride=%d does not go '\n 'evenly into chunk=%d' % (stride, chunk))\n if chunk == 0:\n yield load(filename, **kwargs)\n # If chunk was 0 then we want to avoid filetype-specific code in case of undefined behavior in various file parsers.\n else:\n skip = kwargs.pop('skip', 0)\n if filename.endswith('.h5'):\n if 'top' in kwargs:\n warnings.warn('top= kwarg ignored since file contains topology information')\n\n with HDF5TrajectoryFile(filename) as f:\n if skip > 0:\n xyz, _, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n if atom_indices is None:\n topology = f.topology\n else:\n topology = f.topology.subset(atom_indices)\n\n while True:\n data = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if data == []:\n raise StopIteration()\n in_units_of(data.coordinates, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(data.cell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)\n yield Trajectory(xyz=data.coordinates, topology=topology,\n time=data.time, unitcell_lengths=data.cell_lengths,\n unitcell_angles=data.cell_angles)\n\n if filename.endswith('.lh5'):\n if 'top' in kwargs:\n warnings.warn('top= kwarg ignored since file contains topology information')\n with LH5TrajectoryFile(filename) as f:\n if atom_indices is None:\n topology = f.topology\n else:\n topology = f.topology.subset(atom_indices)\n\n ptr = 0\n if skip > 0:\n xyz, _, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n xyz = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n time = np.arange(ptr, ptr+len(xyz)*stride, stride)\n ptr += len(xyz)*stride\n yield Trajectory(xyz=xyz, topology=topology, time=time)\n\n elif filename.endswith('.xtc'):\n topology = _parse_topology(kwargs.get('top', None))\n with XTCTrajectoryFile(filename) as f:\n if skip > 0:\n xyz, _, _, _ = f.read(skip)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n xyz, time, step, box = f.read(chunk*stride, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(box, f.distance_unit, Trajectory._distance_unit, inplace=True)\n trajectory = Trajectory(xyz=xyz, topology=topology, time=time)\n trajectory.unitcell_vectors = box\n yield trajectory\n\n elif filename.endswith('.dcd'):\n topology = _parse_topology(kwargs.get('top', None))\n with DCDTrajectoryFile(filename) as f:\n ptr = 0\n if skip > 0:\n xyz, _, _ = f.read(skip, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n while True:\n # for reasons that I have not investigated, dcdtrajectory file chunk and stride\n # together work like this method, but HDF5/XTC do not.\n xyz, box_length, box_angle = f.read(chunk, stride=stride, atom_indices=atom_indices)\n if len(xyz) == 0:\n raise StopIteration()\n in_units_of(xyz, f.distance_unit, Trajectory._distance_unit, inplace=True)\n in_units_of(box_length, f.distance_unit, Trajectory._distance_unit, inplace=True)\n time = np.arange(ptr, ptr+len(xyz)*stride, stride)\n ptr += 
len(xyz)*stride\n yield Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=box_length,\n unitcell_angles=box_angle)\n\n else:\n log.critical(\"loading complete traj into mem! This might no be desired.\")\n t = load(filename, **kwargs)\n for i in range(skip, len(t), chunk):\n yield t[i:i+chunk]",
"def read_input_files(input_file: str) -> list[Food]:\n with open(input_file) as input_fobj:\n foods = [Food.from_raw(line.strip()) for line in input_fobj]\n return foods",
"def read_file(self, file_name_list):\n\n # Iterating over the file name list\n for file_name in file_name_list:\n\n # Opening MTF file\n #try: \n mtf_file = open(file_name,\"r\")\n #except Exception: pass # TODO\n\n # Reading file\n for line in mtf_file:\n # Processing line\n line_list = line.strip().split(\"\\t\")\n tf_id=line_list[0]\n name=line_list[1]\n database=line_list[2]\n tf_class=int(line_list[3])\n genes=line_list[4].split(\";\")\n genes_suffix=line_list[5].split(\";\")\n\n self.add(Motif(tf_id, name, database, tf_class, genes, genes_suffix))\n\n\n # Termination\n mtf_file.close()",
"def __iter__(self):\n with open(self.fn + \".fai\") as fai:\n for toks in (l.rstrip(\"\\r\\n\").split(\"\\t\") for l in fai):\n yield toks[0], int(toks[1])"
] | [
"0.6163643",
"0.61128104",
"0.609269",
"0.6008233",
"0.59725094",
"0.59578764",
"0.59578764",
"0.59162337",
"0.5894706",
"0.5822738",
"0.58118016",
"0.57776636",
"0.5761199",
"0.5705911",
"0.5697656",
"0.5660844",
"0.56368",
"0.5615509",
"0.56117857",
"0.5606222",
"0.5591424",
"0.557768",
"0.55712074",
"0.5570847",
"0.55637056",
"0.55554694",
"0.5553878",
"0.5552735",
"0.55517226",
"0.55459946"
] | 0.64478576 | 0 |
Downloads all files from the SugarSync account to the provided output folder | def download_files(self, output, replace=False):
try:
# Create output directory
# self._output_path = os.path.join(output,
# "sugardl_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
# os.makedirs(self._output_path)
# Just write to the provided output directory
self._output_path = output
#####
# Authenticate: getting a refresh token, then an access token
#####
print("Authenticating..")
self._get_refresh_token()
self._get_access_token()
#####
# User Info
#####
self._get_user_info()
#####
# Get all folder metadata prior to download
#####
self._get_sync_folders()
#####
# Download: Recursively download all syncfolder contents
#####
for folder in self._folder_metadata:
print("== SYNC FOLDER DOWNLOAD: {} ==".format(folder['displayName']))
self._download_folder_contents(folder['contents'], "{}/{}".format(self._output_path, folder['displayName']), start_idx=0, replace=replace)
print("")
except Exception as e:
print("Error in download_files: {}".format(traceback.print_exc()))
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download(urls, dest_folder):\n pass",
"def download_output_files(self):\n bucket_list = self.bucket.list(\"output/part\")\n for bucket_entry in bucket_list:\n key_string = str(bucket_entry.key)\n # check if file exists locally, if not: download it\n if not os.path.exists(key_string):\n bucket_entry.get_contents_to_filename(\"../\" + key_string)\n else:\n print \"output file already exists, please delete\"",
"def download_files(self):",
"def download_results(self, output_dir, progress=None):\n\n if self._uuid is not None:\n self.update()\n\n if not path.exists(output_dir):\n makedirs(output_dir)\n\n if self._dirty:\n self.results.get_all_files(output_dir, progress=progress)",
"def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)",
"def sync_files(self, folder):\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, folder)\n\n # Create the session folder if not existing\n project_home = os.environ['PROJ_HOME']\n root_folder = os.path.join(project_home, folder)\n if not os.path.isdir(root_folder):\n os.makedirs(root_folder)\n\n # Start download files\n for blob in blobs:\n destination_file_name = os.path.join(project_home, blob.name)\n\n # Check if the local file exist before download file\n if not os.path.isfile(destination_file_name):\n\n # Create folder to avoid exception when download\n destination_file_folder = os.path.dirname(destination_file_name)\n if not os.path.isdir(destination_file_folder):\n os.makedirs(destination_file_folder)\n\n blob.download_to_filename(destination_file_name)\n print('Downloaded file {}'.format(destination_file_name))",
"def download_file(directory, file_name, output_dir):\n endpoint_url = BASE_URL + \"/\" + directory\n final_file = \"lib/\" + output_dir + \"/\" + file_name\n if not os.path.exists(\"lib/\" + output_dir):\n os.makedirs(\"lib/\" + output_dir)\n print('Downloading ' + endpoint_url + \"/\" + file_name + ' ...')\n opener = urllib.URLopener()\n opener.retrieve(endpoint_url + \"/\" + file_name, final_file)\n os.chmod(final_file, 0o755)",
"def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def download_all(self, to: str = None) -> Generator:\n\n for filename in self.list_files():\n yield (self.download(filename, to))",
"def download_all_files(self, root_url, version):\n file_list = self._http_client.get(root_url + '?ref=refs/tags/' + version)\n for file in file_list.json():\n if file['type'] == 'file':\n download_url = file['download_url']\n download_path = self.get_module_and_path('next/' + file['path'].replace(self._main_dir + '/', ''))\n self.download_file(download_url.replace('refs/tags/', ''), download_path)\n elif file['type'] == 'dir':\n path = self.get_module_and_path('next/' + file['path'].replace(self._main_dir + '/', ''))\n os.mkdir(path)\n self.download_all_files(root_url + '/' + file['name'], version) # Recurse into the subdirectory.\n\n file_list.close()",
"def download_all_files(self):\n self.server_handler.get_sensor_data_from_server()",
"def download_data(files: page_iterator.HTTPIterator, folder: str) -> None:\n logging.info('File download Started... Wait for the job to complete.')\n\n # create folder locally if not exists\n if not os.path.exists(folder): os.makedirs(folder)\n\n for file in files:\n logging.info('GCS File: {}'.format(file.name))\n destination_uri = '{}/{}'.format(folder, file.name.split('/')[-1])\n file.download_to_filename(destination_uri if destination_uri.endswith('.csv') else destination_uri + '.csv')\n logging.info('Exported {} to {}'.format(file.name, destination_uri))\n\n return None",
"def download_data_files(self, dest_directory):\n\t\tif not os.path.exists(dest_directory):\n\t\t\tos.makedirs(dest_directory)\n\t\tfilename = DATA_URL.split('/')[-1]\n\t\tfilepath = os.path.join(dest_directory, filename)\n\t\tif not os.path.exists(filepath):\n\t\t\tdef _progress(count, block_size, total_size):\n\t\t\t\tsys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n\t\t\t\t\t\tfloat(count * block_size) / float(total_size) * 100.0))\n\t\t\t\tsys.stdout.flush()\n\t\t\tfilepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\t\t\tprint()\n\t\t\tstatinfo = os.stat(filepath)\n\t\t\tprint('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\t\textracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n\t\tif not os.path.exists(extracted_dir_path):\n\t\t\ttarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(path):\n\treturn send_from_directory(\"results\", path, as_attachment=True)",
"def download(all):\n print(\"Downloading\")",
"def download_file(self, parsed_event, input_dir_path):",
"def download_files(service, file_list, out_path):\n total = len(file_list)\n for i, file_id in enumerate(file_list, 1):\n name = get_file(service, file_id)['title']\n print('Downloading {}... ({}/{}) [{}%]'.format(name, i, total,\n round(i / total * 100)))\n path = os.path.join(out_path, name)\n try:\n download_file(service, file_id, path)\n except errors.HttpError as error:\n os.remove(path) # Remove broken file\n print('Could not download file: {}'.format(error), file=sys.stderr)",
"def download(cls, root):\n path_dirname = os.path.join(root, cls.dirname)\n path_name = os.path.join(path_dirname, cls.name)\n if not os.path.isdir(path_dirname):\n for url in cls.urls:\n filename = os.path.basename(url)\n zpath = os.path.join(path_dirname, filename)\n if not os.path.isfile(zpath):\n if not os.path.exists(os.path.dirname(zpath)):\n os.makedirs(os.path.dirname(zpath))\n print(f'Download {filename} from {url} to {zpath}')\n download_from_url(url, zpath)\n extract_to_dir(zpath, path_name)\n\n return path_name",
"def download(url, output, encoding, insrs, format_name):\n\n folder = download_data(url, encoding)\n joined_file = join_files(folder)\n transform(joined_file, output, insrs, format_name)\n\n shutil.rmtree(folder)\n os.remove(joined_file)\n\n if not os.path.isfile(output):\n raise Error(\"Output file not created, the whole process failed\")\n else:\n logging.info(\"File %s successfuly created\" % output)",
"def maybe_download_and_extract():\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n r = requests.Session().get(DATA_URL)\n with open(filepath, 'wb') as fd:\n for chunk in r.iter_content(500):\n fd.write(chunk)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download(dbx, folder, subfolder, name):\r\n path = '/%s/%s/%s' % (\"Apps\", \"Contract Drafter\", \"2.amr\")\r\n while '//' in path:\r\n path = path.replace('//', '/')\r\n with stopwatch('download'):\r\n try:\r\n md, res = dbx.files_download(path)\r\n except dropbox.exceptions.HttpError as err:\r\n print('*** HTTP error', err)\r\n return None\r\n data = res.content\r\n print(data, 'bytes; md:', md)\r\n return data",
"def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)",
"def download(self, outputfile: str, outputformat: str):\n pass",
"def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path",
"def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)",
"def download_remote_files(output_dir, files):\n logging.debug(f\"Try to download files: {files}\")\n\n # Create list of remote and local files\n base_url = \"https://storage.googleapis.com/\"\n urls = [base_url+file for file in files]\n local_files = [output_dir + file.split(\"/\")[-1] for file in files]\n\n\n async def get(session, url, local_f):\n if os.path.isfile(local_f):\n logging.info(\"Raw file {} exists locally\".format(local_f))\n pass\n else:\n # Download file\n async with session.get(url=url) as response:\n if response.status == 200:\n resp = await response.read()\n with open(local_f, \"wb\") as outfile:\n outfile.write(resp)\n\n\n async def main(urls, local_files):\n conn = aiohttp.TCPConnector(limit=30)\n timeout = aiohttp.ClientTimeout(total=None, connect=None, sock_connect=30, sock_read=10)\n async with aiohttp.ClientSession(connector=conn, timeout=timeout) as session:\n _ = await asyncio.gather(*[get(session, urls[f], local_files[f]) for f in range(len(urls))])\n\n asyncio.run(main(urls, local_files))\n return local_files",
"def download_files_in_drs_manifest(\n hostname,\n auth,\n infile,\n output_dir,\n show_progress=True,\n unpack_packages=True,\n delete_unpacked_packages=False,\n) -> None:\n _download(\n hostname,\n auth,\n infile,\n output_dir,\n show_progress,\n unpack_packages,\n delete_unpacked_packages,\n )",
"def download(self, output):\n self.wait()\n path = 'auditlogEntryReport/download'\n with open(output, 'w') as f:\n f.write(self._session.get(path))\n LOGGER.info('log downloaded: {}'.format(output))",
"def download_sra_files(remote_location, local_location = '', max_recursion = 3, verbose = False):\n\n downloaded_files = list();\n\n def printv(*args):\n if(verbose):\n print(*args);\n sys.stdout.flush();\n\n printv(\"Reading folder: \", remote_location);\n\n req = urllib2.Request(remote_location);\n\n response = urllib2.urlopen(req);\n\n the_page = response.read();\n\n entries = the_page.split('\\r\\n');\n\n #Identify sub folders\n folders = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == 'd'): #if directory flag\n folders.append(spl_entry[-1]);\n\n\n for folder in folders:\n dl_files = download_sra_files(remote_location + '/' + folder, local_location, max_recursion - 1, verbose);\n downloaded_files.extend(dl_files);\n\n #Identify SRA files\n files = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == '-' and #Not a directory\n spl_entry[-1].lower().endswith('.sra')): #Has extension '.sra'\n\n files.append(spl_entry[-1]);\n\n if(len(files) > 0):\n printv(\"Identified sra files: \");\n for file_name in files:\n printv(\" \", file_name);\n\n abs_local_location = os.path.abspath(local_location);\n\n if(not os.path.isdir(abs_local_location)):\n os.makedirs(abs_local_location);\n\n for file_name in files:\n\n printv(\"Downloading \", file_name);\n\n file_str = remote_location + '/' + file_name;\n\n req = urllib2.Request(file_str);\n response = urllib2.urlopen(req);\n\n dest_file_name = abs_local_location + os.sep + file_name;\n dest_file = open(dest_file_name, 'wb');\n shutil.copyfileobj(response, dest_file)\n dest_file.close();\n downloaded_files.append(dest_file_name);\n\n return downloaded_files;"
] | [
"0.6863336",
"0.6838265",
"0.6813811",
"0.6791622",
"0.6458129",
"0.64210194",
"0.63175696",
"0.6252998",
"0.62363803",
"0.621832",
"0.6204603",
"0.6164557",
"0.6153396",
"0.61488926",
"0.6148461",
"0.6134311",
"0.6120213",
"0.60974497",
"0.6057297",
"0.5996962",
"0.5962937",
"0.59536785",
"0.59508806",
"0.5939444",
"0.5916294",
"0.5907781",
"0.5889323",
"0.5886011",
"0.5873174",
"0.58650637"
] | 0.74477714 | 0 |
Retrieves user information, including sync folders | def _get_user_info(self):
if not self._refresh_token:
raise ValueError("Refresh Token not set")
# Add access token to the headers
add_headers = dict(self._default_headers)
add_headers['Authorization'] = self._access_token
resp = requests.get(BASE_URL + "user/{}".format(self._user_id), headers=add_headers, verify=False)
if resp.status_code >= 300:
raise Exception("Failed to retrieve user info: {}".format(resp))
vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))
# Print generic user info
print("")
print("== USER INFO ==")
print("Username: {}".format(vals.get('user').get('username')))
print("Nickname: {}".format(vals.get('user').get('nickname')))
print("Usage: {} MB / {} MB".format(int(int(vals.get('user').get('quota').get('usage')) / (1024*1024)),
int(int(vals.get('user').get('quota').get('limit')) / (1024*1024))))
print("")
# Grab folder ids we care about
self._user_sync_folders_url = vals.get('user').get('syncfolders') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def get_users_info(): \n \n data = user_obj.get_users_info()\n return data",
"def user_info(self):\n response = self.query('user_info')\n return response",
"def getUserInfo(self, user):\n return pwd.getpwnam(user)[2:4]",
"def user_info(self):\n return self.auth.get_user_by_session()",
"def fetch_user_info(self) -> UserInfo:\n url = buildCommandUrl(\n self.server, \"/as/user/keep\", self.__userInfo.strToken)\n result = json_request(\"GET\", url, token=self.__userInfo.strToken)",
"def userinfo(self):\n return self._userinfo",
"def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))",
"def _get_sync_folders(self):\n\n if not self._user_sync_folders_url:\n raise ValueError(\"User sync folders URL not retrieved\")\n\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n # Add access token to the headers\n add_headers = dict(self._default_headers)\n add_headers['Authorization'] = self._access_token\n\n resp = requests.get(self._user_sync_folders_url, headers=add_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to claim access token: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n # Print and store relevant sync folder information\n print(\"== SYNC FOLDERS ==\")\n for folder in vals.get('collectionContents').get('collection'):\n print(\"Folder: {}\".format(folder.get('displayName')))\n self._folder_metadata.append(folder)\n\n print(\"\")",
"def getInterestedUsers():",
"def user_details():\n url = 'https://api.github.com/orgs/facebook/repos'\n json_obj = urllib2.urlopen(url)\n userdata = json.load(json_obj)\n if 'error' in userdata:\n print 'errors are scanned in data'\n for data in userdata:\n if 'name' in data:\n if data['name'] == 'codemod':\n print 'language used'\n print data['language']\n print 'number of watchers'\n print data['watchers']\n print 'git url'\n print data['git_url']\n print 'open issues'\n print data['open_issues']\n print 'permissions for user'\n print 'push'\n print data['permissions']['push']\n print 'pull'\n print data['permissions']['pull']",
"def GetUserInfo(self):\n user = users.get_current_user()\n user_info = GetInfoForUser(user)\n if user:\n # Check to see if the user has auxiliary info for Swyzl, and if not\n # then create it.\n if not user_info:\n user_info = models.UserInfo()\n user_info.user = user\n user_info.put()\n\n url = users.create_logout_url(self.request.uri)\n url_link_text = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_link_text = 'Login'\n return (user, url, url_link_text)",
"def get_user_info_by_id(self, user_id: int) -> dict:",
"def get_bookshare_user_info(patron):\n pass",
"def get():\n return prepare_response(get_user_info())",
"def user_info(self):\n \n return self.auth.get_user_by_session()",
"def GetInfoForUser(user):\n return models.UserInfo.gql('WHERE user = :1', user).get()",
"def get_user_info_by_name(self, username: str) -> dict:",
"def get_user_info(self):\n user_info = self.data_source.get_user_info(self.user_id)\n\n return user_info",
"def current_user_info():\n\n return current_user",
"def user_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time()*1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/users/me', param, self.timeout)",
"def get_user_folders_dict(user_id):\n return { folder['full_name'] : folder['id'] for folder in canvas_api.pull_folders(user_id) }",
"def userinfo(self, **kwargs):\n metadata = self.load_server_metadata()\n resp = self.get(metadata['userinfo_endpoint'], **kwargs)\n resp.raise_for_status()\n data = resp.json()\n return UserInfo(data)",
"def _on_get_user_info(self, callback, session, user):\n logging.debug('user data from github ' + str(user))\n if user is None:\n callback(None)\n return\n callback({\n \"id\": user[\"id\"],\n \"login\": user[\"login\"],\n \"name\": user.get(\"name\"),\n \"email\": user.get(\"email\"),\n \"access_token\": session[\"access_token\"],\n })",
"def getUserInfo(self):\r\n userJson = self.httpGet(ReaderUrl.USER_INFO_URL)\r\n result = json.loads(userJson, strict=False)\r\n self.userId = result['userId']\r\n return result",
"def getPublicUserInfo(self, username):\r\n pass",
"def user_info(self):\n resp = self._get(get_url('user'))\n raise_on_error(resp)\n ret = resp.json()\n return UserInfo(ret)",
"def user_info(username):\n print(json.dumps(client.user_info(username)))",
"def extract_user_info(client_config):\n # test if there isn't a system user or if there isn't a name for that\n # user, return None\n if ('system user' not in client_config or\n 'name' not in client_config['system user']):\n return None\n\n user_info = dict()\n user_info['system_key'] = dict(\n user=client_config['system user']['name'],\n access_key=client_config['system user']['access key'],\n secret_key=client_config['system user']['secret key'],\n )\n return user_info",
"def get_user_profile(self):\n return self.request('get', 'id/users')"
] | [
"0.69573396",
"0.68997866",
"0.6759884",
"0.6710544",
"0.6675518",
"0.661304",
"0.6520475",
"0.6491115",
"0.6431651",
"0.63984233",
"0.6313943",
"0.63038987",
"0.6303114",
"0.63017505",
"0.6268096",
"0.62518907",
"0.62275803",
"0.61987466",
"0.61969614",
"0.6171311",
"0.6162074",
"0.61426055",
"0.6138153",
"0.61101407",
"0.60876733",
"0.60695714",
"0.6068188",
"0.60572696",
"0.60527444",
"0.5996758"
] | 0.7239684 | 0 |
Retrieves metadata on all sync folders | def _get_sync_folders(self):
if not self._user_sync_folders_url:
raise ValueError("User sync folders URL not retrieved")
if not self._refresh_token:
raise ValueError("Refresh Token not set")
# Add access token to the headers
add_headers = dict(self._default_headers)
add_headers['Authorization'] = self._access_token
resp = requests.get(self._user_sync_folders_url, headers=add_headers, verify=False)
if resp.status_code >= 300:
raise Exception("Failed to claim access token: {}".format(resp))
vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))
# Print and store relevant sync folder information
print("== SYNC FOLDERS ==")
for folder in vals.get('collectionContents').get('collection'):
print("Folder: {}".format(folder.get('displayName')))
self._folder_metadata.append(folder)
print("") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_root_metadata(self):\n r = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'root']),\n params={'select': 'id,name,fileSystemInfo'})\n return r.json()",
"def syncfolder():",
"def getFolder(self, folderUris, metadata = None, queryArgs = None):\n\n returnContents = {}\n\n\n\n #-------------------- \n # Force the relevant argumets to lists\n #-------------------- \n if isinstance(folderUris, str):\n folderUris = [folderUris]\n if isinstance(queryArgs, str):\n queryArgs = [queryArgs]\n\n\n\n #-------------------- \n # Acquire contents via 'self.__getJson'\n #-------------------- \n contents = []\n for folderUri in folderUris:\n\n #\n # Apply query arguments, if any.\n #\n if queryArgs:\n folderUri = Xnat.path.applyQueryArguments(folderUri, \n queryArgs)\n\n\n #\n # Get the JSON\n #\n folderUri = Xnat.path.makeXnatUrl(self.host, folderUri)\n json = self.__getJson(folderUri)\n\n #\n # If json is null we have a login error.\n # Return out.\n #\n if json == None:\n return None\n #\n # Otherwise, concatenate to rest of contents.\n #\n contents = contents + json\n\n #\n # If we want the projects, store projects in a dictionary. \n # 'self.projectCache' is reset if the user logs into a new \n # host or logs in a again.\n #\n if folderUri.endswith('/projects'):\n self.projectCache = contents\n #print(f\"CONTENTS {contents}\")\n #-------------------- \n # Exit out if there are non-Json or XML values.\n #-------------------- \n if str(contents).startswith(\"<?xml\"): return [] \n # We don't want text values\n\n\n\n #-------------------- \n # Get other attributes with the contents \n # for metadata tracking.\n #-------------------- \n for content in contents:\n if metadata:\n for metadataTag in metadata:\n if metadataTag in content:\n #\n # Create the object attribute if not there.\n #\n if not metadataTag in returnContents:\n returnContents[metadataTag] = []\n returnContents[metadataTag].append(\\\n content[metadataTag])\n else:\n returnContents = contents\n\n\n #-------------------- \n # Track projects and files in global dict\n #-------------------- \n for folderUri in folderUris:\n folderUri = folderUri.replace('//', '/')\n if folderUri.endswith('/files'):\n for content in contents:\n # create a tracker in the fileDict\n #print(f\"\\n\\nCONTENT {content} {folderUri}\")\n self.fileDict[content['Name']] = content\n #print(\"%s %s\"%(, self.fileDict))\n elif folderUri.endswith('/projects'):\n self.projectCache = returnContents\n\n\n\n #-------------------- \n # Return the contents of the folder as a\n # dictionary of lists\n #-------------------- \n return returnContents",
"def getAllItems(self):\r\n\r\n control = []\r\n\r\n for folder in self.get_items():\r\n control.append(folder.id)\r\n\r\n while len(control) > 0:\r\n try:\r\n for item in self.get_items(parent=control.pop()):\r\n print(\"Getting content for: \" + item.name)\r\n if item.type == \"folder\":\r\n control.insert(0,item.id)\r\n else:\r\n self.contents[item.id] = {\r\n 'name': item.name, 'parentID': item.parent_id,\r\n 'size': item.size, 'uri': item.download_url\r\n }\r\n except putio.PutioError as err:\r\n print(\"Can't get content for Directory\")\r\n pass\r\n return self.contents",
"def get_metadata(self):\n previous = DirectoryMetadata.load_pickle(self)\n metadata = {}\n\n for dirpath, dirnames, filenames in os.walk(self.prefix_dir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n relative_path = path.split(self.base_dir, 1)[1]\n try:\n stats = os.stat(path)\n except OSError:\n log.exception('Error stating a file on disk while building up metadata, skipping file %s' % path)\n continue\n swift_bytes = stats.st_size\n mtime = datetime.utcfromtimestamp(stats.st_mtime)\n if (previous is not None) and (relative_path in previous.metadata) and\\\n (previous.metadata[relative_path].bytes == swift_bytes):\n swift_hash = previous.metadata[relative_path].hash\n else:\n try:\n with open(path, 'rb') as afile:\n md5_hash = hashlib.md5()\n md5_hash.update(afile.read())\n swift_hash = md5_hash.hexdigest()\n except OSError:\n log.exception('Error reading a file to create the md5 while building up metadata, skipping file %s' % path)\n continue\n\n metadata[relative_path] = FileMetadata(relative_path, swift_bytes, mtime, swift_hash)\n\n return metadata",
"def get_files_folders(request,**kwargs):\n import requests\n from mainserver.constants import FORWARD_TO_SERVERS\n\n path = request.data['path']\n\n\n print (\"path \", path)\n\n\n try :\n # print FileSystem.objects.filter(path=path,type='FOLDER',status='CREATED').order_by('-creation_datetime').count()\n fsobject = FileSystem.objects.filter(path=path,type='FOLDER',status='CREATED').order_by('-creation_datetime')[0]\n print fsobject\n except :\n print \"illegal file object query access\"\n return Response(status=status.HTTP_417_EXPECTATION_FAILED)\n\n\n fsobjects_list = FileSystem.objects.filter(parent=fsobject,status='CREATED').order_by('-creation_datetime')\n\n fs_object = FileSystem.objects.all().order_by('-creation_datetime')[:][0]\n\n sync_with_servers(tail=None, datetime=convert_datetime_to_string(fs_object.creation_datetime))\n\n response_list = []\n dictionary = {}\n\n print (\"sync complete\")\n\n for fsobject in fsobjects_list :\n print fsobject\n fs_object = convert_fsobject_to_fstypeobject(fsobject)\n if fs_object.path not in dictionary.keys() :\n for fs_object in FileSystem.objects.filter(path=fs_object.path,status='CREATED').order_by('-creation_datetime')[:2] :\n try :\n response = requests.get(fs_object.location)\n if response.status_code == 200 :\n break\n except requests.ConnectionError :\n pass\n print\"final object lopcation\", fs_object.location\n dictionary[fs_object.path]=fs_object\n\n for fs_object in dictionary.values():\n fs_object.creation_datetime = convert_datetime_to_string(fs_object.creation_datetime)\n data = FileSystemSerializer(fs_object).data\n response_list.append(data)\n\n # print response_list\n\n data = {\"current_dir\" : path}\n\n data ['files_folders'] = response_list\n\n\n print data\n\n return Response(data=data,status=status.HTTP_200_OK)",
"def export_directories(self):\n print('=== Exporting all directory data...')\n\n for directory in self.client.directories:\n print('- Exporting directory:', directory.name)\n\n json = {\n 'id': self.get_id(directory),\n 'href': directory.href,\n 'name': directory.name,\n 'description': directory.description,\n 'status': directory.status,\n 'createdAt': directory.created_at.isoformat(),\n 'modifiedAt': directory.modified_at.isoformat(),\n 'customData': self.get_custom_data(directory),\n 'groups': [],\n }\n\n for group in directory.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n json['provider'] = {\n 'href': directory.provider.href,\n 'providerId': directory.provider.provider_id,\n 'agent': None,\n }\n\n try:\n json['provider']['createdAt'] = directory.provider.created_at.isoformat()\n json['provider']['modifiedAt'] = directory.provider.modified_at.isoformat()\n except AttributeError:\n json['provider']['createdAt'] = None\n json['provider']['modifiedAt'] = None\n\n try:\n json['provider']['clientId'] = directory.provider.client_id\n except AttributeError:\n json['provider']['clientId'] = None\n\n try:\n json['provider']['clientSecret'] = directory.provider.client_secret\n except AttributeError:\n json['provider']['clientSecret'] = None\n\n try:\n json['provider']['redirectUri'] = directory.provider.redirect_uri\n except AttributeError:\n json['provider']['redirectUri'] = None\n\n try:\n json['provider']['agent'] = {\n 'id': self.get_id(directory.provider.agent),\n 'href': directory.provider.agent.href,\n 'status': directory.provider.agent.status,\n 'createdAt': directory.provider.agent.created_at.isoformat(),\n 'modifiedAt': directory.provider.agent.modified_at.isoformat(),\n 'config': {\n 'directoryHost': directory.provider.agent.directory_host,\n 'directoryPort': directory.provider.agent.directory_port,\n 'sslRequired': directory.provider.agent.ssl_required,\n 'agentUserDn': directory.provider.agent.agent_user_dn,\n 'agentUserDnPassword': directory.provider.agent.agent_user_dn_password,\n 'baseDn': directory.provider.agent.base_dn,\n 'pollInterval': directory.provider.agent.poll_interval,\n 'referralMode': directory.provider.agent.referral_mode,\n 'ignoreReferralIssues': directory.provider.agent.ignore_referral_issues,\n 'accountConfig': directory.provider.agent.account_config,\n 'groupConfig': directory.provider.agent.group_config,\n },\n 'download': {\n\n },\n }\n except AttributeError:\n pass\n\n if directory.password_policy:\n json['passwordPolicy'] = {\n 'id': self.get_id(directory.password_policy),\n 'href': directory.password_policy.href,\n #'createdAt': directory.password_policy.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.modified_at.isoformat(),\n 'resetEmailStatus': directory.password_policy.reset_email_status,\n 'resetEmailTemplates': [],\n 'resetSuccessEmailStatus': directory.password_policy.reset_success_email_status,\n 'resetSuccessEmailTemplates': [],\n 'resetTokenTtl': directory.password_policy.reset_token_ttl,\n 'strength': {\n 'href': directory.password_policy.strength.href,\n #'createdAt': directory.password_policy.strength.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.strength.modified_at.isoformat(),\n 'maxLength': directory.password_policy.strength.max_length,\n 'minDiacritic': 
directory.password_policy.strength.min_diacritic,\n 'minLength': directory.password_policy.strength.min_length,\n 'minLowerCase': directory.password_policy.strength.min_lower_case,\n 'minNumeric': directory.password_policy.strength.min_numeric,\n 'minSymbol': directory.password_policy.strength.min_symbol,\n 'minUpperCase': directory.password_policy.strength.min_upper_case,\n },\n }\n\n try:\n for template in directory.password_policy.reset_email_templates:\n json['passwordPolicy']['resetEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'defaultModel': template.default_model,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n try:\n for template in directory.password_policy.reset_success_email_templates:\n json['passwordPolicy']['resetSuccessEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n tenant = self.get_id(directory.tenant)\n self.write('%s/%s/directories/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')",
"def gdrive_metadata(url: str, fetch_all=False) -> object:\n payload = _get_endpoint_payload()\n route = payload['route'] + '/metadata'\n params = dict(url=url)\n\n response = requests.get(\n route,\n headers=payload['headers'],\n params=params\n )\n\n metadata = response.json()\n # metadata = {meta[0]: meta[1] for meta in response.json()}\n\n meta_fields = [\n 'mimeType',\n 'fileExtension',\n 'lastModifyingUser',\n 'title',\n 'parents',\n 'fileSize',\n 'alternateLink',\n ]\n\n try:\n metadata['folder_id'] = (\n None if not metadata['parents']\n else metadata['parents'][0]['id']\n )\n except IndexError:\n raise('The file must reside in a folder that is shared with '\n '<my-bot>@<my-domain>.com.')\n\n if 'lastModifyingUser' in metadata:\n metadata['last_mod_by_email'] = (\n metadata['lastModifyingUser']['emailAddress']\n )\n del metadata['lastModifyingUser']\n\n if not fetch_all:\n metadata = {\n k: v\n for k, v in metadata.items()\n if k in meta_fields +\n ['folder_id', 'last_mod_by_email']\n }\n del metadata['parents']\n Metadata = namedtuple('MetaData', metadata.keys())\n return Metadata(**metadata)\n\n return metadata",
"def get_all_metadata(self):\n return self.db.get_all_nodes()",
"async def get_files_metadata(\n location_id: LocationID,\n uuid_filter: str = \"\",\n expand_dirs: bool = Query(\n True,\n description=(\n \"Automatic directory expansion. This will be replaced by pagination the future\"\n ),\n ),\n):",
"def ls(self, folder_id: int = -1) -> list:\n print('ls', folder_id)\n if folder_id == -1:\n folder_id = self.default_dir\n url = 'https://webapi.115.com/files?aid=1&cid={}&o=user_ptime&asc=0&offset=0&show_dir=1&limit=115&code=&scid=' \\\n '&snap=0&natsort=1&custom_order=2&source=&format=json&type=&star=&is_q=&is_share='.format(folder_id)\n result = self.s.get(url, headers={'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errNo'] == 0:\n data = result['data']\n return data",
"def get_metadata(self):\n return self.manager.get_metadata(self)",
"def ls(self):\n files = self.drive.files().list().execute().get(\"files\", [])\n for f in files:\n print(f[\"name\"], f[\"mimeType\"])",
"def _GetDistCpMetadata(base_dir: str, subdirs: List[str]):\n metadata = []\n for subdir in subdirs or []:\n metadata += [(FLAGS.dpb_sparksql_data_format or 'parquet', {\n 'path': '/'.join([base_dir, subdir])\n })]\n return metadata",
"def collect_meta() -> Dict[str, Any]:\n out = {}\n for integrations_dir, meta in all_integrations():\n integration_name = integrations_dir.name\n out[integration_name] = meta\n # print(f\"Processed meta for integration {integration_name}\")\n return out",
"def get_metadata(self) -> DeepDict:\n metadata = get_default_nwbfile_metadata()\n for interface in self.data_interface_objects.values():\n interface_metadata = interface.get_metadata()\n metadata = dict_deep_update(metadata, interface_metadata)\n return metadata",
"async def get_files_metadata_dataset(\n location_id: LocationID,\n dataset_id: str,\n user_id: UserID,\n expand_dirs: bool = Query(\n True,\n description=(\n \"Automatic directory expansion. This will be replaced by pagination the future\"\n ),\n ),\n):",
"def manifest(self):\n yield self._meta\n for dir_key, meta in self._walk_dir_meta():\n yield {'logical_key': dir_key, 'meta': meta}\n for logical_key, entry in self.walk():\n yield {'logical_key': logical_key, **entry.as_dict()}",
"def allinfo(self, *path, **kwargs):\n recursive = kwargs.get(\"recursive\", True)\n self._download_server_info()\n files = self.listfiles(*path, recursive=recursive)\n infos = {}\n for a in files:\n npath = a\n infos[npath] = self.info(*npath)\n return infos",
"def sync_get_metadata(self, chunk, coords):\n\n return chunk.get_metadata(coords)",
"def get_root_folder_details(client):\n try:\n root_folder = client.folder(folder_id='0').get()\n print(f\"The root folder is owned by: {root_folder.owned_by['login']}\")\n items = root_folder.get_items(limit=100, offset=0)\n print('This is the first 100 items in the root folder:')\n for item in items:\n print(\" \" + item.name, item.id)\n\n except Exception as e:\n print(f\"Error has occurred: {e}\")\n return None",
"def get_data_files():\n\n data_files = []\n for d, dirs, filenames in os.walk(share_jupyterhub):\n rel_d = os.path.relpath(d, here)\n data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))\n return data_files",
"def getDirectoryMetadata( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.getFileMetadata: Attempting to obtain metadata for %s directories.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n res = serviceClient.getMetadata( url )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'Directory':\n gLogger.debug( \"DIPStorage.getFileMetadata: Successfully obtained metadata for %s.\" % url )\n successful[url] = res['Value']\n else:\n failed[url] = 'Supplied path is not a directory'\n else:\n failed[url] = 'Directory does not exist'\n else:\n gLogger.error( \"DIPStorage.getFileMetadata: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )",
"def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.parentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')",
"def get_folder_list():\n if exists_key_store('folders:list'):\n return get_key_store('folders:list')\n else:\n # initialize folder list with root (All)\n set_key_store('folders:counter', 0)\n rpush_key_store('folders:list', {'id': 0, 'parent': -1, 'name': 'All'})\n return get_key_store('folders:list')",
"def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.grandparentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')",
"def list_all():\n if os.path.exists(DATA_DIR):\n return os.listdir(DATA_DIR)\n return []",
"def test_list_base_metadata(self):\n\n # start out by creating some metadata\n\n self.save_base_metadata()\n\n # then list metadata with name = 'idsvc.basemeta'\n\n response = BaseMetadata.list(api_client=self.IDS_SYS_CLIENT)\n\n # we should have at least one in the list, since we just created one\n\n self.assertTrue(len(response)>0)\n\n # cleanup\n\n self.delete_base_metadata()",
"def _download_metadata(track_id, dataset_version):\n metadata_path = os.path.join(METADATA_PATH, _METADATA_FMT % track_id)\n if os.path.exists(metadata_path):\n return True\n\n try:\n top_folderid = GDRIVE_FOLDERS[dataset_version]\n except KeyError:\n raise IOError(\"Unable to find data in Google Drive for this version.\")\n\n file_list = get_named_child(top_folderid, track_id)\n correct_file = [f for f in file_list if f['title'] == track_id]\n\n if len(correct_file) == 0:\n raise IOError(\"Could not find multitrack\")\n else:\n mtrack_file = correct_file[0]\n\n metadata_file_list = get_named_child(mtrack_file['id'], 'METADATA')\n if len(metadata_file_list) > 0:\n metadata_file = metadata_file_list[0]\n else:\n folder_file_list = get_files_in_folder(mtrack_file['id'])\n print(len(folder_file_list))\n for fobject in folder_file_list:\n print(fobject['title'])\n raise IOError(\"Could not find Metadata\")\n\n download_file(metadata_file['id'], metadata_path)\n\n DOWNLOADED_FILEPATHS.append(metadata_path)\n\n return True",
"def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')"
] | [
"0.648162",
"0.60808307",
"0.59680235",
"0.58834165",
"0.58828735",
"0.58178836",
"0.58129156",
"0.57245374",
"0.5677313",
"0.5626248",
"0.5588174",
"0.5587416",
"0.55705136",
"0.55277115",
"0.55202806",
"0.5497931",
"0.54850954",
"0.54689896",
"0.5424635",
"0.5419897",
"0.5406022",
"0.54037",
"0.5399845",
"0.53896",
"0.53895736",
"0.5380054",
"0.53777355",
"0.5377431",
"0.53755844",
"0.5357723"
] | 0.7083151 | 0 |
If we're unable to establish a connection to the Elasticsearch server, CannotLoadConfiguration (which the circulation manager can understand) is raised instead of an Elasticsearch-specific exception. | def test_elasticsearch_error_in_constructor_becomes_cannotloadconfiguration(self):
# Unlike other tests in this module, this one runs even if no
# ElasticSearch server is running, since it's testing what
# happens if there's a problem communicating with that server.
class Mock(ExternalSearchIndex):
def set_works_index_and_alias(self, _db):
raise ElasticsearchException("very bad")
with pytest.raises(CannotLoadConfiguration) as excinfo:
Mock(self._db)
assert "Exception communicating with Elasticsearch server: " in str(excinfo.value)
assert "very bad" in str(excinfo.value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def check_connection(self, hass: HomeAssistantType):\n from elasticsearch import (\n AuthenticationException,\n AuthorizationException,\n ConnectionError,\n ElasticsearchException,\n SSLError,\n )\n\n client = None\n is_supported_version = True\n try:\n client = self._create_es_client()\n\n es_version = ElasticsearchVersion(client)\n await es_version.async_init()\n\n is_supported_version = es_version.is_supported_version()\n except SSLError as err:\n raise UntrustedCertificate(err)\n except ConnectionError as err:\n if isinstance(\n err.info, aiohttp.client_exceptions.ClientConnectorCertificateError\n ):\n raise UntrustedCertificate(err)\n raise CannotConnect(err)\n except AuthenticationException as err:\n raise AuthenticationRequired(err)\n except AuthorizationException as err:\n raise InsufficientPrivileges(err)\n except ElasticsearchException as err:\n raise ElasticException(err)\n except Exception as err:\n raise ElasticException(err)\n finally:\n if client:\n await client.close()\n client = None\n\n if not is_supported_version:\n raise UnsupportedVersion()",
"def _load(self):\n try:\n scoped_index_name = get_scoped_index_name(\n self._app_namespace, self._es_index_name\n )\n if not self._es_client.indices.exists(index=scoped_index_name):\n self.fit()\n except EsConnectionError as e:\n logger.error(\n \"Unable to connect to Elasticsearch: %s details: %s\", e.error, e.info\n )\n raise EntityResolverConnectionError(es_host=self._es_client.transport.hosts) from e\n except TransportError as e:\n logger.error(\n \"Unexpected error occurred when sending requests to Elasticsearch: %s \"\n \"Status code: %s details: %s\",\n e.error,\n e.status_code,\n e.info,\n )\n raise EntityResolverError from e\n except ElasticsearchException as e:\n raise EntityResolverError from e",
"def _validate_configurations(self) -> None:\n if self.__exception:\n raise self.__exception",
"def _init_es(self):\n es = Elasticsearch([{'host': elastic_conf['host'], 'port': elastic_conf['port']}])\n print('Connected to Elastic Search:', es.ping())\n return es",
"def load(self):\n try:\n if self._use_text_rel:\n scoped_index_name = get_scoped_index_name(\n self._app_namespace, self._es_index_name\n )\n if not self._es_client.indices.exists(index=scoped_index_name):\n self.fit()\n else:\n self.fit()\n\n except EsConnectionError as e:\n logger.error(\n \"Unable to connect to Elasticsearch: %s details: %s\", e.error, e.info\n )\n raise EntityResolverConnectionError(es_host=self._es_client.transport.hosts)\n except TransportError as e:\n logger.error(\n \"Unexpected error occurred when sending requests to Elasticsearch: %s \"\n \"Status code: %s details: %s\",\n e.error,\n e.status_code,\n e.info,\n )\n raise EntityResolverError\n except ElasticsearchException:\n raise EntityResolverError",
"async def check_config(self) -> None:\n try:\n await self._check_api()\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))",
"def es_connection_setUp(es_server):\n datalab_logger_connections.info(\"ES : setUp connection\")\n return Elasticsearch(es_server, timeout=30, ignore=[400, 404], maxsize=100)",
"def __init__(self, conn_str=ELASTICSEARCH_CONN):\n self.es = Elasticsearch(conn_str)",
"def get_es_config(): # pragma: no cover -- will be used eventually\n es_url = CONFIG.ES_HOSTNAME\n url_parts, url_username, url_password = parse_http_url(es_url)\n ssl = url_parts.scheme == \"https\"\n host = url_parts.hostname\n port = url_parts.port if url_parts.port else \"443\" if ssl else \"80\"\n user = url_username or \"\"\n password = url_password or \"\"\n\n # More values at:\n # - https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html\n # - https://www.elastic.co/guide/en/elasticsearch/hadoop/current/spark.html#spark-python\n config = {\n \"es.resource.write\": \"\",\n \"es.nodes\": host,\n \"es.port\": str(port), # default 9200\n \"es.index.auto.create\": \"yes\", # default yes\n # \"es.mapping.id\": \"_id\", # defaults to not set\n # \"es.nodes.data.only\": \"false\", # default true, but should not be set when in WAN-only mode\n \"es.nodes.wan.only\": \"true\", # default false\n \"es.net.http.auth.user\": user, # default (not set). Set if running on a local cluster that has auth\n \"es.net.http.auth.pass\": password, # default (not set) Set if running on a local cluster that has auth\n \"es.net.ssl\": str(ssl).lower(), # default false\n \"es.net.ssl.cert.allow.self.signed\": \"true\", # default false\n \"es.batch.size.entries\": str(CONFIG.ES_BATCH_ENTRIES), # default 1000\n \"es.batch.size.bytes\": str(CONFIG.ES_MAX_BATCH_BYTES), # default 1024*1024 (1mb)\n \"es.batch.write.refresh\": \"false\", # default true, to refresh after configured batch size completes\n }\n\n if is_aws():\n # Basic auth only required for local clusters\n config.pop(\"es.net.http.auth.user\")\n config.pop(\"es.net.http.auth.pass\")\n\n return config",
"def connect_elasticsearch():\n _es = None\n # create an instance of elasticsearch and assign it to port 9200\n _es = Elasticsearch([{'host': 'localhost', 'port': 9200}]) \n _es.cluster.health(wait_for_status='yellow', request_timeout=1)\n\n # pings the server and returns True if gets connected.\n if _es.ping(): \n print('Connected')\n else:\n print('It could not connect!')\n return _es",
"def connect():\n es = False\n tries = 0\n while tries < 5 and not es:\n try:\n # Connect to es node and round-robin between them.\n logger.debug(\"Connecting to %s.\" % es_nodes)\n es = Elasticsearch(es_nodes)\n except:\n etype, evalue, etb = sys.exc_info()\n logger.warning(\"Connection to %s failed. Exception: %s, Error: %s.\" % (es_nodes, etype, evalue))\n logger.warning(\"Waiting %s seconds before retring to connect.\" % ((4 + tries)))\n time.sleep(4 + tries)\n tries += 1\n continue\n if not es:\n logger.error(\"Connection to %s failed. Shutting down.\" % es_nodes)\n sys.exit(255)\n else:\n logger.debug(\"Connection to %s successful.\" % es_nodes)\n return es",
"def get_connection(self):\n\n return Elasticsearch(\n self.conn_info[\"DATABASE_URI\"][\"hosts\"],\n use_ssl=self.conn_info.get(\"USE_SSL\", False),\n verify_certs=self.conn_info.get(\"VERIFY_CERTS\", False),\n )",
"def test_configure_connection(connections, settings):\n settings.ES_URL = 'https://login:password@test:1234'\n connections.configure.return_value = {}\n\n elasticsearch.configure_connection()\n\n connections.configure.assert_called_with(default={\n 'hosts': [settings.ES_URL],\n 'verify_certs': settings.ES_VERIFY_CERTS,\n })",
"def _get_es_client(conf):\n return elasticsearch_client(conf)",
"def connect_to_ES(esEndPoint):\n print ('Connecting to the ES Endpoint {0}'.format(esEndPoint))\n try:\n esClient = Elasticsearch(\n hosts=[{'host': ES_ENDPOINT, 'port': 443}],\n http_auth=(ES_USER, ES_SECRET),\n use_ssl=True,\n verify_certs=True,\n connection_class=RequestsHttpConnection)\n return esClient\n except Exception as E:\n print(\"Unable to connect to {0}\".format(esEndPoint))\n print(E)",
"def __init__(self, target_ip=\"localhost\", source_ip=\"localhost\"):\n port = 9200\n target_server = [{\"host\": target_ip, \"port\": port}]\n source_server = [{\"host\": source_ip, \"port\": port}]\n try:\n self.client = Elasticsearch(hosts=target_server, timeout=300)\n self.source = Elasticsearch(hosts=source_server, timeout=300)\n except:\n print(\"Failed to establish connection with ES server.\")\n raise",
"def connect(args):\n return elasticsearch.Elasticsearch(\n [f\"{args.host}:{args.port}\"],\n use_ssl=True,\n verify_certs=True,\n ca_certs=args.ca_certs,\n client_cert=args.cert,\n client_key=args.key,\n )",
"def basic_client():\n es_connected = False\n while not es_connected:\n try:\n ES = Elasticsearch(\n hosts=[HOSTNAME]\n )\n es_connected = True\n except TransportError as e:\n logging.info('Not yet connected: %s, sleeping for 1s', e)\n time.sleep(1)\n return ES",
"def __init__(self, elasticsearch_url: str, index_name: str, embedding: Embeddings):\n try:\n import elasticsearch\n except ImportError:\n raise ValueError(\n \"Could not import elasticsearch python package. \"\n \"Please install it with `pip install elasticsearch`.\"\n )\n self.embedding = embedding\n self.index_name = index_name\n try:\n es_client = elasticsearch.Elasticsearch(elasticsearch_url) # noqa\n except ValueError as e:\n raise ValueError(\n f\"Your elasticsearch client string is misformatted. Got error: {e} \"\n )\n self.client = es_client",
"def connect_elasticsearch(ip = \"localhost\"):\n _es = None\n _es = Elasticsearch([{'host': ip, 'port': 9200}])\n if _es.ping():\n print('Succesfully Connected')\n else:\n print('Failed to connection')\n return _es",
"def prepare_environment():\n elastic_search = Elasticsearch('{}:{}'.format(\n _CONFIG.elastic.elastic_hostname,\n _CONFIG.elastic.elastic_port))\n try:\n if not elastic_search.indices.exists(_CONFIG.elastic.elastic_index):\n elastic_search.indices.create(\n index=_CONFIG.elastic.elastic_index,\n body=_CONFIG.elastic.metadata_index_setup)\n except ConnectionError:\n sys.exit(\"Can't start because of no connection to ElasticSearch.\")",
"def test_init(self):\n es = elasticsearch.ElasticSearch(server='8.8.8.8',\n user='alice',\n password='iLoveDogs',\n doc_type='someLogCategory')\n\n self.assertTrue(isinstance(es, elasticsearch.ElasticSearch))",
"def _load_config(self, conf):\n\t\tself.log.info(\"Loading configuration file...\")\n\n\t\tself.host = conf.get('host', None)\n\t\tself.port = conf.get('port', None)\n\t\tself.password = conf.get('password', None)\n\t\tself.conf_commands = conf.get('commands', None)\n\n\t\tif( self.host is None\n\t\t\t or self.port is None\n\t\t\t or self.password is None\n\t\t\t or self.conf_commands is None):\n\t\t\traise KeyError(\"Could not initialize OBS Client, missing host, port, password, or conf_commands!\")\n\n\t\tself.log.info(\"...Loaded configuration file.\")",
"def _check_configured(cls):\r\n if not cls._CONFIGURED:\r\n raise RuntimeError('Registry not configured')",
"def __init__(self, hosts, port, user_name, password, connection_class=RequestsHttpConnection):\n self.hosts = hosts\n self.connection_class = connection_class\n self.elastic_search_client = Elasticsearch(self.hosts, connection_class=self.connection_class)\n self.elastic_search_client = Elasticsearch(hosts, http_auth=(user_name, password), port=port)",
"def indices_client():\n es_connected = False\n while not es_connected:\n try:\n ES = Elasticsearch(\n hosts=[HOSTNAME]\n )\n es_connected = True\n except TransportError as e:\n logging.info('Not yet connected: %s, sleeping for 1s', e)\n time.sleep(1)\n return IndicesClient(ES)",
"def check_for_setup_error(self):\n super(RBDISCSIDriver, self).check_for_setup_error()\n\n required_options = ['rbd_iscsi_api_user',\n 'rbd_iscsi_api_password',\n 'rbd_iscsi_api_url',\n 'rbd_iscsi_target_iqn']\n\n for attr in required_options:\n val = getattr(self.configuration, attr)\n if not val:\n raise exception.InvalidConfigurationValue(option=attr,\n value=val)",
"def init(self, *args, **kwargs):\n try:\n self._init(*args, **kwargs)\n except (ValueError, TypeError, UnicodeError, ConfigParser.Error), exc:\n raise ConfigInvalidError, str(exc), sys.exc_info()[2]",
"def _initialize_logging(self):\n if self._custom_logger:\n self._logger.debug(\"Skipping logging init: custom logger detected\")\n return\n\n try:\n log_config = self._ez_client.get_logging_config(\n local=bool(self._config.runner_id)\n )\n except Exception as ex:\n self._logger.warning(\n \"Unable to retrieve logging configuration from Beergarden, the default \"\n \"configuration will be used instead. Caused by: {0}\".format(ex)\n )\n return\n\n try:\n configure_logging(\n log_config,\n namespace=self._system.namespace,\n system_name=self._system.name,\n system_version=self._system.version,\n instance_name=self._config.instance_name,\n )\n except Exception as ex:\n # Reset to default config as logging can be seriously wrong now\n logging.config.dictConfig(default_config(level=self._config.log_level))\n\n self._logger.exception(\n \"Error encountered during logging configuration. This most likely \"\n \"indicates an issue with the Beergarden server plugin logging \"\n \"configuration. The default configuration will be used instead. Caused \"\n \"by: {0}\".format(ex)\n )\n return\n\n # Finally, log uncaught exceptions using the configuration instead of stderr\n self._set_exception_hook(self._logger)",
"def connection_es(): \n client = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n index_name = \"fs_metadata_\"\n return client, index_name"
] | [
"0.6263491",
"0.62350416",
"0.6013181",
"0.5960673",
"0.59529793",
"0.5821141",
"0.5678517",
"0.5538262",
"0.5535928",
"0.5493257",
"0.54761046",
"0.5462948",
"0.53717524",
"0.5338984",
"0.5287106",
"0.52525455",
"0.5251487",
"0.5240034",
"0.5202334",
"0.5163459",
"0.51596135",
"0.5156378",
"0.5139358",
"0.5100653",
"0.5086254",
"0.5068159",
"0.5067595",
"0.5051327",
"0.4987188",
"0.4973316"
] | 0.699837 | 0 |
The name of the search index is the prefix (defined in ExternalSearchTest.setup) plus a version number associated with this version of the core code. | def test_works_index_name(self):
assert "test_index-v4" == self.search.works_index_name(self._db) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def db_index_name(self):\r\n return 'index_{}'.format(self.db_field_name)",
"def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")",
"def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")",
"def build_index():\n pass",
"def elasticsearch_index_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"elasticsearch_index_prefix\")",
"def _get_index_name(self, timestamp):\n if self.index_rollover.lower() == 'monthly':\n return \"%s-%s\" % (self.es_config['all_index'],\n timestamp.strftime(\"%m%Y\")) \n elif self.index_rollover.lower() == 'daily':\n return \"%s-%s\" % (self.es_config['all_index'],\n timestamp.strftime(\"%d%m%Y\")) \n else:\n return self.es_config['all_index']",
"def get_internal_index_name(self, schema, language, type_name, version):\n\n return '-'.join((\n self.get_external_index_name(schema, language, type_name),\n utils.normalize_index_segment(version, allow_wildcards=False)\n ))",
"def short_str(self):\n return \"#{}\".format(self.core_index)",
"def name(self):\n return _version._NAME # pylint: disable=protected-access",
"def create_index():",
"def get_index_text(self, crate, module, impl, name):\n return _('%s (Rust function)') % name",
"def getName(self, index) -> Str:\n ...",
"def pypi_indexes() -> IndexesDefinition:\n return {}",
"def getvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value",
"def test_string_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_string_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, \"foobar\")\n\t)",
"def test_set_appsearch_engine_name(self):\n\n # Get the current engine name and store it\n original_engine_name = Car.get_appsearch_engine_name()\n\n # Set a new app search engine name\n Car.set_appsearch_engine_name('test_cars')\n\n # Test if its set successfully\n engine_name = Car.get_appsearch_engine_name()\n self.assertEqual(engine_name, 'test_cars')\n\n # Reset it back to the original\n Car.set_appsearch_engine_name(original_engine_name)",
"def test_creating_index_type(self):",
"def versionstring():\n return \"%i.%i.%i\" % __version__",
"def versionstring():\n return \"%i.%i.%i\" % __version__",
"def update_index(signum):\n cdx = redis_cli.zrange('ipfs:cdxj', 0, -1)\n cdx = ''.join(cdx)\n buff = BytesIO(cdx)\n\n # Add New Index\n res = ipfs_api.add(CustomNameStream(buff, 'index.cdxj'))\n print('Updating Index: ' + str(res))\n\n # Register with IPNS\n res = ipfs_api.name_publish(res['Hash'])\n print res",
"def get_index_text(self, name):\n raise NotImplementedError('must be implemented in subclasses')",
"def getvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)",
"def _amd_index(sysfs_gpu_name):\n drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]\n return drop_prefix.split('/')[0]",
"def get_index_text(self, crate, module, impl, name):\n return _('%s (Rust struct)') % name",
"def test_index(self):",
"def test_index(self):\n a = create_addon('zool')\n es = self.es\n es.refresh()\n r = es.search(query=StringQuery('zool'))\n eq_(r['hits']['total'], 1)\n eq_(r['hits']['hits'][0]['_source']['name'], a.name)\n return a",
"def _init_prefix(self):\n self._.prefix = \"v%x\" % (hash(self) % Integer(2)**32)",
"def create_index(self, index_name):\n print(f\"Creating {index_name} index started \\n\")\n add_index = '//*[@id=\"content-react\"]/div/div/button'\n create_new_index_btn_sitem = self.locator_finder_by_xpath(add_index)\n create_new_index_btn_sitem.click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n\n if index_name == 'Persistent':\n # selecting persistent index's filed\n persistent_field = \"/html//input[@id='fields']\"\n persistent_field_sitem = self.locator_finder_by_xpath(persistent_field)\n persistent_field_sitem.click()\n persistent_field_sitem.send_keys('name')\n\n # selecting persistent index's name\n persistent_name = \"/html//input[@id='name']\"\n persistent_name_sitem = self.locator_finder_by_xpath(persistent_name)\n persistent_name_sitem.click()\n persistent_name_sitem.send_keys(index_name)\n\n # selecting persistent index's extra value\n extra_value = \"/html//input[@id='storedValues']\"\n extra_value_sitem = self.locator_finder_by_xpath(extra_value)\n extra_value_sitem.click()\n extra_value_sitem.send_keys('email, likes')\n\n # selecting persistent index's sparse value\n sparse = \"(//span[@aria-hidden='true'])[1]\"\n sparse_sitem = self.locator_finder_by_xpath(sparse)\n sparse_sitem.click()\n\n # selecting persistent index's duplicate array value\n duplicate_array = '//*[@id=\"content-react\"]/div/div[3]/form/div/div[1]/div[11]/label/span/span'\n duplicate_array_sitem = self.locator_finder_by_xpath(duplicate_array)\n duplicate_array_sitem.click()\n\n memory_cache = '//*[@id=\"content-react\"]/div/div[3]/form/div/div[1]/div[15]/label/span/span'\n memory_cache_sitem = self.locator_finder_by_xpath(memory_cache)\n memory_cache_sitem.click()\n\n elif index_name == 'Geo':\n self.select_desired_index_from_the_list('Geo Index')\n # selecting geo index's filed\n geo_field = \"/html//input[@id='fields']\"\n geo_field_sitem = self.locator_finder_by_xpath(geo_field)\n geo_field_sitem.click()\n geo_field_sitem.send_keys('region')\n\n # selecting geo index's name\n geo_name = \"/html//input[@id='name']\"\n geo_name_sitem = self.locator_finder_by_xpath(geo_name)\n geo_name_sitem.click()\n geo_name_sitem.send_keys(index_name)\n\n elif index_name == 'Fulltext':\n self.select_desired_index_from_the_list('Fulltext Index')\n # selecting fullText index's filed\n full_text_field = \"/html//input[@id='fields']\"\n full_text_field_sitem = self.locator_finder_by_xpath(full_text_field)\n full_text_field_sitem.click()\n full_text_field_sitem.send_keys('region')\n\n # selecting fullText index's name\n full_text_name = \"/html//input[@id='name']\"\n full_text_name_sitem = self.locator_finder_by_xpath(full_text_name)\n full_text_name_sitem.click()\n full_text_name_sitem.send_keys(index_name)\n\n # selecting fullText index's min length\n min_length = \"/html//input[@id='minLength']\"\n min_length_sitem = self.locator_finder_by_xpath(min_length)\n min_length_sitem.click()\n min_length_sitem.send_keys()\n\n elif index_name == 'TTL':\n self.select_desired_index_from_the_list('TTL Index')\n # selecting ttl index's filed\n ttl_field = \"/html//input[@id='fields']\"\n ttl_field_sitem = self.locator_finder_by_xpath(ttl_field)\n ttl_field_sitem.click()\n ttl_field_sitem.send_keys('region')\n\n # selecting ttl index's name\n ttl_name = \"/html//input[@id='name']\"\n ttl_name_sitem = self.locator_finder_by_xpath(ttl_name)\n ttl_name_sitem.click()\n ttl_name_sitem.send_keys(index_name)\n\n ttl_expire = \"/html//input[@id='expireAfter']\"\n ttl_expire_sitem = 
self.locator_finder_by_xpath(ttl_expire)\n ttl_expire_sitem.click()\n ttl_expire_sitem.send_keys(1000)\n\n elif index_name == 'Inverted Index':\n action = ActionChains(self.driver)\n self.select_desired_index_from_the_list('Inverted Index')\n\n fields = \"(//div[contains(@class,'css-1d6mnfj')])[2]\"\n fields_sitem = self.locator_finder_by_xpath(fields)\n fields_sitem.click()\n action.send_keys('region').send_keys(Keys.ENTER).send_keys('name').send_keys(Keys.ENTER).perform()\n time.sleep(1)\n\n analyzer = \"//*[text()='Analyzer']\"\n analyzer_sitem = self.locator_finder_by_xpath(analyzer)\n analyzer_sitem.click()\n action.send_keys(Keys.DOWN).send_keys(Keys.ENTER).perform()\n time.sleep(1)\n\n include_all_fields = \"//*[text()='Include All Fields']\"\n include_all_fields_sitem = self.locator_finder_by_xpath(include_all_fields)\n include_all_fields_sitem.click()\n time.sleep(1)\n\n track_all_position = \"//*[text()='Track List Positions']\"\n track_all_position_sitem = self.locator_finder_by_xpath(track_all_position)\n track_all_position_sitem.click()\n time.sleep(1)\n\n search_fields = \"//*[text()='Search Field']\"\n search_fields_sitem = self.locator_finder_by_xpath(search_fields)\n search_fields_sitem.click()\n time.sleep(1)\n\n general_name = \"//*[text()='Name']\"\n general_name_sitem = self.locator_finder_by_xpath(general_name)\n general_name_sitem.click()\n action.send_keys('Inverted').perform()\n time.sleep(1)\n\n general_writebuffer_idle = \"//*[text()='Writebuffer Idle']\"\n general_writebuffer_idle_sitem = self.locator_finder_by_xpath(general_writebuffer_idle)\n general_writebuffer_idle_sitem.click()\n action.key_down(Keys.CONTROL).\\\n send_keys(\"a\").\\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE).\\\n send_keys(100).perform()\n time.sleep(1)\n\n general_writebuffer_active = \"//*[text()='Writebuffer Active']\"\n general_writebuffer_active_sitem = self.locator_finder_by_xpath(general_writebuffer_active)\n general_writebuffer_active_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(1).perform()\n time.sleep(1)\n\n general_writebuffer_size_max = \"//*[text()='Writebuffer Size Max']\"\n general_writebuffer_size_max_sitem = self.locator_finder_by_xpath(\n general_writebuffer_size_max)\n general_writebuffer_size_max_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(33554438).perform()\n time.sleep(1)\n\n general_cleanup_startup_steps = \"//*[text()='Cleanup Interval Step']\"\n general_cleanup_startup_steps_sitem = self.locator_finder_by_xpath(\n general_cleanup_startup_steps)\n general_cleanup_startup_steps_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(3).perform()\n time.sleep(1)\n\n general_commit_interval = \"//*[text()='Commit Interval (msec)']\"\n general_commit_interval_sitem = self.locator_finder_by_xpath(\n general_commit_interval)\n general_commit_interval_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(1010).perform()\n time.sleep(1)\n\n general_consolidation_interval = \"//*[text()='Consolidation Interval (msec)']\"\n general_consolidation_interval_sitem = self.locator_finder_by_xpath(\n general_consolidation_interval)\n general_consolidation_interval_sitem.click()\n action.key_down(Keys.CONTROL). 
\\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(1010).perform()\n time.sleep(1)\n\n primary_sort = \"//*[text()='Primary Sort']\"\n primary_sort_sitem = self.locator_finder_by_xpath(\n primary_sort)\n primary_sort_sitem.click()\n time.sleep(1)\n\n primary_sort_field = \"//*[text()='Field']\"\n primary_sort_field_sitem = self.locator_finder_by_xpath(\n primary_sort_field)\n primary_sort_field_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(\"name\").perform()\n time.sleep(1)\n\n stored_value = \"//*[text()='Stored Values']\"\n stored_value_sitem = self.locator_finder_by_xpath(\n stored_value)\n stored_value_sitem.click()\n time.sleep(1)\n\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(\"age\").perform()\n time.sleep(1)\n\n consolidation_policy = \"//*[text()='Consolidation Policy']\"\n consolidation_policy_sitem = self.locator_finder_by_xpath(\n consolidation_policy)\n consolidation_policy_sitem.click()\n time.sleep(1)\n\n segment_min = \"//*[text()='Segments Min']\"\n segment_min_sitem = self.locator_finder_by_xpath(\n segment_min)\n segment_min_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(2).perform()\n time.sleep(1)\n\n segment_max = \"//*[text()='Segments Max']\"\n segment_max_sitem = self.locator_finder_by_xpath(\n segment_max)\n segment_max_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(12).perform()\n time.sleep(1)\n\n segment_byte_max = \"//*[text()='Segments Bytes Max']\"\n segment_byte_max_sitem = self.locator_finder_by_xpath(\n segment_byte_max)\n segment_byte_max_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(5368709120).perform()\n time.sleep(1)\n\n segment_bytes_floor = \"//*[text()='Segments Bytes Floor']\"\n segment_bytes_floor_sitem = self.locator_finder_by_xpath(\n segment_bytes_floor)\n segment_bytes_floor_sitem.click()\n action.key_down(Keys.CONTROL). \\\n send_keys(\"a\"). \\\n key_up(Keys.CONTROL).send_keys(Keys.BACKSPACE). \\\n send_keys(5368709128).perform()\n time.sleep(1)\n\n else:\n self.navbar_goto(\"collections\")\n print(\"Selecting computed values collections. \\n\")\n col = '//*[@id=\"collection_ComputedValueCol\"]/div/h5'\n self.locator_finder_by_xpath(col).click()\n time.sleep(1)\n\n self.select_index_menu()\n\n create_new_index_btn_sitem = self.locator_finder_by_xpath(add_index)\n create_new_index_btn_sitem.click()\n time.sleep(2)\n\n print('ZKD Index (EXPERIMENTAL)')\n zkd_field = \"/html//input[@id='fields']\"\n zkd_field = self.locator_finder_by_xpath(zkd_field)\n zkd_field.click()\n zkd_field.send_keys('x,y')\n\n # selecting ZKD index's name\n zkd_name = \"/html//input[@id='name']\"\n zkd_name_sitem = self.locator_finder_by_xpath(zkd_name)\n zkd_name_sitem.click()\n zkd_name_sitem.send_keys(index_name)\n\n\n # create the index\n create_btn = \"//*[text()='Create']\"\n create_btn_sitem = self.locator_finder_by_xpath(create_btn)\n create_btn_sitem.click()\n time.sleep(2)",
"def get_index_text(self, crate, module, impl, name):\n raise NotImplementedError",
"def getbarvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getbarvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value"
] | [
"0.58981884",
"0.5859757",
"0.5859757",
"0.5726856",
"0.56515306",
"0.5513793",
"0.5505178",
"0.54348093",
"0.53886336",
"0.5384905",
"0.5370095",
"0.5368973",
"0.53638005",
"0.5342818",
"0.5323321",
"0.53231466",
"0.53118646",
"0.53068525",
"0.53068525",
"0.52520263",
"0.5219844",
"0.52175504",
"0.52119374",
"0.5188982",
"0.51799643",
"0.51722467",
"0.5169104",
"0.5156419",
"0.5150406",
"0.5131651"
] | 0.6855528 | 0 |
When all the filters are applied to `start`, the result is `finish`. | def filters_to(start, finish):
for find, replace in filters:
start = find.sub(replace, start)
assert start == finish | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def after_all(self) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_all()",
"def analyze(self, start, end):\n return",
"def FilterDone(self, last_bits):\n return last_bits",
"def __call__(self, start):\r\n return self._iterate(start)",
"def catch_up(self, start_ind):\n if self.is_main_filter:\n print(\"################## Starting Index: {} ################\".format(start_ind))\n\n self.explicit_count = 0\n\n ledger = deepcopy(self.ledger)\n # print(\"DT: {}\".format(self.delta_multiplier))\n # print(ledger[start_ind][\"P_prior\"])\n\n if ledger[start_ind][\"x_hat_prior\"] is None or ledger[start_ind][\"P_prior\"] is None:\n start_ind -= 1\n # print(\"Start index: {}\".format(start_ind))\n\n # Reset the ledger\n self.ledger = {}\n for i in range(1, start_ind):\n self.ledger[i] = ledger[i]\n self._add_block()\n\n # Reset the filter\n # self.filter = deepcopy(self.original_filter)\n \n self.filter.x_hat = ledger[start_ind][\"x_hat_prior\"]\n self.filter.P = ledger[start_ind][\"P_prior\"]\n \n for i_step in range(start_ind,len(ledger)):\n meas_list = ledger[i_step][\"meas\"]\n update_time = ledger[i_step][\"time\"]\n u = ledger[i_step][\"u\"]\n Q = ledger[i_step][\"Q\"]\n nav_mean = ledger[i_step][\"nav_mean\"]\n nav_cov = ledger[i_step][\"nav_cov\"]\n\n for meas in meas_list:\n self.add_meas(meas)\n self.update(update_time, u, Q, nav_mean, nav_cov)",
"def after_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_each(dataset)",
"def FilterScan(self, time_ranges, start_time, end_time, upload_time):\r\n # Always add it to total time_range\r\n self.total_time_range.AddScan(start_time, end_time,\r\n upload_time)\r\n\r\n for time_range in time_ranges:\r\n if time_range.TimeisValid(start_time):\r\n time_range.AddScan(start_time, end_time, upload_time)\r\n return\r\n\r\n logging.warning(\"Scan does not match any filters\")",
"def filter(self, filters):",
"def notify_start(self):\n super().notify_start()\n self._filter.reset()",
"def reduce_run():",
"def sum(self, start=0, end=None):\n return super().reduce(start, end)",
"def apply_filters(self):\n hurst_cut = 0\n coint_cut = 0\n half_life_cut = 0\n mean_cross_cut = 0\n\n # Create an empty list for pairs that pass the filter tests\n validated_pairs = []\n\n # Create all the pairs combination\n self.create_pair_differences()\n\n # Print the number of potential pairs\n print(f\"Number of potential pairs in before filter: {len(self.__pairs_data)}\")\n\n for pair in self.__pairs_data:\n # Select the stocks from the pair\n stock1 = pair[0]\n stock2 = pair[1]\n\n # Test the hurst filter\n if self.hurst_filter(self, stock1=stock1, stock2=stock2):\n hurst_cut += 1\n if self.engel_filter(self, stock1=stock1, stock2=stock2):\n coint_cut += 1\n if self.half_life_filter(self, stock1=stock1, stock2=stock2):\n half_life_cut += 1\n if self.mean_cross_filter(self, stock1=stock1, stock2=stock2):\n mean_cross_cut += 1\n validated_pairs.append([stock1, stock2])\n\n print(f\"Hurst filter pass: {hurst_cut}\")\n print(f\"Co-integration filter pass: {coint_cut}\")\n print(f\"Half-life filter pass: {half_life_cut}\")\n print(f\"Mean-cross filter pass: {mean_cross_cut}\")\n print(f\"Final Number of validated pairs: {len(validated_pairs)}\")\n print(\"The final validated pairs are: \")\n print(validated_pairs)\n\n # Save it to the attribute\n self.__validated_pairs = validated_pairs\n self.__validated_pairs_diff = self.__pair_diff[self.symbolize_pairs(self.__validated_pairs)]",
"def finish():",
"def finish():",
"def finish():",
"def finish():",
"def __get_final_successor_and_start(actions):\n branch_start_actions = []\n final_successor_action = []\n for steps in actions:\n steps_action = get_action_type(action=steps)\n if \"StartAction\" in steps_action:\n branch_start_actions.append(steps)\n elif \"StopAction\" in steps_action:\n final_successor_action.append(steps)\n return branch_start_actions, final_successor_action",
"def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)",
"def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)",
"def finish(self) -> Iterable[Chunk]:",
"def Chunks(start):\n func_iter = ida_funcs.func_tail_iterator_t( ida_funcs.get_func( start ) )\n for chunk in func_iter:\n yield (chunk.start_ea, chunk.end_ea)",
"def transform( request, data, finishing=False ):",
"def transform( request, data, finishing=False ):",
"def finish(self) -> None:",
"def finish(self) -> None:",
"def pre_stop(self):",
"def finished(self):",
"def step_filter(self, qs):\n return qs",
"def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))",
"def compute(self):\n Y = self.data[1]\n # Create an order 3 lowpass butterworth filter\n b, a = signal.butter(3, 0.05)\n # Apply the filter to Y. Use lfilter_zi to choose the initial condition of the filter\n zi = signal.lfilter_zi(b, a)\n z, _ = signal.lfilter(b, a, Y, zi=zi * Y[0])\n # Apply the filter again, to have a result filtered at an order the same as filtfilt\n z2, _ = signal.lfilter(b, a, z, zi=zi * z[0])\n # Use filtfilt to apply the filter\n self.data[2] = signal.filtfilt(b, a, Y)\n self.data[3] = self.data[2] - self.data[1] - self.dataSpan * 0.3\n self.updatePlot()"
] | [
"0.60245126",
"0.58253586",
"0.5557765",
"0.546555",
"0.5420422",
"0.5409369",
"0.53233445",
"0.5279904",
"0.5211791",
"0.51451194",
"0.51429945",
"0.5127559",
"0.50891775",
"0.50891775",
"0.50891775",
"0.50891775",
"0.5051931",
"0.5035896",
"0.49861154",
"0.4938378",
"0.49249658",
"0.49114963",
"0.49114963",
"0.48933914",
"0.48933914",
"0.4887889",
"0.48791566",
"0.48563156",
"0.48538172",
"0.48485777"
] | 0.74531156 | 0 |
Iterate over a WorkList until it ends, and return all of the pages. | def pages(worklist):
pagination = SortKeyPagination(size=2)
facets = Facets(
self._default_library, None, None, order=Facets.ORDER_TITLE
)
pages = []
while pagination:
pages.append(worklist.works(
self._db, facets, pagination, self.search
))
pagination = pagination.next_page
# The last page should always be empty -- that's how we
# knew we'd reached the end.
assert [] == pages[-1]
# Return all the other pages for verification.
return pages[:-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pages(self, url_list):\n page_helper = self.get_page\n pool = ThreadPool(self.max_threads)\n results = pool.map(page_helper, url_list)\n pool.close()\n pool.join()\n return results",
"def pages(self):\n # The page list comes in three sections. Given radius=3:\n # 0 1 2 ... n-2 n-1 n n+1 n+2 ... m-2 m-1 m\n # Alas, some caveats:\n # - These sections might overlap.\n # - The current page might not be integral.\n delta = self.radius - 1 # since the below two are off by one\n before_current = int(math.ceil(self.current_page - 1))\n after_current = int(math.floor(self.current_page + 1))\n pages = []\n\n # First through current\n if before_current - delta <= 1:\n pages.extend(range(0, before_current + 1))\n else:\n pages.append(None)\n pages.extend(range(\n before_current - delta, before_current + 1))\n\n # Current\n pages.append(self.current_page)\n\n # Current through end\n if self.last_page is None:\n # Don't know the last page. Show one more and ..., if appropriate\n if self.next_item and \\\n after_current * self.page_size <= self.maximum_skip:\n\n pages.append(after_current)\n pages.append(None)\n return pages\n\n if after_current + delta >= self.last_page - 1:\n pages.extend(range(\n after_current, self.last_page + 1))\n else:\n pages.extend(range(after_current, after_current + delta + 1))\n pages.append(None)\n\n return pages",
"def _all_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_token = None\n is_truncated = True\n while is_truncated:\n page = page_function(token=next_token, **kwargs)\n next_token = page.next_token\n is_truncated = page.is_truncated and next_token is not None\n for task in page.page_data:\n yield task",
"def _get_iter(self, url, params):\n for current_page_index in itertools.count():\n result_dict = self._get_page(url, params, current_page_index)\n for document in result_dict['entries']:\n yield document\n if not result_dict['isNextPageAvailable']:\n break",
"def _get_pages(self,url,params,section):\n if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results",
"def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data",
"def _paginate(self) -> Iterable[List[str]]:\n req = self.html\n videos_lens = self._extractor(req)\n yield videos_lens # yielding doesn't mean that is the end\n\n # The above only returns 100 or fewer links\n # as Youtube loads 100 videos at a time\n # Simulating a browser request for the load more link\n load_more_url = self._find_load_more_url(req)\n\n while load_more_url: # there is an url found\n req = get(load_more_url)\n load_more = json.loads(req)\n try:\n html = load_more[\"content_html\"]\n except KeyError:\n return # if there is no content_html there is no chanch to find_load_more_url\n videos_lens = self._extractor(html)\n yield videos_lens\n\n load_more_url = self._find_load_more_url(\n load_more[\"load_more_widget_html\"],\n )\n\n return",
"def iter_pages(self):\n for num in range(1, self.pages + 1):\n yield Page(num)",
"def getAllListPage():\n firstPage = city + '/line1'\n data = urlopen(firstPage).read().decode('gbk')\n urlList = getLineTypeList(data)\n urlList.append(firstPage)\n num = len(urlList)\n i = 0\n p = Pool(processes=4)\n pageData = p.map(readData, urlList)\n# manager = Manager()\n# pageData = manager.list()\n# while i < num:\n# procline = Process(target=readData, args=(urlList[i], pageData,))\n# procline.start()\n# procline.join()\n# i += 1\n return pageData",
"def _get_page_range(self):\r\n return list(range(1, self.num_pages + 1))",
"def GetNextBatchOfResults(self) -> typing.List[Repository.Repository]:\n while True:\n try:\n logging.debug('Requesting page %d', self.next_page_num)\n page = list(self.query.get_page(self.next_page_num))\n logging.debug('Page %d contains %d results', self.next_page_num,\n len(page))\n self.next_page_num += 1\n return page\n except github.RateLimitExceededException:\n logging.debug('Pausing on GitHub rate limit')\n time.sleep(3)\n except github.GithubException:\n # One possible cause for this exception is when trying to request\n # a page beyond 1000 results, since GitHub only returns the first\n # 1000 results for a query.\n return []",
"def __iter__(self):\n while self.has_next_page():\n response = self.get_next_page_response()\n for item in self.get_items_from_response(response):\n yield item",
"def chunk(self, count):\n page = 1\n results = self.for_page(page, count).get()\n\n while len(results) > 0:\n yield results\n\n page += 1\n\n results = self.for_page(page, count).get()",
"def get_pages(search_url):\n page_number = 1\n page = fetch_page(search_url.format(page_number))\n while (page_exists(page)) & (page_number <= 100):\n print (page_number, end=', ')\n yield page, page_number\n page_number += 1\n page = fetch_page(search_url.format(page_number))",
"def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1",
"def fetch_pages(query_val, page_num):\n \n for page_id in range(1 + page_num + 1):\n try:\n output = fetch_data(query_val, page_id)\n for j in output:\n print(str(j))\n \n except Exception as e:\n print(e)",
"def __iter__(self) -> Generator[WikiPage, None, None]:\n response = self.subreddit._reddit.get(\n API_PATH[\"wiki_pages\"].format(subreddit=self.subreddit),\n params={\"unique\": self.subreddit._reddit._next_unique},\n )\n for page_name in response[\"data\"]:\n yield WikiPage(self.subreddit._reddit, self.subreddit, page_name)",
"def iter_pages(self) -> Generator[Tuple[Optional[List[dict]], int], None, None]:\n # retrieves the data for the given url\n data_list, response, result = self.retrieve_data(self.url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n yield None, None\n return\n\n # this retrieves the page for the given url\n page_number = get_url_page_number(self.url)\n\n # yields the first page of data and its page number\n yield data_list, page_number\n\n while 'next' in response.links.keys():\n\n # gets the next page from the last responses header\n next_page = response.links['next']['url']\n\n # Here we don't need to pass in params with the page, or the default params because the url from the headers already has those values\n data_list, response, result = self.retrieve_data(next_page)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(f\"Failed to retrieve the data for even though 10 attempts were given. Url: {next_page}\")\n return\n\n page_number = get_url_page_number(next_page)\n\n # if either the data or response is None then yield None and return\n if data_list is None or response is None:\n return\n\n # yield the data from the page and its number\n yield data_list, page_number",
"def results(self):\n page = []\n\n for i, item in enumerate(super(VideoCarouselTile, self).results()):\n page.append(item)\n if (i + 1) % 3 == 0:\n yield page\n page = []\n if page:\n yield page",
"def next_pages(self):\n \n if len(self.all_pages) - self.end > 0:\n return self.all_pages[self.end:]\n return",
"def _get_allpages(self, url:str, paramsdict:Dict[str,str]):\n r1 = self._get_dict_from_url(url, paramsdict)\n r = [r1]\n #display(r)\n if 'total_pages' in r1:\n # print('more than one page')\n for next_page in range(2, r1['total_pages']+1):\n # print(f\"load page {next_page} \")\n r.append(self._get_dict_from_url(url, {**paramsdict, 'page':next_page}))\n # print(len(r))\n # print([len(rx['results']) for rx in r])\n results = [entry for rx in r for entry in rx['results'] ]\n\n return results",
"def get_page_list(self, offset=0, limit=50):\n return self._telegraph.method('getPageList', {\n 'offset': offset,\n 'limit': limit\n })",
"def __iter__(self):\n return self.paged()",
"def __update_page_results(self):\n \n pages = []\n\n # Request id for pages associated to search term \n page_fields='page&fields=id,name,username,link'\n term = self.track[self.track_index]\n self.track_index += 1\n \n # Define url for http request to get pages id associated to search term \n page_request_url = 'https://graph.facebook.com/search?q=%s&type=%s&limit=%d&access_token=%s'%(term,page_fields,self.page_lim,self.access_token)\n \n while(True):\n # Try 100 times\n for i in range(100):\n \n page_response = requests.get(page_request_url)\n \n if 'error' in page_response.json() or page_response.status_code <> 200:\n print \"\\n !---- ERROR IN SEARCH REQUEST ----!\"\n print time.ctime()\n print \"Status Code: \", page_response.status_code\n print page_response.json()\n #raise StopIteration()\n time.sleep(1800) # Wait 30 minutes\n else:\n break\n \n page_json = page_response.json()\n pages = pages + page_json['data']\n time.sleep(5)\n \n if 'next' in page_json['paging']:\n page_request_url = page_json['paging']['next']\n else:\n break\n \n print \"Term: %s, Pages: %d\"%(term, len(pages))\n return pages",
"def get_paged_entries(start_page=0, pg_size=2):\n skip = pg_size * start_page\n start, end = 0 + skip, pg_size - 1 + skip\n while True:\n entries = r.zrevrange('entry_index', start, end)\n if not entries:\n break\n yield entries\n start, end = start + pg_size, end + pg_size",
"def _fetch_in_bulk(self, func_name, page_range, **func_args):\n all_results = []\n prog_bar = None\n\n if 'page_num' in func_args:\n func_args = func_args.pop('page_num')\n\n if self.profile.use_prog_bar:\n try:\n max_val = (max(page_range) + 1)\n except ValueError:\n max_val = 1\n\n prog_bar = progressbar.ProgressBar(max_value=max_val)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.profile.num_thread_workers) as executor:\n counter = 1\n future_to_page = {executor.submit(func_name, page_num=page, **func_args): page for page in page_range}\n\n for future in concurrent.futures.as_completed(future_to_page):\n try:\n data = future.result()\n except PageSizeError:\n raise\n except RequestFailed:\n continue\n\n if 'content' in data:\n items = data['content']\n for item in items:\n all_results.append(item)\n\n if self.profile.use_prog_bar:\n prog_bar.update(counter)\n time.sleep(0.1)\n counter += 1\n\n if self.profile.use_prog_bar:\n prog_bar.finish()\n\n return all_results",
"def _all_offset_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_offset = 0\n is_truncated = True\n while is_truncated:\n page = page_function(offset=next_offset, **kwargs)\n next_offset = page.offset + page.limit\n is_truncated = page.total > next_offset\n for data in page.page_data:\n yield data",
"def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data",
"def parallel_get_pages(args):\n n_requests, from_id, step, index_name, es = args\n all_sites_arr = []\n for _ in range(n_requests):\n waiting_response_time = 0\n for i in range(5):\n time.sleep(waiting_response_time)\n\n try:\n res = es.search(\n index=index_name,\n body={\n \"from\": from_id,\n \"query\": {\n \"match_all\": {}\n },\n \"size\": step,\n \"sort\": {\n \"site_id\": \"asc\"\n }\n },\n request_timeout=1000\n )\n print(\"Got %d Hits\" % len(res['hits']['hits']))\n\n for site in res['hits']['hits']:\n all_sites_arr.append({\n \"link\": site[\"_source\"][\"link\"],\n \"hyperlinks\": site[\"_source\"][\"hyperlinks\"]\n })\n\n break\n except TransportError as exc:\n print('index setup error', exc)\n\n waiting_response_time = math.exp(i + 1)\n\n from_id += step\n time.sleep(10)\n\n return all_sites_arr",
"def page_query(q):\n\toffset = 0\n\twhile True:\n\t\tr = False\n\t\tfor elem in q.limit(1000).offset(offset):\n\t\t r = True\n\t\t yield elem\n\t\toffset += 1000\n\t\tif not r:\n\t\t\tbreak"
] | [
"0.6512649",
"0.6426794",
"0.6418355",
"0.63479626",
"0.62923247",
"0.6280523",
"0.6178995",
"0.6132314",
"0.61132336",
"0.6108371",
"0.6048124",
"0.6044336",
"0.60390985",
"0.6030068",
"0.5951634",
"0.59403145",
"0.59379506",
"0.58951074",
"0.5890618",
"0.5873708",
"0.5872979",
"0.5863401",
"0.585357",
"0.58107364",
"0.5804669",
"0.57755595",
"0.574847",
"0.5736505",
"0.5730413",
"0.5716344"
] | 0.7885278 | 0 |
Verify that when the books created during test setup are ordered by the given `sort_field`, they show up in the given `order`. Also verify that when the search is ordered descending, the same books show up in the opposite order. This proves that `sort_field` isn't being ignored creating a test that only succeeds by chance. | def assert_order(sort_field, order, **filter_kwargs):
expect = self._expect_results
facets = Facets(
self._default_library, Facets.COLLECTION_FULL,
Facets.AVAILABLE_ALL, order=sort_field, order_ascending=True
)
expect(order, None, Filter(facets=facets, **filter_kwargs))
facets.order_ascending = False
expect(list(reversed(order)), None, Filter(facets=facets, **filter_kwargs))
# Get each item in the list as a separate page. This
# proves that pagination works for this sort order for
# both Pagination and SortKeyPagination.
facets.order_ascending = True
for pagination_class in (
Pagination, SortKeyPagination
):
pagination = pagination_class(size=1)
to_process = list(order) + [[]]
while to_process:
filter = Filter(facets=facets, **filter_kwargs)
expect_result = to_process.pop(0)
expect(expect_result, None, filter, pagination=pagination)
pagination = pagination.next_page
# We are now off the edge of the list -- we got an
# empty page of results and there is no next page.
assert None == pagination
# Now try the same tests but in reverse order.
facets.order_ascending = False
for pagination_class in (
Pagination, SortKeyPagination
):
pagination = pagination_class(size=1)
to_process = list(reversed(order)) + [[]]
results = []
pagination = SortKeyPagination(size=1)
while to_process:
filter = Filter(facets=facets, **filter_kwargs)
expect_result = to_process.pop(0)
expect(expect_result, None, filter, pagination=pagination)
pagination = pagination.next_page
# We are now off the edge of the list -- we got an
# empty page of results and there is no next page.
assert None == pagination | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )",
"def sort(self, *order_fields):\n return MockSearch(\n self, self._query, self.nested_filter_calls, order_fields,\n self._script_fields\n )",
"def test_ordering_with_overridden_field_name_and_descending(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status', '-status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])",
"def test_sorting_album(self):\n self.add_mp3(artist='Artist', title='Title 1',\n album='Album 1', year=2016, filename='song1.mp3')\n self.add_mp3(artist='Artist', title='Title 2',\n album='Album 2', year=2006, filename='song2.mp3')\n self.add_mp3(artist='Artist', title='Title 3',\n album='Album 3', year=1996, filename='song3.mp3')\n self.run_add()\n self.assertEqual(Album.objects.count(), 3)\n\n albums = [\n Album.objects.get(name='Album 1'),\n Album.objects.get(name='Album 2'),\n Album.objects.get(name='Album 3'),\n ]\n artist = Artist.objects.get(name='Artist')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 3)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in albums])\n self.assertContains(response, '\"?album-sort=year\"')\n\n # test the sorting button\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': 'year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 3)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in reversed(albums)])\n self.assertContains(response, '\"?album-sort=-year\"')",
"def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")",
"def test_sorting_song(self):\n self.add_mp3(artist='Artist', title='Title 1',\n album='Album 3', filename='song1.mp3')\n self.add_mp3(artist='Artist', title='Title 2',\n album='Album 2', filename='song2.mp3')\n self.add_mp3(artist='Artist', title='Title 3',\n album='Album 1', filename='song3.mp3')\n self.run_add()\n self.assertEqual(Album.objects.count(), 3)\n\n songs = [\n Song.objects.get(title='Title 1'),\n Song.objects.get(title='Title 2'),\n Song.objects.get(title='Title 3'),\n ]\n artist = Artist.objects.get(name='Artist')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['songs'].data), 3)\n self.assertQuerysetEqual(response.context['songs'].data, [repr(al) for al in songs])\n self.assertContains(response, '\"?song-sort=album\"')\n\n # test the sorting button\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'song-sort': 'album'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['songs'].data), 3)\n self.assertQuerysetEqual(response.context['songs'].data, [repr(al) for al in reversed(songs)])\n self.assertContains(response, '\"?song-sort=-album\"')",
"def test_entities__Entity__getFieldOrder__2(entity_with_field, field):\n entity = entity_with_field\n entity.setFieldOrder([field.__name__, 'dummy'])\n assert [field.__name__, 'dummy'] == entity.getFieldOrder()",
"def test_sortby_invalid(self):\n qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)",
"def validate_sort_order(filter, main_field):\n\n # The tiebreaker fields are always in the same order, but\n # if the main sort field is one of the tiebreaker fields,\n # it's removed from the list -- there's no need to sort on\n # that field a second time.\n default_sort_fields = [\n {x: \"asc\"} for x in ['sort_author', 'sort_title', 'work_id']\n if x != main_field\n ]\n assert default_sort_fields == filter.sort_order[1:]\n return filter.sort_order[0]",
"def test_collection_ordering(mocker, logged_in_apiclient, field):\n mocker.patch(\"ui.serializers.get_moira_client\")\n mocker.patch(\"ui.utils.get_moira_client\")\n CollectionSetPagination.page_size = 5\n client, user = logged_in_apiclient\n CollectionFactory.create_batch(10, owner=user)\n url = reverse(\"models-api:collection-list\")\n p1_response = client.get(\"{}?page=1&ordering={}\".format(url, field))\n assert len(p1_response.data[\"results\"]) == 5\n for i in range(4):\n assert (\n p1_response.data[\"results\"][i][field].lower()\n <= p1_response.data[\"results\"][i + 1][field].lower()\n )\n p2_response = client.get(\"{}?page=2&ordering={}\".format(url, field))\n assert (\n p1_response.data[\"results\"][-1][field].lower()\n <= p2_response.data[\"results\"][0][field].lower()\n )\n for i in range(4):\n assert (\n p2_response.data[\"results\"][i][field].lower()\n <= p2_response.data[\"results\"][i + 1][field].lower()\n )",
"def validate_sort_and_order_and_artifact(sort: Optional[str] = None, order: Optional[str] = None,\n artifact_source: Optional[str] = None) -> bool:\n if artifact_source == 'true' and sort:\n raise Exception('Please remove or disable one of sort or artifact,'\n ' As they are not supported in the api together.')\n elif sort and not order:\n raise Exception('Please specify the order of sorting (Ascending or Descending).')\n elif order and not sort:\n raise Exception('Please specify a field to sort by.')\n elif sort and order:\n return True\n return False",
"def test_entities__Entity__setFieldOrder__2(entity_with_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', 'I-do-not-exist', 'dummy'])\n assert ['dummy2', 'dummy'] == entity.getFieldOrder()\n # Unknown field names are not written into storage:\n order_storage = zope.component.getUtility(IOrderStorage)\n assert (['dummy2', 'dummy'] ==\n order_storage.byNamespace(entity.order_storage_namespace))",
"def test_scroll_query_sort_safe(self):\n self._validate_scroll_search_params({\"sort\": \"_id\"}, {\"sort\": \"_id\"})",
"def test_entities__Entity__setFieldOrder__1(entity_with_field, field):\n assert [] == entity_with_field.getFieldOrder()\n entity_with_field.setFieldOrder(['dummy2', field.__name__, 'dummy'])\n assert (['dummy2', field.__name__, 'dummy'] ==\n entity_with_field.getFieldOrder())",
"def test_sorting_album_year_time_added(self):\n self.add_mp3(artist='Artist', title='Title 1',\n album='Album 1', year=2017, filename='song1.mp3')\n self.add_mp3(artist='Artist', title='Title 2',\n album='Album 2', year=2017, filename='song2.mp3')\n self.run_add()\n al2 = self.age_album('Artist', 'Album 2', 10)\n self.assertEqual(Album.objects.count(), 2)\n\n albums = [\n al2,\n Album.objects.get(name='Album 1'),\n ]\n artist = Artist.objects.get(name='Artist')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': 'year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 2)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in albums])\n self.assertContains(response, '\"?album-sort=-year\"')\n\n # test reverse sort\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': '-year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 2)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in reversed(albums)])\n self.assertContains(response, '\"?album-sort=year\"')",
"def testSorting(self):\n if self.sorting in tools.SORTINGS:\n self.assertEqual(\n self.sorting,\n self.config.sorting\n )\n else:\n self.assertNotEqual(\n self.sorting,\n self.config.sorting\n )\n self.assertEqual(\n tools.SORTING_DEFAULT,\n self.config.sorting\n )",
"def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items",
"def test_shelflistitem_view_orderby(order_by, api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client):\n sl_urls = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])\n test_url = '{}?orderBy={}'.format(sl_urls.values()[0], order_by)\n response = api_client.get(test_url)\n assert response.status_code == 400\n assert 'not a valid field for ordering' in response.data['detail']",
"def test_listCatalogEntriesWithSortFilters(self):\n expected_orders = {\n 'launch_date': ['25544', '37820'],\n '-launch_date': ['37820', '25544'],\n 'norad_catalog_number': ['25544', '37820'],\n '-norad_catalog_number': ['37820', '25544'],\n }\n\n for param, order in expected_orders.items():\n response = self.client.get(\n '/api/v1/catalogentry/?ordering={}'.format(param)\n )\n content = response.content.decode('utf8')\n json_data = json.loads(content)\n\n for i in range(len(order)):\n self.assertEqual(\n json_data['results'][i]['norad_catalog_number'],\n order[i]\n )",
"def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))",
"def test_get_order(self):\n pass",
"def test_ordering_create(self):\n test_questionnaire = Questionnaire.objects.create(title='test_ordering_1')\n Question.objects.create(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 1',\n )\n Question.objects.create(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 2',\n )\n\n self.assertEqual(test_questionnaire.questions.get(question_text='question 1').order, 0)\n self.assertEqual(test_questionnaire.questions.get(question_text='question 2').order, 1)",
"def test_properties_xsorted(things, reverse):\n assert_property_xsorted_is_the_same_as_sorted(xsorted, things, reverse)",
"def test_sort(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"b\", \"a\", \"C\", \"B\", \"A\"],\n args=[],\n kwargs={},\n expect=[\"A\", \"B\", \"C\", \"a\", \"b\"],\n ),\n Case(\n description=\"lists of objects with key\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[{\"title\": \"Baz\"}, {\"title\": \"bar\"}, {\"title\": \"foo\"}],\n ),\n Case(\n description=\"lists of objects with missing key\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"heading\": \"Baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[{\"title\": \"bar\"}, {\"title\": \"foo\"}, {\"heading\": \"Baz\"}],\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"too many arguments\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\", \"heading\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=123,\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"undefined argument\",\n val=[{\"z\": \"z\", \"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=FilterValueError,\n ),\n ]\n\n self._test(Sort, test_cases)",
"def test_sortby_documents_helpful(self):\n r1 = RevisionFactory(is_approved=True)\n r2 = RevisionFactory(is_approved=True)\n HelpfulVoteFactory(revision=r2, helpful=True)\n\n # Note: We have to wipe and rebuild the index because new\n # helpful_votes don't update the index data.\n self.setup_indexes()\n self.reindex_and_refresh()\n\n # r2.document should come first with 1 vote.\n response = self.client.get(reverse('search.advanced'), {\n 'w': '1', 'a': '1', 'sortby_documents': 'helpful',\n 'format': 'json'\n })\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(r2.document.title, content['results'][0]['title'])\n\n # Vote twice on r1, now it should come first.\n HelpfulVoteFactory(revision=r1, helpful=True)\n HelpfulVoteFactory(revision=r1, helpful=True)\n\n self.setup_indexes()\n self.reindex_and_refresh()\n\n response = self.client.get(reverse('search.advanced'), {\n 'w': '1', 'a': '1', 'sortby_documents': 'helpful',\n 'format': 'json'})\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(r1.document.title, content['results'][0]['title'])",
"def test_property_xsorted_custom_serializer_is_the_same_as_sorted(xsorted_custom_serializer_fixture,\n things, reverse):\n assert_property_xsorted_is_the_same_as_sorted(xsorted_custom_serializer_fixture, things, reverse)",
"def test_invalid_sort_by_similarity(self):\n\n # no filter by similarity but order by similarity\n query = [{\n \"object_name\": \"Assessment\",\n \"order_by\": [{\"name\": \"__similarity__\"}],\n \"filters\": {\"expression\": {}},\n }]\n\n self.assert400(self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n ))\n\n # filter by similarity in one query and order by similarity in another\n query = [\n {\n \"object_name\": \"Assessment\",\n \"filters\": {\n \"expression\": {\n \"op\": {\"name\": \"similar\"},\n \"object_name\": \"Assessment\",\n \"ids\": [1],\n },\n },\n },\n {\n \"object_name\": \"Assessment\",\n \"order_by\": [{\"name\": \"__similarity__\"}],\n \"filters\": {\"expression\": {}},\n },\n ]\n\n self.assert400(self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n ))",
"def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)",
"def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status')])",
"def test_entities__Entity__getFields__2(entity_with_field, schemaized_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', schemaized_field.__name__, 'dummy'])\n assert ([('dummy', IDummy['dummy']),\n ('dummy2', IDummy['dummy2']),\n (schemaized_field.__name__, schemaized_field)] ==\n list(entity.getFields(sorted=False)))"
] | [
"0.69119376",
"0.62695354",
"0.59014153",
"0.5880185",
"0.5848647",
"0.5769646",
"0.5743111",
"0.5740987",
"0.56924033",
"0.56718487",
"0.56502676",
"0.5648147",
"0.5643026",
"0.56352484",
"0.56259537",
"0.55134785",
"0.55029243",
"0.5502388",
"0.5476563",
"0.54709595",
"0.5451467",
"0.54431564",
"0.5408119",
"0.5401141",
"0.54010683",
"0.5363871",
"0.5357735",
"0.5347787",
"0.53459674",
"0.53453034"
] | 0.7052679 | 0 |
Simulate the creation of an ElasticsearchDSL `Search` object from an ElasticsearchDSL `Query` object. | def query(self, query):
return MockSearch(
self, query, self.nested_filter_calls, self.order,
self._script_fields
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _search(self, query):\n return self._request(query)",
"def search_query(\n self,\n index, # type: str\n query, # type: SearchQuery\n *options, # type: SearchOptions\n **kwargs\n ) -> SearchResult:\n\n query = SearchQueryBuilder.create_search_query_object(\n index, query, *options, **kwargs\n )\n return SearchResult(SearchRequest.generate_search_request(self.connection,\n query.as_encodable(),\n default_serializer=self.default_serializer))",
"def query(self, search, query):\n if query:\n return search.query(\n \"simple_query_string\",\n query=query,\n fields=self.fields,\n default_operator='and',\n lenient=True\n )\n return search",
"def search(self, query):",
"def search_raw(cls, query=None, params=None, raw_result=False):\n if query is None:\n query = {}\n if params is None:\n params = {}\n res = cls.es.conn.search(index=cls.es.index,\n doc_type=cls.__type__,\n body=query,\n **params)\n if not raw_result:\n docs = res['hits']['hits']\n res = [cls(d['_source'], id=d['_id']) for d in docs]\n return res",
"def __init__(self, query, refresh=False, subscriber=True, cursor=True,\n view=None, download=True, **kwds):\n # Checks\n allowed_views = ('STANDARD', 'COMPLETE')\n if view and view not in allowed_views:\n raise ValueError('view parameter must be one of ' +\n ', '.join(allowed_views))\n # Parameters\n if not view:\n if subscriber:\n view = \"COMPLETE\"\n else:\n view = \"STANDARD\"\n count = 25\n if view == \"STANDARD\" and subscriber:\n count = 200\n # Query\n self.query = query\n Search.__init__(self, query=query, api='ScopusSearch', refresh=refresh,\n count=count, subscriber=subscriber, cursor=cursor,\n view=view, download_results=download, **kwds)",
"def _build_search(self, index, **kwargs):\n startdate = kwargs.get('startdate', None)\n if startdate:\n timefield = kwargs.get('timefield')\n enddate = kwargs.get('enddate', 'now')\n filters = kwargs.get('filters', None)\n exclude = kwargs.get('exclude', None)\n ranges = kwargs.get('ranges', None)\n fields_to_include = kwargs.get('field_to_include', None)\n wildcards = kwargs.get('wildcard', None)\n start_from = kwargs.get('from_', None)\n size = kwargs.get('size', None)\n sort_ = kwargs.get('sort', None)\n\n search = Search(using=self.es, index=index, doc_type=self.doc_type)\\\n .params(request_timeout=2000)\n\n if startdate:\n if startdate != enddate:\n timeRange = {timefield: {'gte': startdate, 'lt': enddate}}\n else:\n timeRange = {timefield: {'gte': startdate, 'lte': enddate}}\n search = search.filter('range', **timeRange)\n if filters:\n for key, val in filters.items():\n search = search.filter('terms' if isinstance(val, list) else 'term', **{key: val})\n if exclude:\n for ex in exclude.keys():\n search = search.exclude('terms', **{ex: exclude[ex]})\n if ranges:\n # ranges are expected in format:\n # [{field:{'gte':value, 'lte':value}}, {field: {'gte': value}}, {field: {'lte': value}}]\n for range_filter in ranges:\n search = search.filter('range', **range_filter)\n if fields_to_include:\n for field in fields_to_include.keys():\n search = search.source(**{field: fields_to_include[field]})\n if wildcards:\n for wild in wildcards:\n search = search.filter('wildcard', **{wild: wildcards[wild]})\n if start_from:\n search = search.extra(**{\"from_\": start_from})\n if size:\n search = search.extra(**{\"size\": size})\n if sort_:\n search = search.sort(*sort_)\n\n self._logger.info(json.dumps(search.to_dict()))\n\n return search",
"def _create_query(cls, search: Search):\n\n search_columns = []\n for column_name in search.SearchBy.split(\",\"): # accepts multiple columns split by ,\n search_column = cls._get_column_from_name(column_name)\n if search_column is None:\n raise AppException(\"The column {} you are trying to search at don't exists.\".format(column_name))\n search_columns.append(search_column)\n\n find_values = []\n for value in search.SearchValue.split(\",\"): # accepts multiple values split by ,\n find_value = \"%{}%\".format(value.strip())\n find_values.append(find_value)\n\n # construct search filter.\n if search.MapColumnAndValue:\n # makes a 1:1 search for column:value\n search_filters = [sc.like(value) for sc, value in zip(search_columns, find_values)]\n else:\n # makes n:x search for column:value\n search_filters = [sc.like(value) for sc in search_columns for value in find_values]\n\n order_by_list = cls._create_order_by_list(search)\n\n # AND or OR\n if search.Use_AND_Operator:\n query = cls.query.filter(and_(*search_filters)).order_by(*order_by_list)\n else:\n query = cls.query.filter(or_(*search_filters)).order_by(*order_by_list)\n\n if search.TextualQuery:\n query = query.filter(text(search.TextualQuery)).order_by(*order_by_list)\n\n return query",
"def search(self, query):\n return self._search_provider.search(self._normalize_query(query))",
"def search(self, query, **kwargs):\n query = dict(query)\n if self._query:\n query = {'$and': [self._query, query]}\n cat = type(self)(\n metadatastore_db=self._metadatastore_db,\n asset_registry_db=self._asset_registry_db,\n query=query,\n find_kwargs=kwargs,\n handler_registry=self._handler_registry,\n transforms=self._transforms,\n root_map=self._root_map,\n filler_class=self._filler_class,\n name='search results',\n getenv=self.getenv,\n getshell=self.getshell,\n auth=self.auth,\n metadata=(self.metadata or {}).copy(),\n storage_options=self.storage_options)\n return cat",
"def search_from_raw_query(api, raw_query, **kwargs):\n tweets=api.GetSearch(raw_query=raw_query)\n return {\"tweets\":tweets}",
"def test_search(self):\n from rubber import resource\n requestmock = RequestMock()\n resource.requests = requestmock\n\n q = {'query': {'term': {'user': 'kimchy'}}}\n self.Article.elasticsearch.search(q, toto='titi')\n\n self.assertEquals(1, len(requestmock.stack))\n self.assertEquals('http://example.com:9200/tests/article/_search', requestmock.stack[0]['url'])\n self.assertEquals('GET', requestmock.stack[0]['method'])\n self.assertEquals('titi', requestmock.stack[0]['kwargs']['toto'])\n from rubber.instanceutils import data_to_json\n self.assertEquals(data_to_json(q), requestmock.stack[0]['kwargs']['data'])\n\n self.Article.elasticsearch.mapping.put({'some': 'mapping'}, toto='titi')\n\n self.assertEquals(2, len(requestmock.stack))\n self.assertEquals('http://example.com:9200/tests/article/_mapping', requestmock.stack[1]['url'])\n self.assertEquals('PUT', requestmock.stack[1]['method'])\n self.assertEquals('titi', requestmock.stack[1]['kwargs']['toto'])",
"def search(self, query=None, options=None):\n return self.create_search(query, options).search()",
"def raw_search(self, query=None):\n\n # update query\n # if query is None:\n # query = self.query\n if query is not None:\n self.query = query\n\n req_body = {\n \"query\": {\n \"multi_match\": {\n \"query\": self.query,\n \"fields\": [\"content\", \"title\", \"author\"],\n \"fuzziness\": \"AUTO\",\n }\n },\n \"sort\": {\"_score\": {\"order\": \"desc\"}},\n \"highlight\": {\n # \"pre_tags\" : [_c.bold + _c.blue], # for proper coloring use the direct api\n # \"post_tags\" : [_c.reset],\n # for proper coloring use the direct api\n # shell escapes not working at beginning of string, this can be\n # replaced later\n \"pre_tags\": [\"<highlight>\"],\n \"post_tags\": [\"</highlight>\"],\n \"order\": \"score\",\n \"number_of_fragments\": 1,\n \"fields\": {\"content\": {}},\n },\n \"_source\": [\"file.filename\", \"path.real\", \"meta.title\", \"meta.raw.description\"],\n \"from\": self.offset\n }\n\n res = self.es.search(\n index=self.index,\n body=req_body,\n _source=[\"file.filename\", \"path.real\", \"meta.title\", \"meta.raw.description\"],\n )\n return res",
"def __search(self):\n query = self.__query.query()\n self.__engine.search(query)",
"def search(\n self,\n query,\n fields=None,\n operator=None,\n order_by_relevance=True,\n partial_match=None, # RemovedInWagtail60Warning\n backend=\"default\",\n ):\n search_backend = get_search_backend(backend)\n return search_backend.search(\n query,\n self,\n fields=fields,\n operator=operator,\n order_by_relevance=order_by_relevance,\n partial_match=partial_match, # RemovedInWagtail60Warning\n )",
"def dummy_search(query):\n ii = InvertedIndex()\n return ii.lookup_query(query)",
"def search(self, **kwargs):\n clone = self._clone()\n\n if 'q' in kwargs:\n clone.q = kwargs['q']\n del kwargs['q']\n clone.params.update(kwargs)\n\n return clone.solr.search(q=clone.q, **clone.params)",
"def construct(self):\n return self.as_search().construct()",
"def search(query, models=None, order_by=RELEVANCE, limit=25, offset=0):\n # Delay import of the backend so we have a chance to configure things\n # after importing search, but before we use it.\n from djangosearch.backends import backend\n return backend.SearchEngine().search(query, models, order_by, limit, offset)",
"def createSearch(self, authenticationToken, search):\r\n pass",
"def __init__(self, query: str, **kwargs: Any):\n self._client = kwargs.get(\"client\")\n self._query = query\n self._kwargs = kwargs",
"def search(self, q):\n self.__query = q\n self.scrape_page()",
"def search(self, query):\n request_url = self.base_url + 'search'\n data = {'q': query}\n response = requests.get(request_url, data=data, headers=self.headers).json()\n return response",
"def generate_simple_search(search_form, language, with_highlights=False):\n # We use a regular S here because we want to search across\n # multiple doctypes.\n searcher = (\n es_utils.AnalyzerS().es(\n urls=settings.ES_URLS,\n timeout=settings.ES_TIMEOUT,\n use_ssl=settings.ES_USE_SSL,\n http_auth=settings.ES_HTTP_AUTH,\n connection_class=RequestsHttpConnection\n )\n .indexes(es_utils.read_index('default'))\n )\n\n cleaned = search_form.cleaned_data\n\n doctypes = []\n final_filter = es_utils.F()\n cleaned_q = cleaned['q']\n products = cleaned['product']\n\n # Handle wiki filters\n if cleaned['w'] & constants.WHERE_WIKI:\n wiki_f = es_utils.F(model='wiki_document',\n document_category__in=settings.SEARCH_DEFAULT_CATEGORIES,\n document_locale=language,\n document_is_archived=False)\n\n for p in products:\n wiki_f &= es_utils.F(product=p)\n\n doctypes.append(DocumentMappingType.get_mapping_type_name())\n final_filter |= wiki_f\n\n # Handle question filters\n if cleaned['w'] & constants.WHERE_SUPPORT:\n question_f = es_utils.F(model='questions_question',\n question_is_archived=False,\n question_has_helpful=True)\n\n for p in products:\n question_f &= es_utils.F(product=p)\n\n doctypes.append(QuestionMappingType.get_mapping_type_name())\n final_filter |= question_f\n\n # Build a filter for those filters and add the other bits to\n # finish the search\n searcher = searcher.doctypes(*doctypes)\n searcher = searcher.filter(final_filter)\n\n if cleaned['explain']:\n searcher = searcher.explain()\n\n if with_highlights:\n # Set up the highlights. Show the entire field highlighted.\n searcher = searcher.highlight(\n 'question_content', # support forum\n 'document_summary', # kb\n pre_tags=['<b>'],\n post_tags=['</b>'],\n number_of_fragments=0\n )\n\n searcher = apply_boosts(searcher)\n\n # Build the query\n query_fields = chain(*[\n cls.get_query_fields() for cls in [\n DocumentMappingType,\n QuestionMappingType\n ]\n ])\n query = {}\n # Create match and match_phrase queries for every field\n # we want to search.\n for field in query_fields:\n for query_type in ['match', 'match_phrase']:\n query['%s__%s' % (field, query_type)] = cleaned_q\n\n # Transform the query to use locale aware analyzers.\n query = es_utils.es_query_with_analyzer(query, language)\n\n searcher = searcher.query(should=True, **query)\n return searcher",
"def from_search_query(self, search_query):\n hits = search_query.hits\n score_sql = self._raw_sql([(h['id'], h['score'] or 0) for h in hits])\n rank_sql = self._raw_sql([(hits[i]['id'], i) for i in range(len(hits))])\n return (\n self.get_queryset()\n .filter(pk__in=[h['id'] for h in hits])\n # add the query relevance score\n .annotate(search_score=RawSQL(score_sql, ()))\n # add the ordering number (0-based)\n .annotate(search_rank=RawSQL(rank_sql, ()))\n .order_by('search_rank')\n )",
"def search(self, query, relation=None, index=0, limit=25, **kwargs):\n return self.get_object(\n \"search\", relation=relation, q=query, index=index, limit=limit, **kwargs\n )",
"def do_search(search_object: str, query: dict, scope: Optional[str], size: Optional[str] = None,\n sort: Optional[str] = None, order: Optional[str] = None, err_operation: Optional[str] = None,\n artifact_source: Optional[str] = None) -> dict:\n path = '/samples/search' if search_object == 'samples' else '/sessions/search'\n data = {\n 'query': query,\n 'size': size\n }\n if scope:\n data.update({'scope': API_PARAM_DICT['scope'][scope]}) # type: ignore\n if validate_sort_and_order_and_artifact(sort, order, artifact_source):\n data.update({'sort': {API_PARAM_DICT['sort'][sort]: {'order': API_PARAM_DICT['order'][order]}}}) # type: ignore\n if artifact_source == 'true':\n data.update({'artifactSource': 'af'})\n data.update({'type': 'scan'})\n # Remove nulls\n data = createContext(data, removeNull=True)\n result = http_request(path, data=data, err_operation=err_operation)\n return result",
"def search(query, sortedby=None, reverse=False):\n return _run_indexer_func(\"search\", query, sortedby, reverse)",
"def search(self, query):\n launch_gs_app('search',\n self.browser,\n GoogleSuite.SEARCH_URL.format(_urlencode([('q', query)])))"
] | [
"0.7029542",
"0.6688671",
"0.6600027",
"0.6517039",
"0.6485709",
"0.64265573",
"0.64017105",
"0.6352035",
"0.6290121",
"0.61653656",
"0.614457",
"0.61306244",
"0.60751146",
"0.6055303",
"0.60288244",
"0.6017776",
"0.6013076",
"0.59807044",
"0.59282154",
"0.591098",
"0.5907803",
"0.5905143",
"0.58721244",
"0.58706737",
"0.585399",
"0.58478445",
"0.5846996",
"0.58383775",
"0.5821709",
"0.5801941"
] | 0.75526977 | 0 |