query (string) | document (string) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string) | document_rank (string, 2 classes)
---|---|---|---|---|---|---|
Generate a batch of non linear data and store it into numpy structures | def make_nonlinear_batch_data(self):
self.reset_batch()
g = SampleGenerator()
for i in range(self.batch_size):
# Draw a random sample on the interval [0,1]
x = np.random.random()
y = g.generate_non_linear_samples(x)
self.x_data.append(x)
self.y_data.append(y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_linear_batch_data(self):\n self.reset_batch()\n g = SampleGenerator()\n for i in range(self.batch_size):\n # Draw a random sample on the interval [0,1]\n x = np.random.random()\n y = g.generate_linear_samples(x)\n self.x_data.append(x)\n self.y_data.append(y)",
"def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r",
"def gen_numpy(path,smilist):\n import numpy as np\n from get_metadata import get_metadata\n from descriptor import Morgan2D\n nb = 1024\n trainlen = len(smilist)\n train_XY = np.zeros((trainlen, nb + 5))\n local_X = np.zeros((nb + 5))\n for num, smi in enumerate(smilist):\n \n etot, dipole, quadrapole, solv, mp = get_metadata(path,smi)\n ecfp = Morgan2D(smi,2,nb)\n local_X[0:nb] = ecfp\n local_X[-5] = etot\n local_X[-4] = dipole\n local_X[-3] = quadrapole\n local_X[-2] = solv\n local_X[-1] = mp \n train_XY[num, :] = local_X\n \n return train_XY",
"def __data_generation(self, list_IDs_temp):\n # Initialization\n if self.mirror:\n X = np.empty(\n (self.batch_size, *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n ) \n else:\n X = np.empty(\n (self.batch_size * len(self.camnames[0]), *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n )\n\n # We'll need to transpose this later such that channels are last,\n # but initializaing the array this ways gives us\n # more flexibility in terms of user-defined array sizes\\\n if self.labelmode == \"prob\":\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n *self.dim_out,\n ),\n dtype=\"float32\",\n )\n else:\n # Just return the targets, without making a meshgrid later\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n len(self.dim_out),\n ),\n dtype=\"float32\",\n )\n\n # Generate data\n cnt = 0\n for i, ID in enumerate(list_IDs_temp):\n if \"_\" in ID:\n experimentID = int(ID.split(\"_\")[0])\n else:\n # Then we only have one experiment\n experimentID = 0\n for _ci, camname in enumerate(self.camnames[experimentID]):\n # Store sample\n # TODO(Refactor): This section is tricky to read\n\n if not self.mirror or _ci == 0:\n if self.immode == \"video\":\n X[cnt] = self.load_vid_frame(\n self.labels[ID][\"frames\"][camname],\n camname,\n self.preload,\n self.extension,\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n elif self.immode == \"tif\":\n X[cnt] = self.load_tif_frame(\n self.labels[ID][\"frames\"][camname], camname\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n else:\n raise Exception(\"Not a valid image reading mode\")\n\n # Labels will now be the pixel positions of each joint.\n # Here, we convert them to\n # probability maps with a numpy meshgrid operation\n this_y = np.round(self.labels[ID][\"data\"][camname])\n if self.immode == \"video\":\n this_y[0, :] = this_y[0, :] - self.crop_width[0]\n this_y[1, :] = this_y[1, :] - self.crop_height[0]\n else:\n raise Exception(\n \"Unsupported image format. 
Needs to be video files.\"\n )\n\n # For 2D, this_y should be size (2, 20)\n if this_y.shape[1] != self.n_channels_out:\n # TODO(shape_exception):This should probably be its own\n # class that inherits from base exception\n raise Exception(_EXEP_MSG)\n\n if self.labelmode == \"prob\":\n # Only do this if we actually need the labels --\n # this is too slow otherwise\n (x_coord, y_coord) = np.meshgrid(\n np.arange(self.dim_out[1]), np.arange(self.dim_out[0])\n )\n for j in range(self.n_channels_out):\n # I tested a version of this with numpy broadcasting,\n # and looping was ~100ms seconds faster for making\n # 20 maps\n # In the future, a shortcut might be to \"stamp\" a\n # truncated Gaussian pdf onto the images, centered\n # at the peak\n y[cnt, j] = np.exp(\n -(\n (y_coord - this_y[1, j]) ** 2\n + (x_coord - this_y[0, j]) ** 2\n )\n / (2 * self.out_scale ** 2)\n )\n else:\n y[cnt] = this_y.T\n\n cnt = cnt + 1\n\n # Move channels last\n if self.labelmode == \"prob\":\n y = np.transpose(y, [0, 2, 3, 1])\n\n if self.mirror:\n # separate the batches from the cameras, and use the cameras as the numebr of channels \n # to make a single-shot multi-target prediction from a single image\n y = np.reshape(y, (self.batch_size, len(self.camnames[0]), y.shape[1], y.shape[2]))\n y = np.transpose(y, [0, 2, 3, 1])\n else:\n # One less dimension when not training with probability map targets\n y = np.transpose(y, [0, 2, 1])\n\n if self.downsample > 1:\n X = processing.downsample_batch(X, fac=self.downsample, method=self.dsmode)\n if self.labelmode == \"prob\":\n y = processing.downsample_batch(\n y, fac=self.downsample, method=self.dsmode\n )\n y /= np.max(np.max(y, axis=1), axis=1)[:, np.newaxis, np.newaxis, :]\n\n if self.mono and self.n_channels_in == 3:\n # Go from 3 to 1 channel using RGB conversion. This will also\n # work fine if there are just 3 channel grayscale\n X = X[:, :, :, 0]*0.2125 + \\\n X[:, :, :, 1]*0.7154 + \\\n X[:, :, :, 2]*0.0721\n\n X = X[:, :, :, np.newaxis]\n\n if self.mono:\n # Just subtract the mean imagent BGR value, which is as close as we\n # get to vgg19 normalization\n X -= 114.67\n else:\n X = pp_vgg19(X)\n return X, y",
"def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)",
"def numpy_container_gen_data(no_substripes):\n q11vec = np.zeros(no_substripes, dtype=float)\n q12vec = np.zeros(no_substripes, dtype=float)\n q21vec = np.zeros(no_substripes, dtype=float)\n q22vec = np.zeros(no_substripes, dtype=float)\n x1vec = np.zeros(no_substripes, dtype=float)\n x2vec = np.zeros(no_substripes, dtype=float)\n y1vec = np.zeros(no_substripes, dtype=float)\n y2vec = np.zeros(no_substripes, dtype=float)\n xvec = np.zeros(no_substripes, dtype=float)\n yvec = np.zeros(no_substripes, dtype=float)\n\n for idx in range(no_substripes):\n q11vec[idx], q12vec[idx], q21vec[idx], q22vec[idx], x1vec[idx], \\\n x2vec[idx], y1vec[idx], y2vec[idx], xvec[idx], \\\n yvec[idx] = generate_data()\n\n return q11vec, q12vec, q21vec, q22vec, x1vec, x2vec, y1vec, y2vec, xvec, \\\n yvec",
"def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])",
"def __data_generation(self, batch_data):\n X = np.zeros((self.batch_size, self.num_features), dtype=float)\n y = np.zeros((self.batch_size, self.num_outputs), dtype=float)\n\n for i, sample in batch_data.iterrows():\n # Get lat/long of pickup and dropoff locations\n PULocation = self.taxizone_data.loc[sample['PULocationID']].centroids\n PULocationLong, PULocationLat = PULocation.x, PULocation.y\n DOLocation = self.taxizone_data.loc[sample['DOLocationID']].centroids\n DOLocationLong, DOLocationLat = DOLocation.x, DOLocation.y\n\n # Get month date, day of week and hours/mins for pickup\n PUDateTime = datetime.strptime(sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S')\n PUDate = PUDateTime.strftime('%Y-%m-%d')\n PUYear, PUMonth, PUMonthDate = PUDate.split('-')\n # TODO - Add this to pre-processing of trip data! Some random months in the data!!\n if PUYear != '2018' or PUMonth != '06':\n continue\n PUDayOfWeek = PUDateTime.weekday()\n PUTimeHour, PUTimeMinute = datetime.strptime(\n sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S'\n ).strftime('%H:%M').split(':')\n\n # Get precipitation for that day\n Precipitation = self.weather_data[self.weather_data['DATE'] == PUDate]['PRCP'].values[0]\n\n X[i] = np.concatenate((np.array([\n\n PULocationLat,\n PULocationLong,\n DOLocationLat,\n DOLocationLong,\n abs((PULocationLat - DOLocationLat) ** 2 + abs(PULocationLong - DOLocationLong) ** 2) ** 0.5,\n Precipitation\n ]),\n to_categorical(PUDayOfWeek, 7),\n to_categorical(PUMonthDate, 31),\n to_categorical(PUTimeHour, 24)\n ))\n\n y[i] = [sample['duration']] if self.generator_type == 'duration' \\\n else [sample['total_amount'] - sample['tip_amount']]\n\n return X, y",
"def next_batch(self, batch_size):\n batch_data = np.zeros([batch_size,] + list(self.example_shape))\n for i in range(batch_size):\n index = self.q.pop()\n batch_data[i,...] = self.data[index]\n if len(self.q)==0:\n self.__new_epoch()\n\n return batch_data",
"def generate_train_batch(self):\n\n patients_indices = self.get_indices()\n patients_for_batch = [self._data[i] for i in patients_indices]\n\n data = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.short)\n labels = np.empty(self.batch_size, dtype=np.float32)\n\n # iterate over patients_for_batch and include them in the batch\n for i, j in enumerate(patients_for_batch):\n patient_data_ct = np.load(j).astype(np.short)\n\n data[i] = self.preprocess_func(patient_data_ct).astype(np.short)\n path = str(j).split('/')[-1].replace('.npy', '')\n labels[i] = float(self.age_info[path])\n\n return {'data': np.array(data), 'label': np.array(labels)}",
"def batch_to_distribution_memories(batch):\n batch_memory = []\n for value_array in batch:\n batch_memory.append(values_to_distribution_memory(value_array))\n return batch_memory",
"def generate_dataset(output_dim=14, num_examples=10000):\n def int2vec(x, dim=output_dim):\n out = np.zeros(dim)\n binrep = np.array(list(np.binary_repr(x))).astype('int')\n out[-len(binrep):] = binrep\n return out\n\n x_left_int = (np.random.rand(num_examples) * 2 ** (output_dim - 1)).astype('int')\n x_right_int = (np.random.rand(num_examples) * 2 ** (output_dim - 1)).astype('int')\n y_int = x_left_int + x_right_int\n\n x = list()\n for i in range(len(x_left_int)):\n x.append(np.concatenate((int2vec(x_left_int[i]), int2vec(x_right_int[i]))))\n\n y = list()\n for i in range(len(y_int)):\n y.append(int2vec(y_int[i]))\n\n x = np.array(x)\n y = np.array(y)\n return x, y",
"def make_unsupervised_batches(data, nbatches, batch_size=None):\n print '---->\\n.....Putting data into vector-shaped batches'\n if batch_size==None:\n batch_size = int(data.sahpe[0]/nbatches)\n else:\n assert nbatches * batch_size <= data.shape\n permut = permutation(data.shape[0])\n xdata = []\n for i in xrange(nbatches):\n xs = data[permut[i * batch_size:(i + 1) * batch_size], :, :, :]\n xdata.append(reshape(xs, (batch_size, prod(xs.shape) / batch_size)))\n return np.reshape(np.asarray(xdata), (nbatches, batch_size, -1))",
"def initGD( X, N ):\n seq = np.ndarray(len(X), dtype=np.object)\n for i in range( len(X) ):\n a = np.floor(np.linspace(0,N-.00000001,len(X[i])))\n seq[i] = a\n return seq",
"def get_batch(X, Y, iteration):\n offset = 100\n start = iteration * offset % len(Y)\n \n # YOUR CODE HERE\n # This will return the entire data set each iteration. This is costly, so\n # you should experiment with different way of changing this:\n return X[start: start + offset], Y[start: start + offset]",
"def __data_generation(self, batch_indices):\n\n X = self.__get_npy_arrays(batch_indices)\n y = self.__get_records(batch_indices)\n\n return X, y",
"def create_dataset(dataset,time_step=1):\n dataX,dataY=[],[]\n for i in range(len(dataset)-time_step):\n a=dataset[i:i+time_step]\n dataX.append(a)\n dataY.append(dataset[i+time_step])\n return np.asarray(dataX),np.asarray(dataY)",
"def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in range(n_workers):\n queue.put(None)",
"def batch_split(self) -> np.array:\n pass",
"def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )",
"def next_simple_dataset(dataset, batch_size: int, datatype):\n while True:\n x_batch = []\n y_batch = []\n for i in range(batch_size):\n try:\n x, y, data_unit, index = create_xy(dataset, datatype)\n # x = normalize(x)\n x_batch.append(x)\n y_batch.append(y)\n except StopIteration:\n break\n x_batch, y_batch = np.array(x_batch), np.array(y_batch)\n if datatype != DataType.test:\n x_batch = SEQ_CVXTZ.augment_images(x_batch).astype(\"float32\")\n x_batch = np.array([normalize(x) for x in x_batch])\n # org_shape = x_batch.shape\n # org_width = x_batch.shape[1]\n # corner = int((org_width - ROI_IMAGE_SIZE) // 2)\n # print(f\"0: org_shape:{org_shape} x_batch:{x_batch.shape} corner:{corner}\")\n # x_batch = x_batch[:, corner:(org_width - corner), corner:(org_width - corner), :]\n # resized_x_batch = []\n # for x in x_batch:\n # img = Image.fromarray(np.uint8(x))\n # img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.LANCZOS)\n # resized_x_batch.append(normalize(np.array(img)))\n # print(f\"1: org_shape:{org_shape} corner:{corner} x_batch:{x_batch.shape}\")\n # yield np.array(resized_x_batch), y_batch\n yield np.array(x_batch), y_batch",
"def data_generation(imgs, labs, batch, validataion):\n\n # Initialization\n batch_images = np.empty((batch, imgs[0].shape[0], imgs[0].shape[1], imgs[0].shape[2]))\n batch_labels = np.empty((batch, 1))\n # Generate data\n while True: # loop forever\n for x in range(batch):\n rand = random.randint(0, len(labs)-1)\n if validataion:\n # Store un-altered image and measurement\n batch_images[x] = imgs[rand]\n batch_labels[x] = labs[rand]\n else:\n # Store new image and adjusted measurement\n batch_images[x], batch_labels[x] = transform_image(imgs[rand], labs[rand])\n yield batch_images, batch_labels",
"def get_raw_batch(data, min_index=0, max_index=None, \r\n batch_size=16, n_steps=150, step_length=1000):\r\n\r\n if max_index is None:\r\n #max_index = len(data) - 1\r\n max_index = len(data)\r\n\r\n # Pick indices of ending positions\r\n #rows = np.random.randint(min_index + n_steps * step_length, max_index, size=batch_size)\r\n rows = np.random.randint(min_index, max_index/(n_steps*step_length), size=batch_size) + 1\r\n rows = rows * n_steps * step_length\r\n \r\n # Initialize feature matrices and targets\r\n samples = np.zeros((batch_size, n_steps, n_features))\r\n data_list = []\r\n\r\n for j, row in enumerate(rows):\r\n data_list.append([X_preprocessor(data=data[:, 0], last_index=row, \r\n n_steps=n_steps, step_length=step_length), # may be modified\r\n data[row - 1, 1]])\r\n\n #watch_dog.feeding(os.popen(\"free -h\").read()) # debugging\r\n\r\n return data_list",
"def instantiate_batch(self, inputs):\n return inputs",
"def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]",
"def _batch_iter(line_iter, n_objects, batchsize, dtype):\n new_dt = np.dtype(dtype.descr + [('mask',bool), ('weight', float)])\n batch = np.zeros((batchsize, n_objects), dtype=new_dt)\n sample_n = 0\n for array, wt in line_iter:\n\n n_missing = n_objects - array.size\n\n array.resize(n_objects, refcheck=False)\n batch[sample_n, :] = array\n batch['weight'][sample_n, :] = wt\n if n_missing > 0:\n batch['mask'][sample_n, -n_missing:] = True\n\n sample_n += 1\n if sample_n == batchsize:\n yield batch\n sample_n = 0\n batch.fill(0)\n yield batch[:sample_n, ...]",
"def gen_batches(data, batch_size=2048):\n indices = torch.randperm(len(data))\n indices = indices.cuda()\n\n for idx in range(0, len(data) - batch_size + 1, batch_size):\n sample = indices[idx:idx + batch_size]\n l_words, r_words = data.L_words[sample], data.R_words[sample]\n l_vecs = data.l_vecs[l_words]\n r_vecs = data.r_vecs[r_words]\n l_bias = data.l_biases[l_words]\n r_bias = data.r_biases[r_words]\n weight = data.weights[sample]\n y = data.y[sample]\n yield weight, l_vecs, r_vecs, y, l_bias, r_bias",
"def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)",
"def create_batches(self, X, y, batch_size, step):\n index_start = batch_size * step\n index_end = index_start + batch_size\n\n if index_end < X.shape[0]:\n batch_indices = np.arange(index_start, index_end)\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n else:\n batch_indices = np.arange(index_start, X.shape[0])\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n\n return X_batch, y_batch",
"def make_data(self): \n s = numpy.arange(0.0, 10.0, 0.01)\n s = numpy.reshape(s, (10,10,10))\n s = numpy.transpose(s)\n\n v = numpy.zeros(3000, 'd')\n v[1::3] = 1.0\n v = numpy.reshape(v, (10,10,10,3))\n return s, v"
] | [
"0.69023687",
"0.6657422",
"0.65740246",
"0.6552211",
"0.6543923",
"0.6525947",
"0.652149",
"0.64986634",
"0.6427317",
"0.6416945",
"0.6410946",
"0.6362014",
"0.63412833",
"0.633792",
"0.63310975",
"0.6317198",
"0.63154197",
"0.6303581",
"0.62988424",
"0.62969756",
"0.62965375",
"0.62955916",
"0.6284763",
"0.62711227",
"0.62702817",
"0.626287",
"0.6244404",
"0.6239621",
"0.62367743",
"0.6226497"
] | 0.7060218 | 0 |
Generate a batch of linear data and store it into numpy structures | def make_linear_batch_data(self):
self.reset_batch()
g = SampleGenerator()
for i in range(self.batch_size):
# Draw a random sample on the interval [0,1]
x = np.random.random()
y = g.generate_linear_samples(x)
self.x_data.append(x)
self.y_data.append(y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_nonlinear_batch_data(self):\n self.reset_batch()\n g = SampleGenerator()\n for i in range(self.batch_size):\n # Draw a random sample on the interval [0,1]\n x = np.random.random()\n y = g.generate_non_linear_samples(x)\n self.x_data.append(x)\n self.y_data.append(y)",
"def gen_numpy(path,smilist):\n import numpy as np\n from get_metadata import get_metadata\n from descriptor import Morgan2D\n nb = 1024\n trainlen = len(smilist)\n train_XY = np.zeros((trainlen, nb + 5))\n local_X = np.zeros((nb + 5))\n for num, smi in enumerate(smilist):\n \n etot, dipole, quadrapole, solv, mp = get_metadata(path,smi)\n ecfp = Morgan2D(smi,2,nb)\n local_X[0:nb] = ecfp\n local_X[-5] = etot\n local_X[-4] = dipole\n local_X[-3] = quadrapole\n local_X[-2] = solv\n local_X[-1] = mp \n train_XY[num, :] = local_X\n \n return train_XY",
"def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r",
"def make_data(self): \n s = numpy.arange(0.0, 10.0, 0.01)\n s = numpy.reshape(s, (10,10,10))\n s = numpy.transpose(s)\n\n v = numpy.zeros(3000, 'd')\n v[1::3] = 1.0\n v = numpy.reshape(v, (10,10,10,3))\n return s, v",
"def numpy_container_gen_data(no_substripes):\n q11vec = np.zeros(no_substripes, dtype=float)\n q12vec = np.zeros(no_substripes, dtype=float)\n q21vec = np.zeros(no_substripes, dtype=float)\n q22vec = np.zeros(no_substripes, dtype=float)\n x1vec = np.zeros(no_substripes, dtype=float)\n x2vec = np.zeros(no_substripes, dtype=float)\n y1vec = np.zeros(no_substripes, dtype=float)\n y2vec = np.zeros(no_substripes, dtype=float)\n xvec = np.zeros(no_substripes, dtype=float)\n yvec = np.zeros(no_substripes, dtype=float)\n\n for idx in range(no_substripes):\n q11vec[idx], q12vec[idx], q21vec[idx], q22vec[idx], x1vec[idx], \\\n x2vec[idx], y1vec[idx], y2vec[idx], xvec[idx], \\\n yvec[idx] = generate_data()\n\n return q11vec, q12vec, q21vec, q22vec, x1vec, x2vec, y1vec, y2vec, xvec, \\\n yvec",
"def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])",
"def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)",
"def create_dataset(dataset,time_step=1):\n dataX,dataY=[],[]\n for i in range(len(dataset)-time_step):\n a=dataset[i:i+time_step]\n dataX.append(a)\n dataY.append(dataset[i+time_step])\n return np.asarray(dataX),np.asarray(dataY)",
"def __data_generation(self, batch_indices):\n\n X = self.__get_npy_arrays(batch_indices)\n y = self.__get_records(batch_indices)\n\n return X, y",
"def __data_generation(self, list_IDs_temp):\n # Initialization\n if self.mirror:\n X = np.empty(\n (self.batch_size, *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n ) \n else:\n X = np.empty(\n (self.batch_size * len(self.camnames[0]), *self.dim_in, self.n_channels_in),\n dtype=\"uint8\",\n )\n\n # We'll need to transpose this later such that channels are last,\n # but initializaing the array this ways gives us\n # more flexibility in terms of user-defined array sizes\\\n if self.labelmode == \"prob\":\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n *self.dim_out,\n ),\n dtype=\"float32\",\n )\n else:\n # Just return the targets, without making a meshgrid later\n y = np.empty(\n (\n self.batch_size * len(self.camnames[0]),\n self.n_channels_out,\n len(self.dim_out),\n ),\n dtype=\"float32\",\n )\n\n # Generate data\n cnt = 0\n for i, ID in enumerate(list_IDs_temp):\n if \"_\" in ID:\n experimentID = int(ID.split(\"_\")[0])\n else:\n # Then we only have one experiment\n experimentID = 0\n for _ci, camname in enumerate(self.camnames[experimentID]):\n # Store sample\n # TODO(Refactor): This section is tricky to read\n\n if not self.mirror or _ci == 0:\n if self.immode == \"video\":\n X[cnt] = self.load_vid_frame(\n self.labels[ID][\"frames\"][camname],\n camname,\n self.preload,\n self.extension,\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n elif self.immode == \"tif\":\n X[cnt] = self.load_tif_frame(\n self.labels[ID][\"frames\"][camname], camname\n )[\n self.crop_height[0] : self.crop_height[1],\n self.crop_width[0] : self.crop_width[1],\n ]\n else:\n raise Exception(\"Not a valid image reading mode\")\n\n # Labels will now be the pixel positions of each joint.\n # Here, we convert them to\n # probability maps with a numpy meshgrid operation\n this_y = np.round(self.labels[ID][\"data\"][camname])\n if self.immode == \"video\":\n this_y[0, :] = this_y[0, :] - self.crop_width[0]\n this_y[1, :] = this_y[1, :] - self.crop_height[0]\n else:\n raise Exception(\n \"Unsupported image format. 
Needs to be video files.\"\n )\n\n # For 2D, this_y should be size (2, 20)\n if this_y.shape[1] != self.n_channels_out:\n # TODO(shape_exception):This should probably be its own\n # class that inherits from base exception\n raise Exception(_EXEP_MSG)\n\n if self.labelmode == \"prob\":\n # Only do this if we actually need the labels --\n # this is too slow otherwise\n (x_coord, y_coord) = np.meshgrid(\n np.arange(self.dim_out[1]), np.arange(self.dim_out[0])\n )\n for j in range(self.n_channels_out):\n # I tested a version of this with numpy broadcasting,\n # and looping was ~100ms seconds faster for making\n # 20 maps\n # In the future, a shortcut might be to \"stamp\" a\n # truncated Gaussian pdf onto the images, centered\n # at the peak\n y[cnt, j] = np.exp(\n -(\n (y_coord - this_y[1, j]) ** 2\n + (x_coord - this_y[0, j]) ** 2\n )\n / (2 * self.out_scale ** 2)\n )\n else:\n y[cnt] = this_y.T\n\n cnt = cnt + 1\n\n # Move channels last\n if self.labelmode == \"prob\":\n y = np.transpose(y, [0, 2, 3, 1])\n\n if self.mirror:\n # separate the batches from the cameras, and use the cameras as the numebr of channels \n # to make a single-shot multi-target prediction from a single image\n y = np.reshape(y, (self.batch_size, len(self.camnames[0]), y.shape[1], y.shape[2]))\n y = np.transpose(y, [0, 2, 3, 1])\n else:\n # One less dimension when not training with probability map targets\n y = np.transpose(y, [0, 2, 1])\n\n if self.downsample > 1:\n X = processing.downsample_batch(X, fac=self.downsample, method=self.dsmode)\n if self.labelmode == \"prob\":\n y = processing.downsample_batch(\n y, fac=self.downsample, method=self.dsmode\n )\n y /= np.max(np.max(y, axis=1), axis=1)[:, np.newaxis, np.newaxis, :]\n\n if self.mono and self.n_channels_in == 3:\n # Go from 3 to 1 channel using RGB conversion. This will also\n # work fine if there are just 3 channel grayscale\n X = X[:, :, :, 0]*0.2125 + \\\n X[:, :, :, 1]*0.7154 + \\\n X[:, :, :, 2]*0.0721\n\n X = X[:, :, :, np.newaxis]\n\n if self.mono:\n # Just subtract the mean imagent BGR value, which is as close as we\n # get to vgg19 normalization\n X -= 114.67\n else:\n X = pp_vgg19(X)\n return X, y",
"def initGD( X, N ):\n seq = np.ndarray(len(X), dtype=np.object)\n for i in range( len(X) ):\n a = np.floor(np.linspace(0,N-.00000001,len(X[i])))\n seq[i] = a\n return seq",
"def __data_generation(self, indexes):\n\n # Initialization\n x = np.zeros((self.batch_size, self.dim_1,self.dim_2,1))\n y = np.zeros((self.batch_size, 2))\n\n # Generate data\n\n for i, ID in enumerate(indexes):\n\n x[i,:,:,0] = self.file_source[ID]\n y[i,:] = self.label_source[ID]\n\n return x, y",
"def generate_dataset(output_dim=14, num_examples=10000):\n def int2vec(x, dim=output_dim):\n out = np.zeros(dim)\n binrep = np.array(list(np.binary_repr(x))).astype('int')\n out[-len(binrep):] = binrep\n return out\n\n x_left_int = (np.random.rand(num_examples) * 2 ** (output_dim - 1)).astype('int')\n x_right_int = (np.random.rand(num_examples) * 2 ** (output_dim - 1)).astype('int')\n y_int = x_left_int + x_right_int\n\n x = list()\n for i in range(len(x_left_int)):\n x.append(np.concatenate((int2vec(x_left_int[i]), int2vec(x_right_int[i]))))\n\n y = list()\n for i in range(len(y_int)):\n y.append(int2vec(y_int[i]))\n\n x = np.array(x)\n y = np.array(y)\n return x, y",
"def __data_generation(self, batch_data):\n X = np.zeros((self.batch_size, self.num_features), dtype=float)\n y = np.zeros((self.batch_size, self.num_outputs), dtype=float)\n\n for i, sample in batch_data.iterrows():\n # Get lat/long of pickup and dropoff locations\n PULocation = self.taxizone_data.loc[sample['PULocationID']].centroids\n PULocationLong, PULocationLat = PULocation.x, PULocation.y\n DOLocation = self.taxizone_data.loc[sample['DOLocationID']].centroids\n DOLocationLong, DOLocationLat = DOLocation.x, DOLocation.y\n\n # Get month date, day of week and hours/mins for pickup\n PUDateTime = datetime.strptime(sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S')\n PUDate = PUDateTime.strftime('%Y-%m-%d')\n PUYear, PUMonth, PUMonthDate = PUDate.split('-')\n # TODO - Add this to pre-processing of trip data! Some random months in the data!!\n if PUYear != '2018' or PUMonth != '06':\n continue\n PUDayOfWeek = PUDateTime.weekday()\n PUTimeHour, PUTimeMinute = datetime.strptime(\n sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S'\n ).strftime('%H:%M').split(':')\n\n # Get precipitation for that day\n Precipitation = self.weather_data[self.weather_data['DATE'] == PUDate]['PRCP'].values[0]\n\n X[i] = np.concatenate((np.array([\n\n PULocationLat,\n PULocationLong,\n DOLocationLat,\n DOLocationLong,\n abs((PULocationLat - DOLocationLat) ** 2 + abs(PULocationLong - DOLocationLong) ** 2) ** 0.5,\n Precipitation\n ]),\n to_categorical(PUDayOfWeek, 7),\n to_categorical(PUMonthDate, 31),\n to_categorical(PUTimeHour, 24)\n ))\n\n y[i] = [sample['duration']] if self.generator_type == 'duration' \\\n else [sample['total_amount'] - sample['tip_amount']]\n\n return X, y",
"def gen_batches(data, batch_size=2048):\n indices = torch.randperm(len(data))\n indices = indices.cuda()\n\n for idx in range(0, len(data) - batch_size + 1, batch_size):\n sample = indices[idx:idx + batch_size]\n l_words, r_words = data.L_words[sample], data.R_words[sample]\n l_vecs = data.l_vecs[l_words]\n r_vecs = data.r_vecs[r_words]\n l_bias = data.l_biases[l_words]\n r_bias = data.r_biases[r_words]\n weight = data.weights[sample]\n y = data.y[sample]\n yield weight, l_vecs, r_vecs, y, l_bias, r_bias",
"def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')",
"def instantiate_batch(self, inputs):\n return inputs",
"def next_batch(memory_dim, batch_length, task):\n batch_inputs = []\n batch_targets = []\n difficulty = 0.0\n for _ in xrange(batch_length):\n # Values as integers\n random_input = task.generate_random_input(difficulty, memory_dim)\n target_output = task.run(random_input)\n\n # Values as MxM memories\n batch_inputs.append(values_to_distribution_memory(random_input))\n batch_targets.append(values_to_distribution_memory(target_output))\n\n return batch_inputs, batch_targets",
"def batch_to_distribution_memories(batch):\n batch_memory = []\n for value_array in batch:\n batch_memory.append(values_to_distribution_memory(value_array))\n return batch_memory",
"def next_batch(self, batch_size):\n batch_data = np.zeros([batch_size,] + list(self.example_shape))\n for i in range(batch_size):\n index = self.q.pop()\n batch_data[i,...] = self.data[index]\n if len(self.q)==0:\n self.__new_epoch()\n\n return batch_data",
"def __data_generation(self, folder_id, file_id, el_ids):\n self.reload(folder_id, file_id)\n X = np.array(self.X.iloc[el_ids]).reshape(self.batch_size, len(self.used_variables), self.lev)\n Y = self.Y[:,:,:,el_ids]\n return X,Y",
"def _batchify(self, data_containers: Dict, batch_size):\n\n X = Variable(torch.LongTensor(data_containers['X'])).to(self.device)\n Y = Variable(torch.FloatTensor(data_containers['Y'])).to(self.device)\n\n data_size = X.size()[0]\n num_batches = data_size // batch_size\n\n return [\n (X[bi * batch_size: (bi + 1) * batch_size],\n Y[bi * batch_size: (bi + 1) * batch_size].unsqueeze(1))\n for bi in range(num_batches + 1)\n ]",
"def next_batch(x, y, batch_size):\n\n def as_batch(data, start, count):\n part = []\n for i in range(start, start + count):\n part.append(data[i])\n return np.array(part)\n\n for i in range(0, len(x)-batch_size, batch_size):\n yield as_batch(x, i, batch_size), as_batch(y, i, batch_size)",
"def __data_generation(self, list_IDs_temp):\n X = np.empty((self.batch_size, self.dim))\n Y = np.empty((self.batch_size, self.word_length, self.hot_enc_len))\n\n # Generate data\n for i, ID in enumerate(list_IDs_temp): # The enumerate() function adds a counter to an iterable.\n word = self.labels.index2word[ID]\n # Store sample\n X[i, ] = self.labels[word]\n # Store class\n char_hot_enc_pad = self.word_2_seq_hot_enc_sample(word)\n Y[i] = char_hot_enc_pad\n return X.reshape(self.batch_size, self.dim), Y",
"def get_batch(X, Y, iteration):\n offset = 100\n start = iteration * offset % len(Y)\n \n # YOUR CODE HERE\n # This will return the entire data set each iteration. This is costly, so\n # you should experiment with different way of changing this:\n return X[start: start + offset], Y[start: start + offset]",
"def generate_train_batch(self):\n\n patients_indices = self.get_indices()\n patients_for_batch = [self._data[i] for i in patients_indices]\n\n data = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.short)\n labels = np.empty(self.batch_size, dtype=np.float32)\n\n # iterate over patients_for_batch and include them in the batch\n for i, j in enumerate(patients_for_batch):\n patient_data_ct = np.load(j).astype(np.short)\n\n data[i] = self.preprocess_func(patient_data_ct).astype(np.short)\n path = str(j).split('/')[-1].replace('.npy', '')\n labels[i] = float(self.age_info[path])\n\n return {'data': np.array(data), 'label': np.array(labels)}",
"def create_batches(self, X, y, batch_size, step):\n index_start = batch_size * step\n index_end = index_start + batch_size\n\n if index_end < X.shape[0]:\n batch_indices = np.arange(index_start, index_end)\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n else:\n batch_indices = np.arange(index_start, X.shape[0])\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n\n return X_batch, y_batch",
"def train_datas(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(self.size))\r\n np.random.shuffle(indices)\r\n\r\n epoch_size = self.size // batch_size * batch_size\r\n self._train_datas = self._train_datas[indices][:epoch_size] # [epoch_size, ...]\r\n self._train_labels = self._train_labels[indices][:epoch_size] # [epoch_size, ...]\r\n \r\n datas = []\r\n for i in range(self.size // batch_size):\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[i*batch_size:(i+1)*batch_size], \r\n self._train_labels[i*batch_size:(i+1)*batch_size]])\r\n return datas",
"def instantiate_batch(self, inputs):\n _ = inputs\n raise NotImplementedError(\n 'LoomOp needs a definition for instantiate_batch.')",
"def __data_generation(self, list_ids_temp):\r\n # 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\r\n # Initialization\r\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\r\n y = np.empty(self.batch_size, dtype = int)\r\n\r\n # Get data from .mat files\r\n for i, ID in enumerate(list_ids_temp):\r\n with h5py.File(ID) as file:\r\n _data = list(file['sequences'])\r\n\r\n # Convert to .mat structure to numpy array\r\n _npData = np.array(_data)\r\n _allSequences = np.transpose(_npData)\r\n\r\n # Reshape the numpy array to size : ( # of sequences, # of rows, # of columns, # of channels)\r\n X[i,] = np.reshape(_allSequences, (15, 16, 3200, 1)) # sequences\r\n\r\n # Store class\r\n y[i] = self.labels[ID]\r\n\r\n return X, tf.keras.utils.to_categorical(y, num_classes = self.n_classes)"
] | [
"0.6630902",
"0.64705443",
"0.626827",
"0.6255618",
"0.61779076",
"0.6143719",
"0.61349833",
"0.6093019",
"0.60913867",
"0.60872954",
"0.6078541",
"0.60360956",
"0.6005945",
"0.5993744",
"0.59579694",
"0.59538275",
"0.5936795",
"0.59321797",
"0.59284955",
"0.5920302",
"0.591604",
"0.5914332",
"0.5910492",
"0.59071535",
"0.59031224",
"0.5896021",
"0.58889043",
"0.588357",
"0.5875091",
"0.5850943"
] | 0.73512393 | 0 |
Returns the minimum path sum from root to a leaf. For example, the minimum path in this tree is [10, 5, 1, 1], which has sum 15. 10 / \ 5 5 | | 2 1 / 1 | def minimum_path_sum(self, root) -> int:
def minimum_path_sum_aux(root, path=None):
path.append(root.value)
new_path = path[:]
# Stop condition
if root.is_leaf():
return
else:
if root.left is not None:
minimum_path_sum_aux(root.left, path=path)
elif root.right is not None:
minimum_path_sum_aux(root.right, path=path)
if root.right is not None and root.left is not None:
paths.append(new_path)
minimum_path_sum_aux(root.right, path=new_path)
paths = [[]]
minimum_path_sum_aux(root, path=paths[0])
return min([sum(path) for path in paths]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def min_depth(t):\n if is_leaf(t):\n return 0\n h = float('inf')\n for b in branches(t):\n # Still works fine!\n h = min(h, 1 + min_depth(b))\n return h",
"def minDepth(self, root: TreeNode) -> int:\n return self.bfs(root)",
"def max_single_path(self, root: Optional[TreeNode]) -> int:\n if not root:\n return 0\n\n if root in self.single_path_cache:\n return self.single_path_cache[root]\n\n result = max(0, root.val)\n if root.left:\n left = self.max_single_path(root.left)\n result = max(result, left + root.val)\n if root.right:\n right = self.max_single_path(root.right)\n result = max(result, right + root.val)\n\n self.single_path_cache[root] = result\n return result",
"def max_path_sum(root_elem):\r\n\tif root_elem is None:\r\n\t\treturn 0\r\n\tif root_elem.left is None and root_elem.right is None:\r\n\t\treturn root_elem.value\r\n\tmax_child = max(max_path_sum(root_elem.left),max_path_sum(root_elem.left))\r\n\treturn root_elem.value + max_child",
"def maxPathSum(self, root):\n def maxPotential(node):\n \"\"\"Return max potention from each node in tree.\"\"\"\n nonlocal maxsum\n # Allocate null values to 0\n if not node:\n return 0\n\n # Get the max sum from the left and right subtrees\n # We only care about possible gains (no negative values)\n # Iterates all the way to the far left\n left_max = max(maxPotential(node.left), 0)\n print(\"left_max:\", left_max)\n # Iterates down the recursive stack\n right_max = max(maxPotential(node.right), 0)\n print(\"right_max:\", right_max)\n\n # Get sum of potential\n potential = node.val + left_max + right_max\n print(\"potential\", potential)\n\n # Update new maxsum\n maxsum = max(maxsum, potential)\n\n # Recursively return the max potential if continue on same path\n return node.val + max(left_max, right_max)\n\n # Initialize maxsum to negative infinity\n maxsum = float('-inf')\n maxPotential(root)\n\n return maxsum",
"def minDiffInBST(self, root: TreeNode) -> int:\n prev = [-float('inf')]\n curr_min = [float('inf')]\n def inorder_traverse(node, prev, curr_min):\n if node:\n inorder_traverse(node.left, prev, curr_min)\n curr_min[0] = min(curr_min[0], node.val - prev[0])\n prev[0] = node.val\n inorder_traverse(node.right, prev, curr_min)\n \n inorder_traverse(root, prev, curr_min)\n return curr_min[0]",
"def min(self):\n return self._min(self.root)",
"def min(self):\n no = self.root\n if no:\n no = self.__search_node_min_dir(no)\n if no:\n return no.valor\n return None",
"def get_min(self):\n if self.root is None: # BC1\n return float('+inf')\n\n current = self.root\n while current.left is not None: # Traverse like a linked-list\n current = current.left\n\n return current.key",
"def find_min(self) -> TreeNode:\n node = self.root\n while True:\n if not node.left:\n return node\n node = node.left",
"def path_sum(self, node):\n if self.is_leaf(node):\n return node.val, node.val\n if node is None:\n return 0, -2147483648\n\n left, sub1 = self.path_sum(node.left)\n right, sub2 = self.path_sum(node.right)\n left = left if left > 0 else 0\n right = right if right > 0 else 0\n\n if left > right:\n maximum_path = node.val + left\n else:\n maximum_path = node.val + right\n\n sub_result = max(max(sub1, sub2), node.val + left + right)\n return maximum_path, sub_result",
"def minDiffInBST2(self, root: TreeNode) -> int:\n def flatten(node, all_nodes):\n if node:\n flatten(node.left, all_nodes)\n all_nodes.append(node.val)\n flatten(node.right, all_nodes)\n \n nodes = []\n flatten(root, nodes)\n diff = nodes[1] - nodes[0]\n for i in range(2, len(nodes)):\n diff = min(diff, nodes[i] - nodes[i-1])\n return diff",
"def find_smallest(self):\n return self._find_smallest(self.root)",
"def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")",
"def sum(self) -> int:\n return self.root.sum",
"def BinaryTreeNodeDepthSum(self, root):\n return self.__binary_tree_node_sum(root)",
"def min_len(BST):\r\n if isinstance(BST,tuple):\r\n return min_len(BST[0]) + min_len(BST[1])\r\n else:\r\n return BST[0]",
"def min_depth(node):\n if not node:\n return 0\n elif (not node.left) and (not node.right):\n # found leaf\n return 1\n elif not node.left:\n # if the root has only 1 child, this prevents the minimum depth from\n # equaling zero\n return min_depth(node.right) + 1\n elif not node.right:\n return min_depth(node.left) + 1\n return min(min_depth(node.left), min_depth(node.right)) + 1",
"def min_value(tree):\n min_utility = float(\"inf\")\n \n if (is_terminal(tree)):\n return tree\n else:\n #options = []\n for node in tree:\n #options.append(max_value(node))\n min_utility = min(min_utility, max_value(node))\n return min_utility",
"def fn(node):\n if not node: return 0\n if not node.left or not node.right: return 1 + fn(node.left) + fn(node.right)\n return 1 + min(fn(node.left), fn(node.right))",
"def find_level_maxsum (self):\r\n level_queue = [self]\r\n next_level_queue = []\r\n curr_level = 0\r\n max_sum = -sys.maxsize\r\n while level_queue:\r\n curr_node = level_queue.pop(0)\r\n if curr_node.left:\r\n next_level_queue.append(curr_node.left)\r\n if curr_node.right:\r\n next_level_queue.append(curr_node.right)\r\n if not level_queue:\r\n sum_value = 0\r\n for nodes in next_level_queue:\r\n sum_value += nodes.root\r\n if sum_value > max_sum:\r\n max_sum = sum_value\r\n curr_level += 1\r\n level_queue = next_level_queue[:]\r\n next_level_queue = []\r\n if self.root> max_sum:\r\n max_sum = self.root\r\n return max_sum",
"def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo",
"def deep_min(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[0]\r\n return node.keys[0] if node.keys else None",
"def getSum2(root, level=0, maxLevel=None, sum=None):\n if root == None:\n return 0\n \n if maxLevel == None:\n maxLevel = [-1]\n sum = [0]\n \n if maxLevel[0] < level:\n sum[0] += root.data\n maxLevel[0] = level\n \n getSum2(root.right, level+1, maxLevel, sum) \n getSum2(root.left , level+1, maxLevel, sum)\n\n if level == 0:\n return sum[0]",
"def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total",
"def least_coins_recursion(total, coins):\r\n\tif total == 0:\r\n\t\treturn 0\r\n\tif total < 0:\r\n\t\treturn sys.maxsize\n\tglobal tmp_hash\n\tif tmp_hash[total] > 0 :\n\t\tmin_coins = tmp_hash[total]\n\telse:\n\t\ttmp_coins_list = list()\n\t\tfor j in coins:\n\t\t\ttmp_coins_list.append(1 + least_coins_recursion(total-j, coins))\r\n\t\tmin_coins = min(tmp_coins_list)\n\ttmp_hash[total] = min_coins\r\n\treturn min_coins",
"def minimumDominationCount(leaf):\n minimumDominationCount = np.nanmin(leaf.calDominationCount())\n return minimumDominationCount",
"def shortest_path_to_root(self):\n paths = self.hypernym_paths()\n shortest = paths.index(min([len(path) for path in paths]))\n return paths[shortest]",
"def find_smallest(node):\n smallest = node.value\n\n while node.left is not None:\n node = node.left\n smallest = node.value\n\n return smallest",
"def minKeyTree(root):\n try:\n min = None\n if (root is not None):\n if (root['left'] is None):\n min = root\n else:\n min = minKeyTree(root['left'])\n return min\n except Exception as exp:\n error.reraise(exp, 'BST:minKeyNode')"
] | [
"0.6976607",
"0.6895128",
"0.66671705",
"0.6653948",
"0.6637505",
"0.6618621",
"0.6516666",
"0.64048195",
"0.6392322",
"0.63633066",
"0.6349397",
"0.629449",
"0.6274608",
"0.6259616",
"0.6218573",
"0.62072426",
"0.60357225",
"0.60211045",
"0.59939396",
"0.59901595",
"0.59887516",
"0.59815925",
"0.5959336",
"0.59580314",
"0.5917617",
"0.5867806",
"0.58673763",
"0.5859804",
"0.58549815",
"0.58499"
] | 0.85443187 | 0 |
Build the API 'create node' payload specific to DEPOSITUS. | def payload_for_create(cls, nickname, **kwargs):
payload = super(DepositUsNode, cls).payload_for_create('DEPOSIT-US',
nickname=nickname,
**kwargs)
return payload | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_create(self, **kwargs):\n node = self.ironic_client.node.create(**kwargs)\n self.addCleanup(self.node_delete, node)\n return node",
"def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)",
"def create_node(\n self,\n node: Node,\n parameters: Any\n ) -> str:\n raise NotImplementedError",
"def _create_node(\n self,\n name,\n ):\n pass",
"def create_node(self, address, service):\n node = create_node(address, service)\n node.id = address + \"_\" + service\n return node",
"def payload_for_create(cls, nickname, **kwargs):\n payload = super(TriumphSubaccountUsNode, cls).payload_for_create('TRIUMPH-SUBACCOUNT-US',\n nickname=nickname,\n **kwargs)\n return payload",
"def create_node(self, **kwargs):\n if not self.nodes:\n self.get_nodes()\n\n _node = Node(project_id=self.project_id, connector=self.connector, **kwargs)\n\n _node.create()\n self.nodes.append(_node)\n print(\n f\"Created: {_node.name} -- Type: {_node.node_type} -- \"\n f\"Console: {_node.console}\"\n )",
"def addOnCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass",
"def post(self, node):\n if self._from_chassis:\n raise exception.OperationNotPermitted\n\n try:\n new_node = pecan.request.dbapi.create_node(node.as_dict())\n except Exception as e:\n with excutils.save_and_reraise_exception():\n LOG.exception(e)\n return Node.convert_with_links(new_node)",
"def create_node(self, **kwargs):\n size = kwargs['size'].ram\n params = {\n 'cmd' : 'dreamhost_ps-add_ps',\n 'movedata' : kwargs.get('movedata', 'no'),\n 'type' : kwargs['image'].name,\n 'size' : size\n }\n data = self.connection.request('/', params).object\n return Node(\n id = data['added_web'],\n name = data['added_web'],\n state = NodeState.PENDING,\n public_ip = [],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'type' : kwargs['image'].name\n }\n )",
"def post_analytics_node_create(self, resource_dict):\n pass",
"def post(self):\n node_id = blockchain.register_node(request.host)\n\n return {\n 'message': 'New node have been added.',\n 'node_id': node_id,\n 'nodes': list(blockchain.nodes)\n }, 201",
"def create_nodes(self):",
"def post_config_node_create(self, resource_dict):\n pass",
"def _create_node(self, item: Item) -> Dict[str, Any]:\n node = {'text': item.title,\n 'item-id': item.id,\n 'nodes': []}\n icon = self.icon_name(item)\n if icon:\n node['icon'] = 'glyphicon glyphicon-{}'.format(icon)\n node['item_title'] = item.title\n node['item_type'] = item.type\n node['item_note'] = item.note\n node['node_type'] = item.__class__.__name__.lower()\n if isinstance(item, Item):\n meta = self._node_metadata(item)\n creators = item.creators\n if meta is not None:\n node['metadata'] = meta\n res = self._item_mapper.get_resource_name(item)\n if res is None:\n res = self._find_child_resource(item, self.PDF_EXT_REGEXP)\n if res is None:\n res = self._find_child_name(item, self.PDF_FULL_REGEXP)\n if res is not None:\n node['resource'] = res\n if creators is not None:\n if meta is None:\n meta = []\n node['metadata'] = meta\n meta.append(('Creators', ', '.join(map(str, creators))))\n if meta is not None:\n meta.sort()\n return node",
"def create_node(self, xmlnode):\n node = self.nf.get_node(xmlnode)\n # Check that the uri is valid\n check_uri(node.uri, self.sm, shouldExist = False)\n # Check for reserved URI\n if node.uri.endswith(AUTO): \n node.set_uri(generate_uri(node.uri))\n # Make sure capabilities is clear\n node.clear_capabilities()\n if isinstance(node, DataNode):\n # Add service views\n node.clear_accepts()\n for uris in SERVICE_VIEWS:\n node.add_accepts(uris)\n # Make sure provides is clear\n node.clear_provides()\n views = [set(PROVIDES_VIEWS[x]) for x in SERVICE_VIEWS]\n views = set.union(*views)\n for view in list(views):\n node.add_provides(view)\n # Make sure nodes is clear\n if isinstance(node, ContainerNode):\n node.clear_nodes()\n # Check properties\n for property in node.properties:\n if property in READ_ONLY_PROPERTIES: raise VOSpaceError(401, 'User does not have permissions to set a readonly property.', summary = PERMISSION_DENIED)\n # If container, create container\n location = get_location(node.uri)\n if isinstance(node, ContainerNode) and not os.path.exists(location): os.makedirs(location)\n # Store node\n self.sm.create_node(node.tostring(), node.uri, NODETYPES[node.TYPE], location = location) \n # Register properties\n self.sm.register_properties(node.uri, node.properties)\n return xmlnode",
"def createNode(*args, name: AnyStr=\"\", parent: AnyStr=\"\", shared: bool=True, skipSelect:\n bool=True, **kwargs)->AnyStr:\n pass",
"def create_node(self, node_cfg):\n with self.__connect_node(node_cfg) as conn:\n self._provision_node(conn, node_cfg)\n self._bootup_node(conn)",
"def create_nodes(self, nodes: List[Node]):\n nodes_str = \",\\n\".join([str(n) for n in nodes])\n query = \"\"\"CREATE %s\"\"\" % nodes_str\n return self.create_tx(query)",
"def pre_analytics_node_create(self, resource_dict):\n pass",
"def _create_borrow_node(self):\n\n request_type = int(self.request.arguments['request_type'])\n if request_type not in [BorrowHandler.BORROW, BorrowHandler.LEND]:\n raise InvalidRequestTypeError(request_type)\n return {\n 'title': self.get_arg('title', required=True),\n 'user_id': self.get_arg('user_id', required=True),\n 'description': self.get_arg('description', required=False),\n 'distance': self.get_arg('distance', required=True),\n 'duration': self.get_arg('duration', required=True),\n 'request_type': request_type\n }",
"def create_node(self, topogramId, id=None, x=None, y=None, data={}):\n assert type(data) is dict\n if id : assert type(id) is str\n if x : assert type(x) is float or type(x) is int\n if y : assert type(y) is float or type(x) is int\n\n el = {\n \"id\" : id,\n \"x\" : x,\n \"y\" : y\n }\n for k in data :\n el[k] = data[k]\n\n node = { \"element\" : el, \"data\" : data }\n return self.make_request(\"POST\", \"nodes\", { \"topogramId\" : topogramId, \"nodes\" : [ node ]})",
"def create_node(id_, label, node_schema, add_ts=False):\n node_record = {}\n node_record[\"id\"] = id_\n node_record[\"label\"] = label\n\n # Add a timestamp to the properties if the caller requested it.\n if add_ts:\n node_record[\"properties\"] = {}\n node_record[\"properties\"][\"timestamp\"] = \\\n Utils._get_time_milliseconds()\n\n return node_record",
"def create_xml(self, array):\n valid_fields = [\n 'acquirerId',\n 'commerceId',\n 'purchaseCurrencyCode',\n 'purchaseAmount',\n 'purchaseOperationNumber',\n 'billingAddress',\n 'billingCity',\n 'billingState',\n 'billingCountry',\n 'billingZIP',\n 'billingPhone',\n 'billingEMail',\n 'billingFirstName',\n 'billingLastName',\n 'language',\n 'commerceMallId',\n 'terminalCode',\n 'tipAmount',\n 'HTTPSessionId',\n 'shippingAddress',\n 'shippingCity',\n 'shippingState',\n 'shippingCountry',\n 'shippingZIP',\n 'shippingPhone',\n 'shippingEMail',\n 'shippingFirstName',\n 'shippingLastName',\n 'reserved1',\n 'reserved2',\n 'reserved3',\n 'reserved4',\n 'reserved5',\n 'reserved6',\n 'reserved7',\n 'reserved8',\n 'reserved9',\n 'reserved10',\n 'reserved11',\n 'reserved12',\n 'reserved13',\n 'reserved14',\n 'reserved15',\n 'reserved16',\n 'reserved17',\n 'reserved18',\n 'reserved19',\n 'reserved20',\n 'reserved21',\n 'reserved22',\n 'reserved23',\n 'reserved24',\n 'reserved25',\n 'reserved26',\n 'reserved27',\n 'reserved28',\n 'reserved29',\n 'reserved30',\n 'reserved31',\n 'reserved32',\n 'reserved33',\n 'reserved34',\n 'reserved35',\n 'reserved36',\n 'reserved37',\n 'reserved38',\n 'reserved39',\n 'reserved40',\n ]\n\n root = ET.Element('VPOSTransaction1.2')\n\n temp_dict = dict()\n taxes = dict()\n taxes_vals = dict()\n\n for key, value in array.items():\n if key in valid_fields:\n temp_dict[key] = value\n elif re.search(r'tax_([0-9]{1}|[0-9]{2})_name', key):\n re.sub(r'(^tax_)|(_name$)', '', key)\n taxes[key] = value\n else:\n raise AlignetError('%s is not allowed value in Alignet.') % key\n\n for key, value in temp_dict.items():\n elem = ET.SubElement(root, key)\n elem.text = value\n\n #TODO: If some taxes exist add to the XML doc\n if len(taxes):\n pass\n\n return ET.tostring(root, encoding='iso-8859-1')",
"def _build_payload(self, body: Dict) -> Dict[str, Any]:\n return {'jsonrpc': '2.0',\n 'id': self._id_count,\n **body}",
"def create_node(self, drip_campaign_id, title, start_time, template_id, subject, from_email, from_name, initial,\n description=None):\n new_content = Content(template_id=template_id, subject=subject, from_email=from_email, from_name=from_name)\n new_node = Node(\n drip_campaign_id=drip_campaign_id,\n title=title,\n start_time=start_time,\n description=description,\n content=new_content,\n initial=initial,\n done=False,\n )\n new_node.save()\n return new_node.id",
"def pre_config_node_create(self, resource_dict):\n pass",
"def create_test_node_tag(**kw):\n tag = get_test_node_tag(**kw)\n dbapi = db_api.get_instance()\n return dbapi.add_node_tag(tag['node_id'], tag['tag'])",
"def createNode(self, pkg, exe, args, name, nspace):\r\n node = Node(self)\r\n self.callRemote('createNode', pkg, exe, args, name,\r\n nspace).chainDeferred(node)\r\n return node",
"def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )"
] | [
"0.6475023",
"0.61691827",
"0.60279626",
"0.5982535",
"0.58847415",
"0.5812112",
"0.57377934",
"0.56656283",
"0.5662233",
"0.56440794",
"0.5619852",
"0.5604117",
"0.5601905",
"0.5594947",
"0.5551773",
"0.55489177",
"0.5476511",
"0.54458004",
"0.5400765",
"0.53761464",
"0.5349492",
"0.5346053",
"0.5345478",
"0.5342864",
"0.5330822",
"0.53261095",
"0.5296854",
"0.526128",
"0.5249241",
"0.5241819"
] | 0.72631043 | 0 |
function used to load gene ontology file; gene ontology should be in OBO format | def load_gene_ontology(self, file_path):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_obo_file(self, obo_file, optional_attrs, load_obsolete, prt):\n reader = OBOReader(obo_file, optional_attrs)\n\n # Save alt_ids and their corresponding main GO ID. Add to GODag after populating GO Terms\n alt2rec = {}\n i = 0\n for rec in reader:\n # Save record if:\n # 1) Argument load_obsolete is True OR\n # 2) Argument load_obsolete is False and the GO term is \"live\" (not obsolete)\n if load_obsolete or not rec.is_obsolete:\n self[rec.id] = rec\n for alt in rec.alt_ids:\n alt2rec[alt] = rec\n\n # Save the typedefs and parsed optional_attrs\n # self.optobj = reader.optobj\n self.typedefs = reader.typedefs\n self._populate_terms(reader.optobj)\n self._set_level_depth(reader.optobj)\n\n # Add alt_ids to go2obj\n for goid_alt, rec in alt2rec.items():\n self[goid_alt] = rec\n desc = self._str_desc(reader)\n if prt is not None:\n prt.write(\"{DESC}\\n\".format(DESC=desc))\n return desc",
"def get_go():\n # decompress obo file if it wasn't yet\n if not os.path.exists(OBO_FILE):\n _decompress_obofile()\n # create global variable\n if __GO__[0] is None:\n __GO__[0] = onto.Ontology(OBO_FILE, with_rels=True, include_alt_ids=False)\n return __GO__[0]",
"def open_feature_ontology_pbobject(ontology_file):\n try:\n ontology = open_pbobject(ontology_file, FeatureOntologyPb2)\n if ontology is not None:\n logging.info('Successfully loaded FeatureOntology spec.')\n return ontology\n except Exception:\n logging.error('Failed to load ontology file' + ontology_file + '.')",
"def parse_obo(obo,\n output_file=None,\n id2name_file=None,\n id2namespace_file=None,\n alt_id_file=None):\n\n ## Keywords that screw up parsing:\n # import, is_anonymous, intersection_of, union_of\n\n ## Relations\n # 'is_a:'\n # 'relationship: has_part' # Not in filtered GO\n # 'relationship: occurs_in' # Not in filtered GO\n # 'relationship: part_of' \n # 'relationship: positively_regulates' \n # 'relationship: negatively_regulates'\n # 'relationship: regulates'\n # 'relationship: results_in' # Not in filtered GO\n\n stanza, edges = [], []\n id2name = dict()\n id2namespace = dict()\n alt_id = dict()\n in_term_stanza = False\n default_namespace_exists = False\n for line in io.open(obo).read().splitlines():\n\n line = line.split('!')[0].strip() # Remove comments\n\n if len(line)>0 and line[0]=='[' and line[-1]==']':\n # Add last stanza if it was a term stanza. Include namespace.\n if in_term_stanza:\n edges.extend(x+(namespace, ) for x in stanza)\n\n # Start new term stanza\n stanza = []\n \n # Set the default namespace, if it exists\n if default_namespace_exists:\n namespace = default_namespace\n \n # In a term stanzo or not\n in_term_stanza = line =='[Term]'\n\n name = None\n \n #if 'alt_id:' in line: assert False\n\n if 'id:' == line[:3]:\n curr_term = line.split('id:')[1].strip()\n elif 'alt_id:' in line:\n alt_term = line.split('alt_id:')[1].strip()\n if curr_term in alt_id: alt_id[curr_term].append(alt_term)\n else: alt_id[curr_term] = [alt_term]\n id2name[alt_term] = name\n elif 'name:' in line:\n name = line.split('name:')[1].strip()\n #assert not curr_term in id2name\n id2name[curr_term] = name\n elif 'is_a:' in line:\n parent = line.split('is_a:')[1].strip()\n stanza.append((parent, curr_term, 'is_a'))\n elif 'relationship:' in line:\n line = line.split('relationship:')[1].strip().split()\n if len(line)!=2: print(line)\n assert len(line)==2\n relation, parent = line\n stanza.append((parent, curr_term, relation))\n elif 'namespace:' == line[:10]:\n namespace = line.split('namespace:')[1].strip()\n assert not curr_term in id2namespace\n id2namespace[curr_term] = namespace\n elif 'default-namespace:' == line[:18]:\n namespace = line.split('default-namespace:')[1].strip()\n default_namespace_exists = True\n default_namespace = namespace\n\n pd.DataFrame(edges).to_csv(output_file, header=False, index=False, sep='\\t')\n pd.Series(id2name).to_csv(id2name_file, sep='\\t')\n pd.Series(id2namespace).to_csv(id2namespace_file, sep='\\t')\n pd.Series(dict([(a, c) for a, b in alt_id.items() for c in b])).to_csv(alt_id_file, sep='\\t')",
"def export_representations(self):\n\n dbpath, config = self._start()\n self.logger.msg1(\"Loading ontology\")\n obo_path = check_file(config.obo, dbpath, \"obo\")\n self.obo = MinimalObo(obo_path, True)\n self._export_reference_representations()\n self._export_model_representations(config)\n self._end()",
"def open_ontology_pbobject(ontology_file):\n try:\n ontology = parse_pbobject(ontology_file, OntologyV2Pb2)\n if ontology is not None:\n logging.info('Successfully loaded Ontology V2 spec.')\n return ontology\n except Exception:\n logging.error('Failed to load ontology file with V2 spec, trying V1 spec.')\n try:\n ontology = parse_pbobject(ontology_file, OntologyV1Pb2)\n if ontology is not None:\n logging.info('Successfully loaded Ontology V1 spec.')\n return ontology\n except Exception:\n if isinstance(ontology_file, str):\n logging.error('Failed to load ontology file' + ontology_file + 'with V1 spec also, returning None.')\n else:\n logging.error('Failed to load ontology file with V1 spec also, returning None.')",
"def __init__(self, obo_file=OBO_FILE, optional_attrs=None):\n self.optobj = self._init_optional_attrs(optional_attrs) # OboOptionalAttrs or None\n self.format_version = None # e.g., \"1.2\" of \"format-version:\" line\n self.data_version = None # e.g., \"releases/2016-07-07\" from \"data-version:\" line\n self.typedefs = {}\n\n # True if obo file exists or if a link to an obo file exists.\n print(\"obo_file:\")\n print(obo_file)\n if os.path.isfile(obo_file):\n self.obo_file = obo_file\n # GOTerm attributes that are necessary for any operations:\n else:\n raise Exception(\"COULD NOT READ({OBO})\\n\"\n \"download obo file first\\n \"\n \"[http://geneontology.org/ontology/\"\n \"go-basic.obo]\".format(OBO=obo_file))",
"def gos_files_creation(annotation_file, go_namespace_studied):\n go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo')\n\n # For each GO terms look to the namespaces associated with them.\n go_namespaces = {}\n for go_term in go_ontology:\n go_namespaces[go_term.id] = go_term.other['namespace'][0]\n\n # For each GO terms look if there is an alternative ID fo them.\n go_alt_ids = {}\n for go_term in go_ontology:\n if 'alt_id' in go_term.other:\n for go_alt in go_term.other['alt_id']:\n go_alt_ids[go_alt] = go_term.id\n\n # Genome file with genes associated with GO terms.\n df = pa.read_csv(annotation_file, sep='\\t', header=None)\n df.columns = ['Gene_Name', 'GOs']\n df.replace(np.nan, '', inplace=True)\n\n gos_in_df = []\n for gos in df['GOs']:\n for go in gos.split(','):\n if go not in gos_in_df:\n gos_in_df.append(go)\n\n df.set_index('Gene_Name', inplace=True)\n\n gene_gos = []\n for gene, row in df.iterrows():\n for go in row['GOs'].split(','):\n gene_gos.append((go, gene))\n\n dic_go_genes = {}\n for go in tqdm(gos_in_df):\n genes = []\n for gene_go in gene_gos:\n if go != '' and go not in go_namespaces:\n go = go_alt_ids[go]\n if gene_go[0] == go and go != '' and go_namespaces[go] == go_namespace_studied:\n genes.append(gene_go[1])\n if go != '':\n dic_go_genes[go] = genes\n\n print(len(dic_go_genes))\n\n delete_keys = []\n for go in dic_go_genes:\n if len(dic_go_genes[go]) < 4:\n delete_keys.append(go)\n\n for key in delete_keys:\n del dic_go_genes[key]\n print(len(dic_go_genes))\n\n df_go = pa.DataFrame.from_dict(dic_go_genes, orient='index')\n df_go.insert(0, 'Description', 'GO_terms')\n\n df_go.to_csv('go_gene.gmt', sep='\\t', header=False)\n\n df.reset_index(inplace=True)\n df_query_go = pa.concat([pa.Series(row['Gene_Name'], row['GOs'].split(','))\n for _, row in df.iterrows()]).reset_index()\n df_query_go.columns = ['GOs', 'Gene_Name']\n df_query_go = df_query_go[['Gene_Name', 'GOs']]\n df_query_go.to_csv('query_go.tsv', sep='\\t', index=False)",
"def test_create_gene_ontology(self):\n\n # Here are mappings for just a few yeast genes.\n\n mapping = {}\n mapping['STE7'] = ['GO:0000187']\n mapping['PBS2'] = ['GO:0000187']\n mapping['NOP8'] = [\n 'GO:0003676', 'GO:0003723', 'GO:0042254', 'GO:0005634', 'GO:0005730'\n ]\n\n # Build the ontology, then see if it looks correct.\n\n root = dc.models.tensorgraph.models.ontology.create_gene_ontology(\n mapping, min_node_features=1)\n assert len(root.feature_ids) == 0\n\n def find_features(node, features):\n features.update(node.feature_ids)\n for child in node.children:\n find_features(child, features)\n\n all_features = set()\n find_features(root, all_features)\n assert len(all_features) == 3\n for key in mapping:\n assert key in all_features",
"def from_file(path) -> ontol.Ontology:\n abs_path = os.path.abspath(os.path.normpath(path))\n\n return __ontology[abs_path]",
"def load_gene_annotation(self, file_path):\n\t\tpass",
"def __init__(\n self,\n gene_lists,\n taxon,\n requests_per_sec=10,\n padj_threshold=0.05,\n log2_fc_threshold=0,\n fc_threshold=None,\n enrichment_fdr=0.05,\n annot_col=\"Name\",\n ):\n Ontology.__init__(self)\n PlotGOTerms.__init__(self)\n\n self.gene_lists = gene_lists\n self.enrichment_fdr = enrichment_fdr\n\n # users can set the fold change threshold in the log2 scale or normal\n # scale.\n assert log2_fc_threshold >= 0, \"log2 fc_threshold must be >=0\"\n if fc_threshold is not None:\n log2_fc_threshold = pylab.log2(fc_threshold)\n\n from bioservices import panther, quickgo\n\n self.quick_go_graph = QuickGOGraph()\n\n self.panther = panther.Panther(cache=True)\n self.valid_taxons = [x[\"taxon_id\"] for x in self.panther.get_supported_genomes()]\n self.summary = {}\n\n self._taxon = None\n self.taxon = taxon\n\n self.quickgo = quickgo.QuickGO(cache=True)\n self.quickgo.requests_per_sec = requests_per_sec\n self.quickgo.services.settings.TIMEOUT = 120\n\n self._ancestors = {\n \"MF\": \"GO:0003674\",\n \"CC\": \"GO:0005575\",\n \"BP\": \"GO:0008150\",\n \"SLIM_MF\": \"GO:0003674\",\n \"SLIM_CC\": \"GO:0005575\",\n \"SLIM_BP\": \"GO:0008150\",\n }\n self.ontologies.extend(\n [\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_MF\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_BP\",\n \"ANNOT_TYPE_ID_PANTHER_GO_SLIM_CC\",\n \"ANNOT_TYPE_ID_PANTHER_PC\",\n \"ANNOT_TYPE_ID_PANTHER_PATHWAY\",\n \"ANNOT_TYPE_ID_REACTOME_PATHWAY\",\n ]\n )\n\n self.ontology_aliases.extend(\n [\n \"SLIM_MF\",\n \"SLIM_BP\",\n \"SLIM_CC\",\n \"PROTEIN\",\n \"PANTHER_PATHWAY\",\n \"REACTOME_PATHWAY\",\n ]\n )\n\n # panther accepts onyl ~2-3000 genes at max. Let us restrict the analysis\n # to the first 2000 genes based on their log2 fold change 2000 + and\n # 2000 negatives\n\n msg = \"Ignoring DEGs with adjusted p-value > {} and fold change in [{}, {}]\".format(\n padj_threshold, 1 / (2**log2_fc_threshold), 2**log2_fc_threshold\n )\n logger.info(msg)\n\n # used in report module\n self.summary[\"fold_change_range\"] = [\n 1 / (2**log2_fc_threshold),\n 2**log2_fc_threshold,\n ]\n self.summary[\"padj_threshold\"] = padj_threshold\n\n fc_threshold = log2_fc_threshold\n\n for x in sorted(gene_lists.keys()):\n\n N = len(gene_lists[x])\n logger.info(f\"Starting with {N} genes from category '{x}'\")\n\n self.summary[\"DGE_after_filtering\"] = {k: len(v) for k, v in gene_lists.items()}\n\n self.enrichment = {}\n self.stats = {}\n self.obsolets = []",
"def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"",
"def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])",
"def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()",
"def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")",
"def testEnsemblToGeneFile(self):\n\n e2g = EnsemblToGeneFile(self.enstogenefile)\n\n self.assertTrue(e2g)\n\n self.assertTrue(len(e2g.geneids) == 38803)\n self.assertTrue(len(e2g.tranids) == 94647)",
"def load_gene_data_from_file(self) -> None:\n logger.info(\"Loading genes data from file\")\n if not self.gene_data or len(self.gene_data.items()) == 0:\n self.gene_data = {}\n file_path = self._get_cached_file(cache_path=self.gene_data_cache_path, file_source_url=self.gene_data_url)\n with open(file_path) as file:\n for line in file:\n fields = line.strip().split(',')\n if fields[1].startswith(\"WBGene\"):\n name = fields[2] if fields[2] != '' else fields[3]\n self.gene_data[\"WB:\" + fields[1]] = Gene(\"WB:\" + fields[1], name, fields[4] == \"Dead\", False)",
"def readGenos(self,genofile):\n self.gen = np.zeros((len(self.ped),len(self.mark)))\n self.gen[:] = np.nan\n marklist = None\n with open(genofile,'r') as fin:\n for line in fin:\n if line.startswith('#'):\n if not marklist: marklist = line.strip('#').strip().split()\n continue\n l = line.strip().split()\n if len(l) < 1: continue\n try: irow = self.ped[l[self.nc]]['rank']\n except KeyError:\n continue\n for i,mark in enumerate(self.marklist):\n if mark not in self.mark: continue\n icol = self.mark[mark]['rank']\n if self.ia == 1:\n a = l[i+self.ic]\n elif self.ia == 2:\n a = self.tbase012(l[i+self.ic],mark)\n elif self.ia == 3:\n a = self.tbase012(l[i*2+self.ic]+l[i*2+1+self.ic],mark)\n if a not in ['0','1','2']: a = np.nan\n else: a = int(a)\n self.gen[irow,icol] = a",
"def read_graph_g2o(filename):\n Edge = namedtuple(\n 'Edge', ['Type', 'fromNode', 'toNode', 'measurement', 'information'])\n edges = []\n nodes = {}\n with open(filename, 'r') as file:\n for line in file:\n data = line.split()\n\n if data[0] == 'VERTEX_SE2':\n nodeId = int(data[1])\n pose = np.array(data[2:5], dtype=np.float32)\n nodes[nodeId] = pose\n\n elif data[0] == 'VERTEX_XY':\n nodeId = int(data[1])\n loc = np.array(data[2:4], dtype=np.float32)\n nodes[nodeId] = loc\n\n elif data[0] == 'EDGE_SE2':\n Type = 'P'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:6], dtype=np.float32)\n uppertri = np.array(data[6:12], dtype=np.float32)\n information = np.array(\n [[uppertri[0], uppertri[1], uppertri[2]],\n [uppertri[1], uppertri[3], uppertri[4]],\n [uppertri[2], uppertri[4], uppertri[5]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n elif data[0] == 'EDGE_SE2_XY':\n Type = 'L'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:5], dtype=np.float32)\n uppertri = np.array(data[5:8], dtype=np.float32)\n information = np.array([[uppertri[0], uppertri[1]],\n [uppertri[1], uppertri[2]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n else:\n print('VERTEX/EDGE type not defined')\n\n # compute state vector and lookup table\n lut = {}\n x = []\n offset = 0\n for nodeId in nodes:\n lut.update({nodeId: offset})\n offset = offset + len(nodes[nodeId])\n x.append(nodes[nodeId])\n x = np.concatenate(x, axis=0)\n\n # collect nodes, edges and lookup in graph structure\n graph = Graph(x, nodes, edges, lut)\n print('Loaded graph with {} nodes and {} edges'.format(\n len(graph.nodes), len(graph.edges)))\n\n return graph",
"def on_ontology_parse(self, ctx):\n return None",
"def load_nh_gaia():\n \n dir_gaia = os.path.expanduser('~') + '/Data/Catalogs/Gaia/' \n# file_txt = 'nh16.mega.gaia.rdmpm.txt' # Original catalog, but covers MU69 area as seen from Earth\n file_txt = 'mu69.mega.gaia.rdmspm.txt' # Smaller area, covers MU69 as seen from NH.\n \n file_pickle = file_txt.replace('.txt', '.pkl')\n \n if os.path.isfile(dir_gaia + file_pickle):\n lun = open(dir_gaia + file_pickle, 'rb')\n gaia = pickle.load(lun)\n lun.close()\n print(\"Loaded: \" + file_pickle)\n \n else:\n \n # Read the NH MegaCam-Gaia catalog from disk\n \n gaia = astropy.io.ascii.read(dir_gaia + file_txt, format = 'basic')\n \n # Make a plot of the Gaia stars\n \n plt.plot(gaia['RA'], gaia['Dec'], linestyle='none', marker = '.', ms=0.005)\n plt.xlabel('RA [deg]')\n plt.ylabel('Dec [deg]')\n plt.title('NH MegaCam-Gaia catalog')\n plt.show()\n \n # Save it as a pickle file\n \n lun = open(dir_gaia + file_pickle, 'wb')\n pickle.dump(gaia, lun) \n print(\"Wrote: \" + dir_gaia + file_pickle)\n lun.close()\n \n return gaia",
"def __init__(self, config: GenedescConfigParser, species: str, go_relations: List[str] = None,\n do_relations: List[str] = None, use_cache: bool = False):\n self.config = config\n raw_files_source = config.get_wb_raw_file_sources()\n cache_location = config.get_cache_dir()\n release_version = config.get_wb_release()\n organisms_info = config.get_wb_organisms_info()\n project_id = organisms_info[species][\"project_id\"]\n self.sister_sp_fullname = \"\"\n if \"main_sister_species\" in organisms_info[species] and \"full_name\" in \\\n organisms_info[organisms_info[species][\"main_sister_species\"]]:\n self.sister_sp_fullname = organisms_info[organisms_info[species][\"main_sister_species\"]][\"full_name\"]\n self.orth_fullnames = \"\"\n if \"ortholog\" in organisms_info[species] and all([\"full_name\" in organisms_info[ortholog_sp] for ortholog_sp in\n organisms_info[species][\"ortholog\"]]):\n self.orth_fullnames = [organisms_info[ortholog_sp][\"full_name\"] for ortholog_sp in\n organisms_info[species][\"ortholog\"]]\n expression_cluster_anatomy_prefix = organisms_info[species][\"ec_anatomy_prefix\"] if \\\n \"ec_anatomy_prefix\" in organisms_info[species] else None\n expression_cluster_molreg_prefix = organisms_info[species][\"ec_molreg_prefix\"] if \\\n \"ec_molreg_prefix\" in organisms_info[species] else None\n expression_cluster_genereg_prefix = organisms_info[species][\"ec_genereg_prefix\"] if \\\n \"ec_genereg_prefix\" in organisms_info[species] else None\n super().__init__(go_relations=go_relations, do_relations=do_relations, use_cache=use_cache)\n self.gene_data_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id +\n '.' + release_version + \".geneIDs.txt.gz\")\n self.gene_data_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + '.geneIDs.txt.gz'\n self.go_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"gene_ontology.\" + release_version + \".obo\")\n self.go_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/gene_ontology.' + \\\n release_version + '.obo'\n self.go_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + \".\" + project_id + \".\" + release_version +\n \".gene_association.wb.gz\")\n self.go_associations_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + \".gene_association.wb.gz\"\n self.do_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/disease_ontology.' + \\\n release_version + '.obo'\n self.do_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"disease_ontology.\" + release_version + \".obo\")\n self.do_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"disease_associations.by_orthology.\" + release_version +\n \".tsv.txt\")\n self.do_associations_url = raw_files_source + '/' + release_version + \\\n '/ONTOLOGY/disease_association.by_orthology.' + release_version + '.tsv.txt'\n self.do_associations_new_cache_path = os.path.join(cache_location, \"wormbase\", release_version, 'ONTOLOGY',\n 'disease_association.' 
+ release_version + '.daf.txt')\n self.do_associations_new_url = raw_files_source + '/' + release_version + '/ONTOLOGY/disease_association.' + \\\n release_version + '.daf.txt'\n self.orthology_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + '.orthologs.txt.gz'\n self.orthology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id + '.' +\n release_version + \".orthologs.txt.gz\")\n self.orthologs = defaultdict(lambda: defaultdict(list))\n self.protein_domain_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + \\\n project_id + '/annotation/' + species + '.' + project_id + '.' + release_version + \\\n '.protein_domains.csv.gz'\n self.protein_domain_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id +\n '.' + release_version + \".protein_domains.csv.gz\")\n self.protein_domains = defaultdict(list)\n self.expression_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"anatomy_ontology.\" + release_version + \".obo\")\n self.expression_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/anatomy_ontology.' + \\\n release_version + '.obo'\n self.expression_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"anatomy_association.\" + release_version + \".wb\")\n self.expression_associations_url = raw_files_source + '/' + release_version + \\\n '/ONTOLOGY/anatomy_association.' + release_version + '.wb'\n self.expression_cluster_anatomy_url = self._get_expression_cluster_url(\n prefix=expression_cluster_anatomy_prefix, ec_type=\"anatomy\", release_version=release_version)\n self.expression_cluster_anatomy_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_anatomy_prefix, ec_type=\"anatomy\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_anatomy_data = defaultdict(list) if self.expression_cluster_anatomy_url else None\n self.expression_cluster_molreg_url = self._get_expression_cluster_url(\n prefix=expression_cluster_molreg_prefix, ec_type=\"molReg\", release_version=release_version)\n self.expression_cluster_molreg_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_molreg_prefix, ec_type=\"molReg\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_molreg_data = defaultdict(list) if self.expression_cluster_molreg_url else None\n self.expression_cluster_genereg_url = self._get_expression_cluster_url(\n prefix=expression_cluster_genereg_prefix, ec_type=\"geneReg\", release_version=release_version)\n self.expression_cluster_genereg_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_genereg_prefix, ec_type=\"geneReg\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_genereg_data = defaultdict(list) if self.expression_cluster_genereg_url else None",
"def main():\n\n # Accept up to three command-line arguments\n input_terms = \"<input_GO_terms_file>\"\n input_annotations = \"<input_gene_associations_file>\"\n output_filename = \"<output_filename>\"\n\n\n # The first two arguments are required GO terms file ending with .obo\n # and gene association GAF file ending with .gaf\n if len(sys.argv) < 3:\n sys.exit(\"Please provide required GO terms .obo file and gene \" +\n \"assocatiion .gaf file.\")\n elif not sys.argv[1].endswith(\".obo\"):\n sys.exit(\"Please provide a GO terms .obo file.\")\n elif not sys.argv[2].endswith(\".gaf\"):\n sys.exit(\"Please provide a gene association .gaf file.\")\n else:\n input_terms = sys.argv[1]\n input_annotations = sys.argv[2]\n\n\n # Check if the provided import .obo or .gaf files exist\n if not input_terms:\n sys.exit(input_terms + \" not found. Check the file path and try again.\")\n elif not input_annotations:\n sys.exit(input_annotations + \" not found. Check the file path and try again.\")\n elif len(sys.argv) == 3:\n output_filename = \"results.tsv\"\n sys.stdout = open(\"results.tsv\", \"w\")\n elif len(sys.argv) == 4:\n output_filename = sys.argv[3] + \".tsv\"\n sys.stdout = open(output_filename, \"w\")\n\n\n # parse id and is_valeus and make a go_dict\n split_input_terms = split_terms(input_terms)\n go_dict = {}\n for record in split_input_terms:\n (go_id, is_a) = parse_go_term(record)\n key_go_dict = \"\".join(go_id)\n go_dict[key_go_dict] = is_a\n\n\n # Export an annotation gene information to tsv format into the output file\n gene_association_map = map_protein_to_go(input_annotations)\n for protein, go_ids in sorted(gene_association_map.items()):\n print(protein, end=\"\")\n\n for go_id in sorted(go_ids):\n parent_go_ids = find_parent_terms(go_id, go_dict)\n\n count = 0\n for parent_go_id in sorted(parent_go_ids):\n\n if count == 0:\n print(\"\\t\", go_id, \"\\t\", parent_go_id)\n count += 1\n else:\n print(\"\\t\", parent_go_id, sep=\"\\t\")\n\n sys.stdout.close()",
"def _load_gene(self, gene, batch) -> None:\n try:\n assert Gene(**gene)\n except pydantic.error_wrappers.ValidationError as e:\n logger.warning(f\"Unable to load {gene} due to validation error: \"\n f\"{e}\")\n else:\n concept_id = gene['concept_id'].lower()\n gene['label_and_type'] = f\"{concept_id}##identity\"\n gene['src_name'] = \\\n PREFIX_LOOKUP[gene['concept_id'].split(':')[0].lower()]\n gene['item_type'] = 'identity'\n\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in gene:\n value = gene[attr_type]\n if value is not None and value != []:\n if isinstance(value, str):\n items = [value.lower()]\n else:\n gene[attr_type] = list(set(value))\n items = {item.lower() for item in value}\n for item in items:\n batch.put_item(Item={\n 'label_and_type': f\"{item}##{item_type}\",\n 'concept_id': concept_id,\n 'src_name': gene['src_name'],\n 'item_type': item_type\n })\n else:\n del gene[attr_type]\n batch.put_item(Item=gene)\n self._processed_ids.append(concept_id)",
"def load_genes(outdir, confidence, validate_sg, verbose, idx=None, genes=None):\n\n if not genes is None:\n if not idx is None:\n return copy.deepcopy(genes[idx])\n else:\n gene_file = _get_gene_fname(outdir, confidence, validate_sg)\n\n if verbose:\n print('loading annotation information from %s' % gene_file)\n if idx is None:\n (genes, events) = pickle.load(open(gene_file, 'rb'), encoding='latin1')\n else:\n gene_db_file = re.sub(r'.pickle$', '', gene_file) + '.db.pickle'\n gene_idx_file = re.sub(r'.pickle$', '', gene_file) + '.idx.pickle'\n if os.path.exists(gene_idx_file):\n genes = []\n offsets = pickle.load(open(gene_idx_file, 'rb'))\n gene_handle = open(gene_db_file, 'rb')\n if not hasattr(idx, '__iter__'):\n idx = [idx]\n for e in idx:\n gene_handle.seek(offsets[e], 0)\n genes.append(pickle.load(gene_handle), encoding='latin1')\n genes = sp.array(genes)\n else:\n (genes, events) = pickle.load(open(gene_file, 'rb'), encoding='latin1')\n genes = genes[idx]\n\n return genes",
"def merge_orf_and_funtax( orf_file, funtax_file ):\n orf_df = pd.read_table(orf_file, header=None, names=orf_names, index_col='ORF_ID', usecols=orf_names, engine='python', encoding=\"ISO-8859-1\", quoting=3)\n funtax_df = pd.read_table(funtax_file, index_col='ORF_ID', engine='python', encoding=\"ISO-8859-1\", quoting=3)\n funtax_df[['COG','KO']] = orf_df[['COG','KO']]\n funtax_df['taxonId'] = funtax_df['taxonomy'].replace(r'.+\\(([0-9]+)\\)', value=r'\\1', regex=True)\n genes = funtax_df.reset_index()\n genes['gene'] = genes['ORF_ID']\n return genes.set_index('gene')",
"def add_ontology(metadata):\n metadata = add_surface_ontology(metadata)\n metadata = add_place_ontology(metadata)\n return metadata",
"def parseGOOBO(filename):\n with open(filename, \"r\") as infile:\n currentGOTerm = None\n for line in infile:\n line = line.strip()\n if not line: continue #Skip empty\n if line == \"[Term]\":\n if currentGOTerm: yield processGOTerm(currentGOTerm)\n currentGOTerm = defaultdict(list)\n elif line == \"[Typedef]\":\n #Skip [Typedef sections]\n currentGOTerm = None\n else: #Not [Term]\n #Only process if we're inside a [Term] environment\n if currentGOTerm is None: continue\n key, sep, val = line.partition(\":\")\n currentGOTerm[key].append(val.strip())\n #Add last term\n if currentGOTerm is not None:\n yield processGOTerm(currentGOTerm)",
"def _load_obcfile(casename=None): \n\n data={}\n\n if casename==None:\n print('_load_obcfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_obc.dat','r')\n except IOError:\n print('_load_obcfile: invalid case name.')\n return data\n\n obc_str=fp.readline().split('=')\n obc_num=int(obc_str[1])\n t_data1=np.genfromtxt(casename+'_obc.dat',skip_header=1)\n fp.close()\n\n data['obcf_num']=obc_num\n data['obcf_numbers']=t_data1[:,0]\n data['obcf_nodes']=t_data1[:,1]\n data['obcf_value']=t_data1[:,2]\n\n \n return data"
] | [
"0.67056936",
"0.649405",
"0.60760856",
"0.60657376",
"0.60421205",
"0.60260266",
"0.59746313",
"0.59107745",
"0.5878593",
"0.5770445",
"0.57476443",
"0.5575497",
"0.5566608",
"0.551246",
"0.55002576",
"0.5498977",
"0.54912657",
"0.54823756",
"0.54741764",
"0.54462695",
"0.5401311",
"0.5401239",
"0.5396897",
"0.537225",
"0.53677154",
"0.53044474",
"0.53015655",
"0.52953964",
"0.52733725",
"0.5272979"
] | 0.7840323 | 0 |
function used to load gene annotation file; gene annotation file should be in GAF format | def load_gene_annotation(self, file_path):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_gene_ontology(self, file_path):\n\t\tpass",
"def parse(fname, format=\"gaf\", evidence_codes=None):\n if evidence_codes:\n if evidence_codes == 'exp':\n ecode_policy = anno_parsers.ExperimentalEvidencePolicy()\n else:\n ecode_policy = anno_parsers.AllowedSetEvidencePolicy()\n for e_code in evidence_codes:\n ecode_policy.addEvidence(e_code)\n if ecode_policy.isEmpty():\n raise ValueError('GGTK Invalid Evidence Codes: No valid evidence codes in the set.')\n\n if format == 'gaf' or format == 'goa':\n p = anno_parsers.GafAnnotationParser(ecode_policy)\n elif format == 'mgi':\n p = anno_parsers.MgiAnnotationParser(ecode_policy)\n elif format == 'entrez':\n p = anno_parsers.EntrezGene2GoAnnotationParser(ecode_policy)\n else:\n raise ValueError('GGTK Parser Not Defined: No parser of type %s is defined.' % format)\n \n if not p.isFileGood(fname):\n raise IOError('GGTK File Error: File %s not found or incorrectly formatted.' % fname)\n anno_proxy = p.parseAnnotationFile(fname)\n return _AnnotationData.AnnotationData(anno_proxy)\n\n else:\n if format == 'gaf' or format == 'goa':\n p = anno_parsers.GafAnnotationParser()\n if not p.isFileGood(fname):\n raise IOError('GGTK File Error: File %s not found or incorrectly formatted.' % fname)\n anno_proxy = p.parseAnnotationFile(fname)\n return _AnnotationData.AnnotationData(anno_proxy)\n\n elif format == 'mgi':\n p = anno_parsers.MgiAnnotationParser()\n if not p.isFileGood(fname):\n raise IOError('GGTK File Error: File %s not found or incorrectly formatted.' % fname)\n anno_proxy = p.parseAnnotationFile(fname)\n return _AnnotationData.AnnotationData(anno_proxy)\n\n elif format == 'entrez':\n p = anno_parsers.EntrezGene2GoAnnotationParser()\n if not p.isFileGood(fname):\n raise IOError('GGTK File Error: File %s not found or incorrectly formatted.' % fname)\n anno_proxy = p.parseAnnotationFile(fname)\n return _AnnotationData.AnnotationData(anno_proxy)\n else:\n raise ValueError('GGTK Parser Not Defined: No parser of type %s is defined.' % format)",
"def GFFParse(gff_file):\n genes, utr5, exons=dict(), dict(), dict()\n transcripts, utr3, cds=dict(), dict(), dict()\n # TODO Include growing key words of different non-coding/coding transcripts \n features=['mrna', 'transcript', 'ncrna', 'mirna', 'pseudogenic_transcript', 'rrna', 'snorna', 'snrna', 'trna', 'scrna', 'mrna_te_gene']\n gff_handle=open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line=gff_line.strip('\\n\\r').split('\\t')\n if re.match(r'#|>', gff_line[0]): # skip commented line or fasta identifier line \n continue\n if len(gff_line)==1: # skip fasta sequence/empty line if present \n continue \n assert len(gff_line)==9, '\\t'.join(gff_line) # not found 9 tab-delimited fields in this line \n if '' in gff_line: # skip this line if there any field with an empty value\n print 'Skipping..', '\\t'.join(gff_line)\n continue\n if gff_line[-1][-1]==';': # trim the last ';' character \n gff_line[-1]=gff_line[-1].strip(';')\n if gff_line[2].lower() in ['gene', 'pseudogene', 'transposable_element_gene']:\n gid, gene_info=None, dict()\n gene_info['start']=int(gff_line[3])\n gene_info['stop']=int(gff_line[4])\n gene_info['chr']=gff_line[0]\n gene_info['source']=gff_line[1]\n gene_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=') # gff attributes are separated by key=value pair \n if attb[0]=='ID':\n gid=attb[1]\n break\n genes[(gff_line[0], gid)]=gene_info # store gene information based on the chromosome and gene symbol.\n elif gff_line[2].lower() in features: \n gid, mrna_info=None, dict() \n mrna_info['start']=int(gff_line[3])\n mrna_info['stop']=int(gff_line[4])\n mrna_info['chr']=gff_line[0]\n mrna_info['strand']=gff_line[6]\n mrna_info['type'] = gff_line[2]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n gid=attb[1]\n elif attb[0]=='ID':\n mrna_info[attb[0]]=attb[1]\n for fid in gid.split(','): # child may be mapped to multiple parents ex: Parent=AT01,AT01-1-Protein \n if (gff_line[0], fid) in transcripts:\n transcripts[(gff_line[0], fid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], fid)]=[mrna_info]\n elif gff_line[2].lower() in ['exon', 'pseudogenic_exon']:\n tids, exon_info=None, dict()\n exon_info['start']=int(gff_line[3])\n exon_info['stop']=int(gff_line[4])\n exon_info['chr']=gff_line[0]\n exon_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)]=[exon_info]\n elif gff_line[2].lower() in ['five_prime_utr']:\n utr5_info, tids=dict(), None\n utr5_info['start']=int(gff_line[3])\n utr5_info['stop']=int(gff_line[4])\n utr5_info['chr']=gff_line[0]\n utr5_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)]=[utr5_info]\n elif gff_line[2].lower() in ['cds']:\n cds_info, tids=dict(), None\n cds_info['start']=int(gff_line[3])\n cds_info['stop']=int(gff_line[4])\n cds_info['chr']=gff_line[0]\n cds_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n 
cds[(gff_line[0], tid)]=[cds_info]\n elif gff_line[2].lower() in ['three_prime_utr']:\n utr3_info, tids=dict(), None\n utr3_info['start']=int(gff_line[3])\n utr3_info['stop']=int(gff_line[4])\n utr3_info['chr']=gff_line[0]\n utr3_info['strand']=gff_line[6]\n for attb in gff_line[-1].split(';'):\n attb=attb.split('=')\n if attb[0]=='Parent':\n tids=attb[1]\n break\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)]=[utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds",
"def _gene_ann(gene_ann_path):\n gene_ann = pd.read_csv(gene_ann_path)\n protein_gene = gene_ann[gene_ann.gene_type ==\n 'protein_coding'].gene_name.tolist()\n return(protein_gene)",
"def parse_KEGG(kaas_f, annot_d):\n\n with open(kaas_f,\"r\") as f:\n\n for line in f:\n line = line.strip().split(\"\\t\")\n \n # If there is a kegg annotation (I think KEGGS should be unique)\n if not len(line) == 1:\n seqid = line[0]\n kegg = line[1]\n \n annot_d[seqid][\"KEGG\"] = kegg\n \n return annot_d",
"def import_gene_assembly(infile):\n deserialized = None\n with open(infile, 'r') as file_handle:\n deserialized = json.load(file_handle, object_hook=decode_assembly)\n return deserialized",
"def parse_anno_from_gff3(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### initial run to get the transcript to gene mapping\n if options.verbose:\n print >> sys.stderr, \"... init structure\"\n\n trans2gene = dict() ### dict with: keys = transcript IDs, values = gene IDs\n for line in open(options.anno, 'r'):\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n if sl[2] in ['mRNA', 'transcript', 'mrna', 'miRNA', 'tRNA', 'snRNA', 'snoRNA', 'ncRNA', 'mRNA_TE_gene', 'rRNA', 'pseudogenic_transcript', 'transposon_fragment']:\n tags = get_tags_gff(sl[8])\n trans2gene[tags['ID']] = tags['Parent']\n\n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for contig %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1,), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict() # contains the exon list per transcript, only need this for mask_alternative_overlap\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n tags = get_tags_gff(sl[8])\n if sl[2] == 'exon':\n trans_id = tags['Parent']\n gene_id = trans2gene[trans_id]\n else:\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n ### store for each position of the transcriptome a tuple containing all overlapping gene IDs\n ### assume positions are 1 based and in closed intervals\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n if not sl[0] in exons:\n exons[sl[0]] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[sl[0]][trans_id].append([start, stop])\n except KeyError:\n exons[sl[0]][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[sl[0]][start:stop] > 0):\n for p in range(start, stop):\n if anno[sl[0]][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[sl[0]][p]]) | set([gene_id]))\n try:\n anno[sl[0]][p] = gene2idx[new_set]\n except KeyError:\n anno[sl[0]][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[sl[0]][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n \n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def _read_eeg(eeg_file):\r\n pass",
"def index_gff(gff, logger):\n f_in = open(gff, \"r\")\n gene_start_stop_dict = dict()\n gene_scaff_dict = dict()\n gene_first_exon_dict = dict()\n gene_direction = dict()\n gene_gff_line = dict()\n gene_set = set([])\n for line in f_in:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n continue\n assert len(line.split(\"\\t\")) == 9 , \"GFF fields wrong length should be 9\"\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = line.split(\"\\t\")\n gene = split_gene_name(gene_info)\n scaff = scaff.rstrip()\n if feature == \"gene\":\n gene_gff_line[gene] = line\n gene_set.add(gene)\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_start_stop_dict[gene] = start_stop\n gene_scaff_dict[gene] = scaff\n gene_direction[gene] = direction\n if not gene in gene_first_exon_dict.keys():\n if feature == \"exon\" or feature == \"CDS\":\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_first_exon_dict[gene] = start_stop\n f_in.close()\n logger.info(\"Number of genes = %d\", len(gene_set))\n return gene_start_stop_dict, gene_first_exon_dict, \\\n gene_scaff_dict, gene_direction, gene_set, gene_gff_line",
"def parse_anno_from_gtf(options, contigs):\n\n anno = dict()\n idx2gene = dict()\n gene2idx = dict()\n\n if options.verbose:\n print >> sys.stderr, \"Parsing annotation from %s ...\" % options.anno\n \n ### init genome structure\n for c in contigs:\n if options.verbose:\n print >> sys.stderr, 'reserving memory for chr %s of len %s' % (c, contigs[c])\n anno[c] = sp.zeros((contigs[c] + 1, ), dtype = 'int32')\n\n ### init list of considered GFF fields\n fields = options.fields.split(',')\n\n ### generate a list of exons with attached gene/transcript information\n ### one list per chromsome\n counter = 1\n gene_counter = 2 ### 0 is default for no coverage and 1 is mask for overlap\n\n exons = dict()\n\n t0 = time.time()\n for line in open(options.anno, 'r'):\n if options.verbose and counter % 10000 == 0:\n print >> sys.stderr, '.',\n if counter % 100000 == 0:\n t1 = time.time() - t0\n print >> sys.stderr, \"%i - took %.2f secs\" % (counter, t1)\n t0 = time.time()\n counter += 1 \n\n if line[0] == '#':\n continue\n sl = line.strip().split('\\t')\n \n if not sl[2] in fields:\n continue\n\n if sl[2] != 'exon':\n print >> sys.stderr, 'Currently only >exon< is supported'\n sys.exit(1)\n\n tags = get_tags_gtf(sl[8])\n gene_id = tags['gene_id']\n trans_id = tags['transcript_id']\n\n if not gene2idx.has_key(tuple([gene_id])):\n gene2idx[tuple([gene_id])] = gene_counter\n idx2gene[gene_counter] = tuple([gene_id])\n gene_counter += 1\n\n try:\n start = int(sl[3]) - 1\n except ValueError:\n start = 0\n try:\n stop = int(sl[4])\n except ValueError:\n stop = 1\n\n chrm = sl[0]\n if chrm == 'chrM_rCRS':\n chrm = 'chrM'\n\n if not chrm in exons:\n exons[chrm] = dict()\n\n if options.mask_alternative_overlap:\n try:\n exons[chrm][trans_id].append([start, stop])\n except KeyError:\n exons[chrm][trans_id] = [[start, stop]]\n\n ### check, if there is already a different gene ID present, form a combination ID\n if sp.any(anno[chrm][start:stop] > 0):\n for p in range(start, stop):\n if anno[chrm][p] == 0:\n new_set = tuple([gene_id])\n else:\n new_set = tuple(set(idx2gene[anno[chrm][p]]) | set([gene_id]))\n try:\n anno[chrm][p] = gene2idx[new_set]\n except KeyError:\n anno[chrm][p] = gene_counter\n gene2idx[new_set] = gene_counter\n idx2gene[gene_counter] = new_set\n gene_counter += 1\n else:\n anno[chrm][start:stop] = sp.array([gene2idx[tuple([gene_id])]] * (stop - start), dtype = 'int32')\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n ### mask all positions in the genome, where we have more than one annotated gene\n if options.mask_gene_overlap:\n total_pos = 0\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to gene overlap:'\n for c in anno:\n masked_pos = 0\n p_idx = sp.where(anno[c] > 1)[0]\n pos = p_idx.shape[0]\n #print >> sys.stderr, 'found %i positions' % p_idx.shape[0]\n for p in p_idx:\n if len(idx2gene[anno[c][p]]) > 1:\n anno[c][p] = 1\n masked_pos += 1\n total_pos += pos\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i (%i) masked (total) - %.2f %%' % (c, masked_pos, pos, masked_pos / float(max(1, pos)) * 100)\n if options.verbose:\n print >> sys.stderr, \"Total positions: %i\\nMasked positions: %i (%.2f %%)\" % (total_pos, total_masked, total_masked / float(max(1, total_pos)) * 100)\n print >> sys.stderr, \"... 
done\"\n\n ### mask all positions in the genome, where exonic and intronic positions are annotated\n if options.mask_alternative_overlap:\n total_masked = 0\n if options.verbose:\n print >> sys.stderr, '\\nMasking positions due to exon/intron overlap:'\n for c in exons:\n masked_pos = 0\n for t in exons[c]:\n if len(exons[c][t]) < 2:\n continue\n ### pre-process exon\n tmp = sp.array(exons[c][t], dtype='int')\n s_idx = sp.argsort(tmp[:, 0])\n tmp = tmp[s_idx, :]\n ### mask positions that are intronic and exonic\n for e in range(1, tmp.shape[0]):\n p_idx = sp.where(anno[c][tmp[e - 1, 1] + 1:tmp[e, 0]] > 1)[0]\n if p_idx.shape[0] > 0:\n anno[c][p_idx + tmp[e - 1, 1] + 1] = 1\n masked_pos += p_idx.shape[0]\n total_masked += masked_pos\n if options.verbose:\n print >> sys.stderr, '\\t%s: %i pos masked' % (c, masked_pos)\n if options.verbose:\n print >> sys.stderr, 'Masked positions: %i' % total_masked\n print >> sys.stderr, \"... done\"\n\n if options.verbose:\n print >> sys.stderr, \"Storing exon array in HDF5 %s ...\" % (options.anno_hdf5 + '.exons.hdf5')\n\n ### store annotation in hdf5\n hdf_out = h5py.File(options.anno_hdf5 + '.exons.hdf5', 'w')\n for c in anno.keys():\n hdf_out.create_dataset(name = c, data = anno[c])\n hdf_out.close()\n\n if options.verbose:\n print >> sys.stderr, \"... pickling gene ID map\"\n\n cPickle.dump((idx2gene, gene2idx), open(options.anno_hdf5 + '.pickle', 'w'))\n\n if options.verbose:\n print >> sys.stderr, \"... done\"\n\n return (anno, idx2gene, gene2idx)",
"def load_gff(filepath):\n # GFF fields\n colnames = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand',\n 'phase', 'attributes']\n\n # get lines from file\n with open(filepath, 'r') as fp:\n lines = fp.readlines()\n\n # filter out non-gene entries\n gene_rows = [ x for x in lines if 'gene\\t' in x]\n\n # Next, let's create a StringIO buffer -- this is similar to the file\n # and url handles we have seen so far. We can then pass this to a csv\n # reader instance in the same way we have seen for actual files\n\n # First though, let's collapse the rows back into a single string\n csv_str = \"\".join(gene_rows)\n str_buffer = StringIO.StringIO(csv_str)\n\n return csv.DictReader(str_buffer, fieldnames=colnames, delimiter='\\t')",
"def read_gene_families(gftxt, protfile = None, cdsfile = None, wrkdir = None):\n gene_families = []\n if protfile is None and cdsfile is None:\n logging.info(\"Gene families need to have sequences!\")\n with open(gftxt, 'r') as f:\n for line in f:\n line = line.rstrip()\n x = line.split()\n gf_id = x.pop(0)[:-1]\n gf_genes = x\n gene_families.append(GeneFamily(gf_id=gf_id, gf_members=gf_genes))\n return gene_families\n \n if protfile is not None:\n prot = SeqIO.to_dict(SeqIO.parse(protfile, \"fasta\"))\n\n if cdsfile is not None:\n cds = SeqIO.to_dict(SeqIO.parse(cdsfile, \"fasta\"))\n \n with open(gftxt, 'r') as handle:\n for line in handle:\n line = line.rstrip()\n x = line.split()\n gf_id = x.pop(0)[:-1]\n gf_genes = x\n gf_prot = {}\n gf_cds = {}\n for gid in x:\n if prot[gid][-1:].seq == '*':\n gf_prot[gid] = prot[gid][:-1]\n else:\n gf_prot[gid] =prot[gid]\n if cds[gid][-3:].seq == \"TAA\" or \\\n cds[gid][-3:].seq == \"TAG\" or \\\n cds[gid][-3:].seq == \"TGA\":\n gf_cds[gid] = cds[gid][:-3]\n else:\n gf_cds[gid] = cds[gid]\n gene_families.append(GeneFamily(gf_id = gf_id, gf_members = gf_genes, \n prot_seqs = gf_prot, cds_seqs = gf_cds, wrkdir=wrkdir))\n return gene_families",
"def import_gff(file, genome_version, verbose = False):\n \n from tridentdb import models\n import re\n from django.db.utils import DatabaseError\n \n if genome_version == None:\n print(\"Genome Version is needed for loading a gff file.\")\n return\n\n genomes = models.Genome.objects.filter(genome_ver = genome_version)\n if len(genomes) == 0:\n print(\"Unknown Genome Version: %s\" % genome_version)\n return\n \n lineno = 1\n for line in file:\n if verbose:\n print(\"Line Number: %d\" % lineno)\n lineno += 1\n if not line:\n continue\n line = line.strip()\n if line[0] == '#':\n continue\n info = line.split('\\t')\n chromosome = info[0].replace(\"chr\", \"\")\n is_primary_transcript = (info[2] == 'miRNA_primary_transcript')\n genomic_mir_start = info[3]\n genomic_mir_end = info[4]\n is_on_positive_strand = (info[6] == '+')\n \n mirbase_id = mirbase_acc = mirbase_name = mirbase_derives_from = None\n mirbase = info[8].split(';')\n for tag in mirbase:\n (name, val) = tag.split('=')\n if name == \"ID\":\n mirbase_id = val\n elif name == \"accession_number\":\n mirbase_acc = val\n elif name == \"Alias\":\n # Use alias for accession_number IFF accession_number\n # is not used\n if not mirbase_acc:\n mirbase_acc = val\n elif name == \"Name\":\n mirbase_name = val\n elif name == \"derives_from\":\n mirbase_derives_from = val\n else:\n print(\"Unknown Mirbase tag: \\\"%s\\\"\" % name)\n continue\n\n mirna = models.MicroRNA(chromosome=chromosome, is_primary_transcript = is_primary_transcript, genomic_mir_start = genomic_mir_start, genomic_mir_end = genomic_mir_end, is_on_positive_strand = is_on_positive_strand, mirbase_id = mirbase_id, mirbase_acc = mirbase_acc, mirbase_name = mirbase_name, mirbase_derives_from = mirbase_derives_from, genome = genomes[0] )\n \n try:\n mirna.save()\n except DatabaseError as de:\n from sys import stderr\n stderr.write(\"Error loading GFF line: {0}\\n\".format(line))\n raise de\n ##end of import_gff",
"def read_gtf_file(gtf_file):\n genes = {}\n transcripts = {}\n exons = {}\n\n with open(gtf_file) as gtf:\n for line in gtf:\n line = line.strip()\n\n # Ignore header\n if line.startswith(\"#\"):\n continue\n\n # Split into constitutive fields on tab\n tab_fields = line.split(\"\\t\")\n chrom = tab_fields[0]\n entry_type = tab_fields[2]\n\n # Entry is a gene\n if entry_type == \"gene\":\n gene = Gene.get_gene_from_gtf(tab_fields)\n native_id = gene.identifier\n genes[native_id] = gene\n\n # Entry is a transcript\n elif entry_type == \"transcript\":\n transcript = Transcript.get_transcript_from_gtf(tab_fields)\n gene_id = transcript.gene_id\n if gene_id in genes:\n genes[gene_id].add_transcript(transcript)\n native_id = transcript.identifier\n transcripts[native_id] = transcript\n \n # Entry is an edge\n elif entry_type == \"exon\":\n exon = Edge.create_edge_from_gtf(tab_fields)\n # This ID is used because of a rare GENCODE bug\n location_exon_id = exon.identifier\n exons[location_exon_id] = exon \n\n transcript_id = list(exon.transcript_ids)[0]\n gene_id = exon.annotations[\"gene_id\"]\n \n if location_exon_id not in exons:\n # Add the new edge to the data structure\n exons[location_exon_id] = exon\n else:\n # Update existing exon entry, including its transcript set\n exon = exons[location_exon_id]\n exon.transcript_ids.add(transcript_id)\n \n if transcript_id in transcripts: \n currTranscript = transcripts[transcript_id]\n currTranscript.add_exon(exon)\n\n return genes, transcripts, exons",
"def filtraFileDiAnn(fileInput, geneNames):\n\n\t#---------------------\n\t# Creazione di una lista dove ogni elemento e' una riga del file \n\t# Ogni elem e' una lista di informazioni divise per colonne \n\t#\n\t# formato di un elemento di lines:\n\t#\n\t#\tPOSIZIONE \t\t\tCONTENUTO\n\t#\t\t0\t\t\t\t\tcromosoma\n\t#\t\t3\t\t\t\t\tstart\n\t#\t\t4\t\t\t\t\tend\n\t#\t\t6\t\t\t\t\tstrand\n\t#\t\t8\t\t\t\t\tgene_id\n\t#\t\t9\t\t\t\t\ttranscript_id\n\t#\t\t10\t\t\t\t\texon_number\n\t#\t\t11\t\t\t\t\tgene_name\n\t#\t\t12\t\t\t\t\ttranscript_name\t\n\t#\n\n\n\tstringa \t= '\\texon\\t'\n\tlines \t\t= []\n\tdictGeneChr = {}\n\t\n\t# Indici per il file di annotazione\n\t#\n\tidx_cromosoma = 0\n\tidx_geneName = 11\n\tidx_start = 3\n\tidx_end = 4\n\t\n\tfor x in open(fileInput):\n\t\triga = x.strip(';\\n').replace('; ','\\t').split('\\t')\n\n\t\tif not geneNames.has_key(riga[idx_geneName]):\n\t\t\tcontinue\n\t\t\t\t\n\t\t# Creazione del dizionario dei gene_name per ogni cromosoma\n\t\t#\n\t\tkey_geneChr = riga[idx_geneName] + '\\t' + riga[idx_cromosoma]\n\t\tif not dictGeneChr.has_key(key_geneChr):\n\t\t\tdictGeneChr[key_geneChr] = [riga[idx_start], riga[idx_end]]\n\t\telse:\n\t\t\t\n\t\t\t# Si aggiona il valore dello start del gene se si trova un \n\t\t\t# valore piu' piccolo\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][0]) > int(riga[idx_start]):\n\t\t\t\tdictGeneChr[key_geneChr][0] = riga[idx_start]\n\t\t\t\t\n\t\t\t# Si aggiorna il valore dell'end del gene se si trova un\n\t\t\t# valore piu' grande\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][1]) < int(riga[idx_end]):\t\n\t\t\t\tdictGeneChr[key_geneChr][1] = riga[idx_end]\n\t\t\n\t\t# Si filtra il file considerando solamente le regioni di tipo \"exon\"\n\t\t#\n\t\tif stringa in x:\n\t\t\tlines.append(riga)\n\n\treturn [lines, dictGeneChr]",
"def readGenes(gtf, tid=False):\n gs = {}\n #get all genes information\n print(\"reading annotaions from %s\" % gtf)\n for line in tqdm(open(gtf).read().split(\"\\n\")[:-1]):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != \"exon\":\n continue\n e = parseGtfLine(line, tid)\n if e.name not in gs:\n g = Gene()\n g.chrom = e.chrom\n g.start = e.start\n g.end = e.end\n g.strand = e.strand\n g.name = e.name\n g.id = e.id\n g.exons = {(e.start, e.end): e}\n gs[g.name] = g\n else:\n #same position exons\n if (e.start, e.end) in gs[e.name].exons:\n continue\n else:\n g = gs[e.name]\n if e.start < g.start:\n g.start = e.start\n if e.end > g.end:\n g.end = e.end\n g.exons[(e.start, e.end)] = e\n #get all genes information\n ngs = {} #key is chromosome\n for k, g in gs.items():\n if g.chrom not in ngs:\n ngs[g.chrom] = {}\n if g.strand == \"+\":\n tss = g.start\n else:\n tss = g.end\n #tss position is key, other information is value, for following search\n if tss not in ngs[g.chrom]:\n ngs[g.chrom][tss] = g\n return ngs",
"def load_gene_data_from_file(self) -> None:\n logger.info(\"Loading genes data from file\")\n if not self.gene_data or len(self.gene_data.items()) == 0:\n self.gene_data = {}\n file_path = self._get_cached_file(cache_path=self.gene_data_cache_path, file_source_url=self.gene_data_url)\n with open(file_path) as file:\n for line in file:\n fields = line.strip().split(',')\n if fields[1].startswith(\"WBGene\"):\n name = fields[2] if fields[2] != '' else fields[3]\n self.gene_data[\"WB:\" + fields[1]] = Gene(\"WB:\" + fields[1], name, fields[4] == \"Dead\", False)",
"def build_gff(annotations, faa):\n with open(faa, \"rt\") as faa_file:\n for line in faa_file:\n if \">\" not in line:\n continue\n\n # each fasta is suffixed on the annotated faa if a prefix _INT (_1 .. _n)\n contig_name, start, end, strand = parse_fasta_header(line)\n if None in (contig_name, start, end, strand):\n print(\n \"It was not possible to parse the \" + line, end=\"\", file=sys.stderr\n )\n continue\n\n clean_name = Annotation.clean_seq_name(contig_name)\n\n row_annotations = Annotation.merge(\n [ann.get() for ann in annotations.get(contig_name, [])]\n )\n\n ann_string = \";\".join(\n [\n \"{}={}\".format(k, \",\".join(v).strip())\n for k, v in row_annotations.items()\n ]\n )\n\n eggNOGScore = \"\".join(row_annotations.get(\"eggNOG_score\", []))\n\n if len(ann_string):\n yield [\n clean_name,\n \"eggNOG-v2\",\n \"CDS\",\n start,\n end,\n eggNOGScore or \".\",\n \"+\" if strand == \"1\" else \"-\",\n \".\",\n \"ID=\" + clean_name + \";\" + ann_string,\n ]",
"def readGenes(gtf):\n #read gtf\n genes = HTSeq.GenomicArrayOfSets(\"auto\", stranded=False)\n gs = {}\n for line in open(gtf):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != 'exon':\n continue\n ds = parseGtfFeature(line[8])\n key = \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]])\n nline = [\n line[0], line[3], line[4],\n \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]]), \".\", line[6]\n ]\n if key not in gs:\n gs[key] = [line[0], int(line[3]), int(line[4])]\n else:\n if int(line[3]) < gs[key][1]:\n gs[key][1] = int(line[3])\n if int(line[4]) > gs[key][2]:\n gs[key][2] = int(line[4])\n for g, v in gs.items():\n iv = HTSeq.GenomicInterval(v[0], v[1], v[2])\n genes[iv] += g\n return genes",
"def load_ga_file(self, filename):\n data_array, metric, basis_names, support = read_ga_file(filename)\n if not np.allclose(np.diagonal(metric), self.sig):\n raise ValueError('The signature of the ga file does not match this layout')\n return MVArray.from_value_array(self, data_array)",
"def read_gff(gff):\n genome = getseq(args.genome)\n dictoftranscripts = {}\n for k in open(gff):\n if not k.startswith(\"#\"):\n lines = k.strip().split(\"\\t\")\n if lines[2] == \"exon\":\n strand = lines[6]\n chromosome = lines[0]\n start = lines[3]\n end = lines[4]\n transcriptid = re.search(\"Parent=transcript:(.*)\", lines[8]).group(1)\n if transcriptid + \"#\" + chromosome in dictoftranscripts:\n dictoftranscripts[transcriptid + \"#\" + chromosome].extend([start, end])\n else:\n dictoftranscripts[transcriptid + \"#\" + chromosome] = []\n dictoftranscripts[transcriptid + \"#\" + chromosome].extend([start, end])\n\n for key, value in dictoftranscripts.iteritems():\n value.sort()\n print value\n for coord1 in value:\n\n for coord2 in value[1:]:\n #print coord1, coord2\n if int(coord1) != int(value[-1]) and value.index(coord2) != value.index(coord1)+1 and value.index(coord2) > value.index(coord1):\n\n exon1_start = int(coord1)\n exon1_end = int(coord2)\n #print exon1_start, exon1_end\n #print key.split(\"#\")[1]\n #print value.index(coord1), value.index(coord2)\n exon_seq = genome.get(key.split(\"#\")[1],\"NA\")\n\n if exon_seq != \"NA\":\n sequence_exon = exon_seq[exon1_start:exon1_end+1]\n #print exon1_start, exon1_end, sequence_exon\n for start, end, strand, frame, pro in translate(sequence_exon):\n junction =\n print start, end, strand, frame, pro",
"def load_annots(annot_file):\n with open(annot_file, 'r') as annot:\n data = annot.read().split('\\n')\n for line in data:\n temp = line.split(',')\n db_annot.setdefault(temp[0], temp[1:4])",
"def load_annos(self, anno_path):\n\n if os.path.exists(anno_path) is False or os.path.isfile(anno_path) is False or anno_path.endswith('txt') is False:\n print(\"Wrong path: not exist or not a txt file: %s\" % anno_path)\n return None, None\n\n list_file_id, list_anno_id = [], []\n list_x, list_y, list_w, list_h = [], [], [], []\n list_blur, list_expr, list_illum, list_occ, list_pose, list_inval = [], [], [], [], [], []\n anno_id = 0\n\n list_id = []\n list_filename = []\n file_id = 0\n\n num_annos_total = 0\n\n with open(anno_path) as afile:\n line = \"begin\"\n while line != \"\":\n line = afile.readline()\n\n if line.rstrip().endswith('jpg'): # it is a file\n file_name = line.strip()\n list_id.append(file_id)\n list_filename.append(file_name)\n\n num_annos = int(afile.readline().strip())\n\n for i in range(num_annos):\n px, py, pw, ph, blur, expr, illum, inval, occ, pose = afile.readline().strip().split(' ')\n px, py, pw, ph = int(px), int(py), int(pw), int(ph)\n\n if pw == 0 or ph == 0: # ignore invalid faces (0 width or height)\n continue\n\n if pw < 0:\n px = px+pw\n pw = abs(pw)\n if ph < 0:\n py = py+ph\n ph = abs(ph)\n\n list_file_id.append(file_id)\n list_anno_id.append(anno_id)\n list_x.append(px)\n list_y.append(py)\n list_w.append(pw)\n list_h.append(ph)\n list_blur.append(int(blur))\n list_expr.append(int(expr))\n list_illum.append(int(illum))\n list_occ.append(int(occ))\n list_pose.append(int(pose))\n list_inval.append(int(inval))\n anno_id = anno_id + 1\n\n file_id = file_id + 1\n num_annos_total += num_annos\n\n files = {'id': np.array(list_id), 'filename': list_filename }\n annos = {'file_id': np.array(list_file_id), 'anno_id': np.array(list_anno_id), \\\n 'x': np.array(list_x), 'y': np.array(list_y), \\\n 'w': np.array(list_w), 'h': np.array(list_h), \\\n 'blur': np.array(list_blur), 'expression': np.array(list_expr), \\\n 'illumination': np.array(list_illum), 'occlusion': np.array(list_occ), \\\n 'pose': np.array(list_pose), 'invalid': np.array(list_inval) }\n\n assert (len(list_id) == len(list_filename)), \\\n \"file_id and filename lists should have the same length\"\n\n self._num_annos = num_annos_total\n self._num_images = file_id\n\n return files, annos",
"def load_gltf(self):\n with open(str(self.path)) as fd:\n self.gltf = GLTFMeta(self.path, json.load(fd), self.meta)",
"def load_genes(outdir, confidence, validate_sg, verbose, idx=None, genes=None):\n\n if not genes is None:\n if not idx is None:\n return copy.deepcopy(genes[idx])\n else:\n gene_file = _get_gene_fname(outdir, confidence, validate_sg)\n\n if verbose:\n print('loading annotation information from %s' % gene_file)\n if idx is None:\n (genes, events) = pickle.load(open(gene_file, 'rb'), encoding='latin1')\n else:\n gene_db_file = re.sub(r'.pickle$', '', gene_file) + '.db.pickle'\n gene_idx_file = re.sub(r'.pickle$', '', gene_file) + '.idx.pickle'\n if os.path.exists(gene_idx_file):\n genes = []\n offsets = pickle.load(open(gene_idx_file, 'rb'))\n gene_handle = open(gene_db_file, 'rb')\n if not hasattr(idx, '__iter__'):\n idx = [idx]\n for e in idx:\n gene_handle.seek(offsets[e], 0)\n genes.append(pickle.load(gene_handle), encoding='latin1')\n genes = sp.array(genes)\n else:\n (genes, events) = pickle.load(open(gene_file, 'rb'), encoding='latin1')\n genes = genes[idx]\n\n return genes",
"def parse_JGI_GFF2_file(infile_GFF2, stop_on_error=False): \n for line in open(infile_GFF2):\n if line.startswith('#'): continue\n fields = line.strip().split('\\t')\n annotations = {}\n for ann in fields[8].split('; '):\n try:\n if '\"' in ann:\n key, val = ann.split(' \"')\n val = val.strip('\"')\n else:\n key, val = ann.split(' ')\n annotations[key] = val\n except ValueError:\n error_msg = \"Can't parse this annotation into a key:value pair! %s\"%ann\n if stop_on_error: raise ValueError(error_msg)\n else: print \"ERROR: %s\"%error_msg\n continue\n yield fields[:8], annotations\n # TODO unit-test?",
"def geneactivity(adata,\n gtf_file,\n key_added='gene',\n upstream=5000,\n feature_type='gene',\n annotation='HAVANA',\n layer_name='geneactivity',\n raw=False, \n copy=True):\n ### extracting the genes\n gtf = {}\n with open(gtf_file) as f:\n for line in f:\n if line[0:2] != '##' and '\\t'+feature_type+'\\t' in line and '\\t'+annotation+'\\t' in line:\n line = line.rstrip('\\n').split('\\t')\n if line[6] == '-':\n if line[0] not in gtf.keys():\n gtf[line[0]] = [[int(line[3]), int(line[4])+upstream,line[-1].split(';')[:-1]]]\n else:\n gtf[line[0]].append([int(line[3]), int(line[4])+upstream,line[-1].split(';')[:-1]])\n else:\n if line[0] not in gtf.keys():\n gtf[line[0]] = [[int(line[3])-upstream, int(line[4]),line[-1].split(';')[:-1]]]\n else:\n gtf[line[0]].append([int(line[3])-upstream, int(line[4]),line[-1].split(';')[:-1]])\n\n # extracting the feature coordinates\n if raw==True:\n raw_adata = adata.raw.to_adata()\n else:\n raw_adata = adata.copy()\n raw_adata_features = {}\n feature_index = 0\n for line in raw_adata.var_names.tolist():\n line = line.split('_')\n if line[0] not in raw_adata_features.keys():\n raw_adata_features[line[0]] = [[int(line[1]),int(line[2]), feature_index]]\n else:\n raw_adata_features[line[0]].append([int(line[1]),int(line[2]), feature_index])\n feature_index += 1\n \n ## find the features overlaping the genes. and build the count matrix\n gene_index = []\n gene_activity_X = []\n \n for chrom in gtf.keys():\n if chrom in raw_adata_features.keys():\n #print(chrom)\n chrom_index = 0\n previous_features_index = 0\n for gene in gtf[chrom]:\n gene_values = []\n gene_start = gene[0]\n gene_end = gene[1]\n for feature in raw_adata_features[chrom]:\n feature_index = 0\n if (feature[1]<= gene_start): # the window is before the gene. we need to test the next window.\n continue\n elif (gene_end <= feature[0]): # the window is after the gene. we need totest the next gene.\n break\n else: # the window is overlapping the gene. 
\n gene_values.append(raw_adata.X[:,feature[2]].todense())\n if gene_values != []:\n gene_activity_X.append(np.sum(gene_values, axis=0))\n gene_index.append(gene[-1])\n \n\n gene_activity_X = np.concatenate(tuple(gene_activity_X), axis=-1)\n\n # get the variable metadata\n gene_name = []\n if feature_type=='transcript':\n for x in gene_index:\n for y in x:\n if 'transcript_name' in y:\n gene_name.append(y.lstrip(' transcript_name \"').rstrip('\"'))\n #gene_name = [x[7].lstrip(' transcript_name \"').rstrip('\"') for x in gene_index]\n elif feature_type=='gene':\n for x in gene_index:\n for y in x:\n if 'gene_name' in y:\n gene_name.append(y.lstrip(' gene_name \"').rstrip('\"'))\n #gene_name = [x[4].lstrip(' gene_name \"').rstrip('\"') for x in gene_index]\n\n metadata_genes = {'gene_id' : [],\n 'transcript_id' : [],\n 'gene_type' : [],\n 'gene_name' : [],\n 'transcript_type' : [],\n 'transcript_name' : [],\n 'protein_id' : []}\n\n for line in gene_index:\n dico_line = {}\n for element in line:\n if ' \"' in element:\n dico_line[element.rstrip('\"').lstrip(\" \").split(' \"')[0]] = element.rstrip('\"').lstrip(\" \").split(' \"')[1]\n \n for key in metadata_genes.keys():\n if key in dico_line.keys():\n metadata_genes[key].append(dico_line[key])\n else:\n metadata_genes[key].append('NA') \n \n dataframe_genes = pd.DataFrame.from_dict(metadata_genes)\n dataframe_genes.index = gene_name\n\n #adata.layers[layer_name] = ad.AnnData(gene_activity_X, var=dataframe_genes, obs=raw_adata.obs)\n gene_adata = ad.AnnData(gene_activity_X, var=dataframe_genes, obs=raw_adata.obs)\n gene_adata.uns = adata.uns.copy()\n gene_adata.obsm = adata.obsm.copy()\n gene_adata.obsp = adata.obsp.copy()\n return(gene_adata)",
"def load_annotations(self):\n fname, aux = QFileDialog.getOpenFileName(self, 'Open file', '', \"(*.csv)\")\n if fname != '':\n self.model.AnnotationLoad(fname=fname)",
"def process_all_leading_genes(f_path):\n with open(f_path, 'r') as f:\n contents = f.read()\n parts = contents.strip().split('\\t')\n genes = parts[2:]\n return genes",
"def parse_b2go(annot_f, annot_d=None):\n with open(annot_f, \"r\") as f:\n\n for line in f:\n line = line.strip().split(\"\\t\")\n seq_id = line[0]\n\n # Create a dict entry if new seqid\n # This is weird (should it be created out of the for loop ?)\n if not annot_d:\n annot_d = {}\n\n if seq_id not in annot_d:\n annot_d[seq_id] = {}\n\n # Check and sort by annotation type\n for i in line[1:]:\n\n if i.startswith(\"GO:\"):\n # Method for creating or appending to existing list\n annot_d[ seq_id ].setdefault(\"GOs\", []).append(i)\n\n elif i.startswith(\"EC:\"):\n annot_d[ seq_id ].setdefault(\"ECs\", []).append(i)\n\n # Should not have more than 1 annot but for doublec hecking\n else:\n annot_d[ seq_id ].setdefault(\"annot\",[]).append(i)\n\n return annot_d"
] | [
"0.66905075",
"0.6432617",
"0.6350883",
"0.627544",
"0.62443864",
"0.61875546",
"0.61622775",
"0.6161676",
"0.61342305",
"0.6079505",
"0.60508186",
"0.6028002",
"0.6026591",
"0.60153025",
"0.60118026",
"0.60092837",
"0.600845",
"0.600644",
"0.5981259",
"0.5959721",
"0.59460914",
"0.59417135",
"0.59380794",
"0.5934507",
"0.5917084",
"0.58971614",
"0.58959365",
"0.58949757",
"0.58899784",
"0.5886434"
] | 0.7997125 | 0 |
function used for evaluation with PPI dataset | def evaluate(self, dataset):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self, plot):",
"def evaluate_design(self): # to update the pr object",
"def evaluate(self, prediction_fn):\n pass",
"def test_predictor():",
"def evaluate(self,p):\n if not self.initialized: self.__initialize__()\n if self.vp0: p_ = 1-p\n else: p_ = p\n if self.ids_to_consider is None:\n #sum on all parametrized cell\n cf = np.sum(self.V[self.p_ids-1]*p_)/self.V_tot - self.max_v_frac\n else:\n cf = np.sum((self.V[self.ids_to_consider-1]*p_))/self.V_tot - self.max_v_frac\n return cf",
"def evaluate(self) :\n pass",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def eval_pp(net, z, zt):\n if net.name != 'lbl':\n Im = zt['IM']\n else:\n Im = None\n pp = lm_tools.perplexity(net, zt['ngrams'], z['word_dict'], Im=Im, context=net.context)\n print 'PERPLEXITY: ' + str(pp)",
"def test_from_func_variational(pres, nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph, function):\n print(pres)\n _, samp_train, samp_test, labels = function(nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph)\n\n if len(labels) == 2:\n print(\"Success for my implementation (second order, variational):\")\n my_impl_variational(samp_train, samp_test, labels)\n\n return 0",
"def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...",
"def pvalue(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n if _is_proportion(control, test):\n return ztest(control, test, alternative='two-sided')[1]\n else:\n return ttest_ind(control, test, alternative='two-sided')[1]\n\n return _apply(data, fn, control_label)",
"def p(self):\n return hlp.parms(self.y(0))",
"def EvaluatePointDataField(self, *float, **kwargs):\n ...",
"def proba(c_pred,m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:2]))\n elif i <4:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[2:4]))\n if i >=4:\n if i <6:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[6:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[4]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[8:10]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:3]))\n elif i <5:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[3:5]))\n if i >=5:\n if i <8:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[5:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)",
"def evaluation_result(para_file, result_shp,val_shp,evaluation_txt=None):\n basic.outputlogMessage(\"evaluation result\")\n IoUs = vector_features.calculate_IoU_scores(result_shp,val_shp)\n if IoUs is False:\n return False\n\n #save IoU to result shapefile\n operation_obj = shape_opeation()\n operation_obj.add_one_field_records_to_shapefile(result_shp, IoUs, 'IoU')\n\n iou_threshold = parameters.get_digit_parameters(para_file,'IOU_threshold','float')\n true_pos_count = 0\n false_pos_count = 0\n val_polygon_count = operation_obj.get_shapes_count(val_shp)\n # calculate precision, recall, F1 score\n for iou in IoUs:\n if iou > iou_threshold:\n true_pos_count += 1\n else:\n false_pos_count += 1\n\n false_neg_count = val_polygon_count - true_pos_count\n if false_neg_count < 0:\n basic.outputlogMessage('warning, false negative count is smaller than 0, recall can not be trusted')\n\n precision = float(true_pos_count) / (float(true_pos_count) + float(false_pos_count))\n recall = float(true_pos_count) / (float(true_pos_count) + float(false_neg_count))\n if (true_pos_count > 0):\n F1score = 2.0 * precision * recall / (precision + recall)\n else:\n F1score = 0\n\n #output evaluation reslult\n if evaluation_txt is None:\n evaluation_txt = \"evaluation_report.txt\"\n f_obj = open(evaluation_txt,'w')\n f_obj.writelines('true_pos_count: %d\\n'%true_pos_count)\n f_obj.writelines('false_pos_count: %d\\n'% false_pos_count)\n f_obj.writelines('false_neg_count: %d\\n'%false_neg_count)\n f_obj.writelines('precision: %.6f\\n'%precision)\n f_obj.writelines('recall: %.6f\\n'%recall)\n f_obj.writelines('F1score: %.6f\\n'%F1score)\n f_obj.close()\n\n ##########################################################################################\n ## another method for calculating false_neg_count base on IoU value\n # calculate the IoU for validation polygons (ground truths)\n IoUs = vector_features.calculate_IoU_scores(val_shp, result_shp)\n if IoUs is False:\n return False\n\n # if the IoU of a validation polygon smaller than threshold, then it's false negative\n false_neg_count = 0\n idx_of_false_neg = []\n for idx,iou in enumerate(IoUs):\n if iou < iou_threshold:\n false_neg_count += 1\n idx_of_false_neg.append(idx+1) # index start from 1\n\n precision = float(true_pos_count) / (float(true_pos_count) + float(false_pos_count))\n recall = float(true_pos_count) / (float(true_pos_count) + float(false_neg_count))\n if (true_pos_count > 0):\n F1score = 2.0 * precision * recall / (precision + recall)\n else:\n F1score = 0\n # output evaluation reslult\n\n # evaluation_txt = \"evaluation_report.txt\"\n f_obj = open(evaluation_txt, 'a') # add to \"evaluation_report.txt\"\n f_obj.writelines('\\n\\n** Count false negative by IoU**\\n')\n f_obj.writelines('true_pos_count: %d\\n' % true_pos_count)\n f_obj.writelines('false_pos_count: %d\\n' % false_pos_count)\n f_obj.writelines('false_neg_count_byIoU: %d\\n' % false_neg_count)\n f_obj.writelines('precision: %.6f\\n' % precision)\n f_obj.writelines('recall: %.6f\\n' % recall)\n f_obj.writelines('F1score: %.6f\\n' % F1score)\n # output the index of false negative\n f_obj.writelines('\\nindex (start from 1) of false negatives: %s\\n' % ','.join([str(item) for item in idx_of_false_neg]))\n f_obj.close()\n\n pass",
"def ppf(self,x):\n # TODO speed this up by doing it in Crow, not in python\n if hasattr(x,'__len__'):\n returnPpf = np.array([self.ppf(i) for i in x])\n else:\n returnPpf = self._distribution.inverseCdf(x)\n return returnPpf",
"def evaluate(self) -> int:",
"def ppf(self,x):\n return self.categoricalDist.ppf(x)",
"def evaluate(self, test_data, test_labels):\n raise NotImplementedError",
"def prf_analysis(y_true: list, y_pred: list) -> None:\n print('Precision: {:,.2f}'.format(precision_score(y_true, y_pred)))\n print('Recall : {:,.2f}'.format(recall_score(y_true, y_pred)))\n print('F1 : {:,.2f}'.format(f1_score(y_true, y_pred)))\n print('Accuracy : {:,.2f}'.format(accuracy_score(y_true, y_pred)))\n return None",
"def evaluation(model_path, threshold):\n classifier = joblib.load(model_path)\n\n positive = np.load(\"./processed_data/validation/positive.npy\")\n unlabeled = np.load(\"./processed_data/validation/unlabeled.npy\")\n\n p_result = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n plt.hist(p_result, bins=300)\n plt.show()\n\n tp_rate = np.where(p_result >= threshold, 1, 0).sum() / p_result.shape[0]\n print(tp_rate)\n\n u_result = np.array(classifier.predict_proba(unlabeled[:, :-1])[:, 1])\n plt.hist(u_result, bins=300)\n plt.show()\n\n\n # the following steps aim to filter 'possible' negative instances in the evaluation-unlabeled set\n stageone_classifier = joblib.load(\"./solver_result/liblinear/0.01/logistic.pkl\")\n stgone_result = np.array(stageone_classifier.predict_proba(unlabeled[:,:-1])[:, 1])\n possibly_negative = unlabeled[np.where(stgone_result <= _negative_threshold)]\n print(positive.shape)\n print(unlabeled.shape)\n print(possibly_negative.shape)\n possi_ng_result = np.array(classifier.predict_proba(possibly_negative[:, :-1])[:, 1])\n fp_rate = np.where(possi_ng_result >= threshold, 1, 0).sum() / possi_ng_result.shape[0]\n plt.hist(possi_ng_result, bins=300)\n plt.show()\n\n print(fp_rate)\n print(\"TP: \" + str(tp_rate) + \" FP: \" + str(fp_rate) + \" GMean: \" + str(math.sqrt(tp_rate * (1 - fp_rate))))",
"def func_Ip_318(pp, pd):\n return pp/(np.pi*(pd/2)**2)",
"def affichage(sequenceRules,sequenceData,Paires,ecart,tolerance) :\n print(\"-------------sequenceData---------------\")\n print(sequenceData)\n #groupPaires\n groupPairesResult=groupPaires(Paires,10)\n #print(\"--------Group Paires: --------------\")\n #print(groupPairesResult)\n \n #biggerGroup\n #biggerGroupResult=biggerGroup(sequence,groupPairesResult,2)\n #print(\"--------Bigger group : --------------\")\n #print(biggerGroupResult)\n\n #winepi\n winepiResultGroupPaire=winepi(groupPairesResult,0.6)\n #print(\"--------Winepi group paire : --------------\")\n #print_big_rules(winepiResultGroupPaire)\n\n #winepiResult=winepi(biggerGroupResult,0.8)\n #print(\"--------Winepi bigger Group : --------------\")\n #print_rules(winepiResult)\n \n \n print(\"\\n\")\n print(\"-------event->paire----------\")\n winepiResult=winepi2(sequenceRules,Paires,0.6)\n predict_e=prediction2(sequenceData,Paires,winepiResult)\n #for p in predict_e :\n # print(p)\n #print(\"\\n\")\n rules=accuracy(predict_e,sequenceData,winepiResult)\n n_ok,n_ko,n_bet,avg_gap =analyse_result(rules,tolerance)\n #print_event_rules(rules)\n print(\"rules ok : \",n_ok,\" rules under : \",n_ko,\" rules over : \",n_bet,\" average gap : \",avg_gap)\n \n \n \n #print(\"--------Winepi group paire : --------------\")\n #print_rules(winepiResult)\n\n #winepiResult=winepi(biggerGroupResult,0.8)\n print(\"--------Winepi bigger Group : --------------\")\n #print_rules(winepiResult)\n print(\"\\n\")\n print(\"--------Prédiction : paire-> triplet --------------\")\n predict_p=prediction(sequenceData,Paires,winepiResultGroupPaire,ecart)\n #for p in predict_p :\n # print(p)\n #print(\"\\n\")\n rules=accuracy(predict_p,sequenceData,winepiResultGroupPaire)\n #print_big_rules(rules)\n print(\"\\n\")\n n_ok,n_ko,n_bet,avg_gap =analyse_result(rules,tolerance)\n print(\"rules ok : \",n_ok,\" rules under : \",n_ko,\" rules over : \",n_bet,\" average gap : \",avg_gap)\n print(\"\\n\\n\")\n\n return None",
"def p(self) -> Probability:\n ...",
"def p_methods(data, pv_index=0, alpha = 0.05):\n\n #### Raise an error for an impossible alpha value\n if (alpha>= 1) or (alpha<= 0):\n raise ProbabilityError(\"alpha needs to be between 0 and 1!\")\n \n ####if it's a pd.dataframe, rename to col header\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n ###or make a vector a pd.dataframe\n else:\n data = pd.DataFrame({\"p_value\": data})\n\n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n raise ProbabilityError(\"One or more p-values is not between 0 and 1!\") \n\n ###set the size of the data\n m = data.shape[0]\n\n ###find the smallest p_value st. p<k*alpha/m (BH method):\n ##set the rank, making ties the minimum\n df =data.sort_values(by=[\"p_value\"])\n df[\"rank\"]=round(df.rank(axis=0, method = 'min')[\"p_value\"])\n df[\"bh_value\"] = alpha*df[\"rank\"]/m\n df_temp = df\n df_temp[\"bh_sig\"]= np.where(df_temp[\"p_value\"] <= df_temp[\"bh_value\"], True, False)\n df_temp =df_temp[df_temp[\"bh_sig\"]==True]\n\n ###the maximum true value\n\n if len(df_temp[\"bh_sig\"]) == 0:\n max_true = 0\n else:\n max_true = max(df_temp[\"rank\"])\n\n ####Back to cool dataframe work!\n df[\"bh_significant\"]=np.where(df[\"rank\"]<=max_true, True, False)\n df[\"bonf_value\"] = alpha/m\n df[\"bonf_significant\"] = np.where(df[\"p_value\"]<=df[\"bonf_value\"], True, False)\n df = df.drop(['rank'], axis=1)\n df = df.drop(['bh_sig'], axis=1)\n\n return(df)",
"def predict_proba(self):\n ...",
"def evaluate(self, test_data):\n test_results = [(self.decide(self.feedforward(x)), y)\n for (x, y) in test_data]\n\n # Check for and keep track of TP's FP's and FN's\n # Write FP's and FN's to a special directories\n TP = FP = FN = count = 0\n TN = 0\n # what you got = x, what should be = y\n for ((x, y), (image, gt)) in zip(test_results, test_data):\n count += 1\n if x == 1 and y == 1:\n TP += 1\n elif x == 1 and y == 0:\n FP += 1\n if not self.validating: self.save_Image(FP_PATH, count, image)\n elif x == 0 and y == 1:\n FN += 1\n if not self.validating: self.save_Image(FN_PATH, count, image)\n \n if (TP + FP ) == 0:\n return TP, FP, FN, '---------', '---------','---------'\n else:\n false_rate = float(FP) / float(TP + FP)\n\n if (TP + FN ) == 0:\n return TP, FP, FN, '---------', '---------', false_rate\n else:\n detect_rate = float(TP) / float(TP + FN)\n\n if (TP + FP + FN ) == 0:\n return TP, FP, FN, '---------', detect_rate, false_rate\n else:\n quality_rate = float(TP) / float(TP+FP+FN)\n \n return TP, FP, FN, quality_rate, detect_rate, false_rate",
"def main():\n\t# \"\"\"\n\t# \tMain function of test python module\n\t# \"\"\"\n\t# random.seed(os.urandom(345634)) # initialize random generator\n\t# t = np.linspace(0.0, 24.0, 96.0) # define the time axis of a day, here we use 96 values every quarter of an hour\n\t# # standard load profile -- input\n\t# q = extra.read_slp(t,\n\t# 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv') # read the sample standard load profile, can be any length, can be resized given a low/high resolution time axis\n\t# q = q / np.sum(q) # normalization of standard load profile\n\t# # process duration\n\t# duration_axis = np.linspace(0.0, 24.0, 96.0)\n\t# (p_d, E_p) = extra.app_time(duration_axis, 10, 2, 0.0,\n\t# 24.0) # function that define the pdf of duration of a process\n\t# # process consumption\n\t# consumption_axis = np.linspace(0.0, 3.5, 96.0)\n\t# (p_k, E_k) = extra.app_consumption(consumption_axis, 10, 2, 0.0,\n\t# 3.5) # function that define the pdf of duration of a process\n\t# # pdf of starting time\n\t# p_t_0 = lpd.infer_t_0(q, p_d, E_k) # computes the pdf of starting time of processes\n\t# p_t_0 = p_t_0 / np.sum(p_t_0) # normalization of the pdf to sum up to zero\n #\n\t# \"\"\"\n\t# 1st Approach, starting time of processes is a discrete propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# synthetic_profile = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# synthetic_profile_1 = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# # expected value of D processes\n\t# q_e_e = lpd.infer_q_e(t, p_t_0, p_d, E_k, D)\n\t# # plot\n\t# plt.step(t, synthetic_profile, \"g-\")\n\t# plt.step(t, q_e_e, \"b--\")\n #\n\t# \"\"\"\n\t# 2nd Approach, starting time of processes is a continuous propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# ts, cs = lpd.continous_synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.xlim(0, 24.0)\n\t# plt.legend([\"synthetic\", \"expected\", \"continuous\"], loc=0)\n\t# plt.show()\n #\n\t# \"\"\"\n\t# Time discretization\n\t# \"\"\"\n\t# n_intervals = 24 * 1 # discretized in minutes\n\t# discrete_timeaxis = np.linspace(0.0, 24.0, n_intervals + 1)\n\t# discrete_consumption = lpd.signal_discretization(discrete_timeaxis, t, ts, cs)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.step(discrete_timeaxis, discrete_consumption, where='post', c='k', ls='--', lw=2)\n\t# plt.legend([\"continuous\", \"discretized\"], loc=0)\n\t# plt.show()\n #\n #\n\t# \"\"\"\n\t# Repeated day synthetic profile creation\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# n = 10\n\t# slp = lpd.synthetic_profile_repeated(D, t, p_d, consumption_axis, p_k, p_t_0, n)\n\t# plt.step(range(len(slp)), slp, \"g-\")\n\t# plt.show()\n\tt = np.linspace(0.0, 24.0, 96.0)\n\tload_profile = extra.read_slp(t, 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv')\n\tslp = synthetic.create_synthetic_load(load_profile, 5.0, 5)\n\tplt.step(range(len(slp)), slp)\n\tplt.show()",
"def decision_function(self, X):\n ..."
] | [
"0.63488966",
"0.62352973",
"0.62028337",
"0.6166929",
"0.60389817",
"0.603585",
"0.601392",
"0.601392",
"0.5865932",
"0.58606446",
"0.5839742",
"0.58377177",
"0.5827857",
"0.5818832",
"0.5813053",
"0.5810459",
"0.58043694",
"0.57498485",
"0.5742421",
"0.5736939",
"0.57332903",
"0.57308",
"0.5724836",
"0.5721199",
"0.57191753",
"0.5718593",
"0.5709597",
"0.57032275",
"0.56905437",
"0.5687866"
] | 0.67466784 | 0 |
Decorator for views that checks that the user passes the given test, redirecting to the login page if necessary. The test should be a callable that takes the user object and returns True if the user passes. | def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
if not login_url:
login_url = LOGIN_URL
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if test_func(request):
return view_func(request, *args, **kwargs)
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return CustomRedirect(request, '%s?%s=%s' % tup)
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):\n if not login_url:\n login_url = settings.LOGIN_URL\n\n def decorator(view_func):\n def _wrapped_view(request, *args, **kwargs):\n if test_func(request):\n return view_func(request, *args, **kwargs)\n path = urlquote(request.get_full_path())\n tup = login_url, redirect_field_name, path\n return HttpResponseRedirect('%s?%s=%s' % tup)\n return _wrapped_view\n return decorator",
"def login_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif not users.get_current_user():\n\t\t\treturn redirect(users.create_login_url(request.url))\n\t\treturn func(*args, **kwargs)\n\treturn decorated_view",
"def access_allowed(test_func, redirect_url=None):\n def decorate(view_func):\n def wrapper(request, *args, **kwargs):\n if test_func(request.user):\n return view_func(request, *args, **kwargs)\n raise PermissionDenied\n return update_wrapper(wrapper, view_func)\n return decorate",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # checks is user login\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"login\"))\n\n return view(**kwargs)\n\n return wrapped_view",
"def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view",
"def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view",
"def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n\n return view(**kwargs)\n\n return wrapped_view",
"def login_required(view_func):\n @wraps(view_func)\n def _checklogin(request, *args, **kwargs):\n if request.user.is_active:\n # The user is valid. Continue to the admin page.\n return view_func(request, *args, **kwargs)\n return site.login(request)\n return _checklogin",
"def student_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):\n actual_decorator = user_passes_test(\n BaseUser.is_student,\n login_url=login_url,\n redirect_field_name=redirect_field_name\n )\n if view_func:\n return actual_decorator(view_func)\n return actual_decorator",
"def check_authorization_user(login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):\n def decorator(view_func):\n @wraps(view_func)\n def _wrapped_view(request, *args, **kwargs):\n if user_roles_check(request):\n return view_func(request, *args, **kwargs)\n path = args[0].path\n resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)\n return redirect_to_login(\n path, resolved_login_url, redirect_field_name)\n return _wrapped_view\n return decorator",
"def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n current = users.get_current_user()\n if not current:\n return redirect(users.create_login_url(request.url))\n elif current.email() == '[email protected]':\n return func(*args, **kwargs)\n else:\n return redirect(users.create_logout_url(request.url))\n return decorated_view",
"def login_required(view_function):\n\t@wraps(view_function) # Tells debuggers that is is a function wrapper\n\tdef decorator(*args, **kwargs):\n\t\tauth = current_app.auth\n\t\tallowed = False\n\t\t# User must be logged in\n\t\tif current_user.is_authenticated:\n\t\t\t# User must be verified (if required)\n\t\t\tif auth.AUTH_ENABLE_CONFIRM_ACCOUNT and current_user.verified:\n\t\t\t\tallowed=True\n\t\t\t# User can be not verified (if allowed)\n\t\t\telif not auth.AUTH_ENABLE_CONFIRM_ACCOUNT:\n\t\t\t\tallowed=True\n\t\tif not allowed:\n\t\t\t# Redirect to unauthenticated page\n\t\t\treturn auth.unauthenticated()\n\t\t# It's OK to call the view\n\t\treturn view_function(*args, **kwargs)\n\treturn decorator",
"def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.USER is None:\n return redirect(\n url_for(\"home\", force_login=True, next=request.url)\n )\n\n return f(*args, **kwargs)\n\n return decorated_function",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"index\"))\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(fn):\n\n def _dec(view_func):\n def _checklogin(request, *args, **kwargs):\n\n user = users.get_current_user()\n if user:\n return view_func(request, *args, **kwargs)\n\n else:\n return HttpResponseRedirect(users.create_login_url(request.get_full_path()))\n\n _checklogin.__doc__ = view_func.__doc__\n _checklogin.__dict__ = view_func.__dict__\n\n return _checklogin\n\n return _dec(fn)",
"def login_required(f):\r\n @wraps(f)\r\n def wrap(*args, **kwargs):\r\n if \"logged_in\" in session:\r\n return f(*args, **kwargs)\r\n return redirect(\"/user/login\")\r\n return wrap",
"def login_required(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if g.user is None:\n flash('You have to log in first')\n return redirect(url_for('authentication.login', next=url_for(request.endpoint)))\n return func(*args, **kwargs)\n return wrapper",
"def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"handlers.login\"))\n\n return view(**kwargs)\n\n return wrapped_view",
"def login_required(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if session.get(\"user_id\") is None:\r\n flash(\"You must log in to view that page.\")\r\n return redirect(url_for(\"login\"))\r\n return f(*args, **kwargs)\r\n return decorated_function",
"def login_required(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None:\n app.logger.info('redirecting not logged in user')\n return redirect(url_for('index'))\n elif not g.user.initialized and f.__name__ not in ['profile_create','logout']:\n return redirect(url_for('profile_create'))\n else:\n return func(*args, **kwargs)\n return f",
"def employee_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):\n actual_decorator = user_passes_test(\n BaseUser.is_employee,\n login_url=login_url,\n redirect_field_name=redirect_field_name\n )\n if view_func:\n return actual_decorator(view_func)\n return actual_decorator",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n if session['user_id']:\n pass\n except KeyError:\n return redirect(url_for('users.login', next=request.url))\n return f(*args, **kwargs)\n return decorated_function",
"def require_registered(function):\n\n @wraps(function)\n def wrap(request, *args, **kwargs):\n decorated_view_func = login_required(request)\n if not decorated_view_func.user.is_authenticated:\n return decorated_view_func(request) # return redirect to login\n\n if request.user.password_disposition != User.FULL:\n return redirect('set-password')\n else:\n return function(request, *args, **kwargs)\n\n return wrap",
"def login_required(function):\n\n @wraps(function)\n def wrapper(self, *args, **kw):\n \"\"\"Redirect to main if user doesn't logged in. \"\"\"\n\n if not self.user:\n return self.redirect('/blog/login')\n return function(self, *args, **kw)\n return wrapper",
"def commLogin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # checks is user login\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n # checks if user has a committee login (an int)\n elif session.get(\"user_id\") < 0:\n return redirect(url_for(\"crisis\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if session.get(\"user_id\") is None:\r\n return redirect(\"/login\")\r\n return f(*args, **kwargs)\r\n return decorated_function",
"def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function"
] | [
"0.81273854",
"0.7381807",
"0.73482054",
"0.72734046",
"0.7270463",
"0.72175115",
"0.72175115",
"0.7205731",
"0.7179196",
"0.716704",
"0.71667445",
"0.71552765",
"0.71374995",
"0.7130772",
"0.7118831",
"0.70776236",
"0.7057909",
"0.70545197",
"0.70490575",
"0.70335615",
"0.7006072",
"0.69779766",
"0.6977953",
"0.6950745",
"0.694437",
"0.6935991",
"0.69317126",
"0.69317126",
"0.69250274",
"0.6921097"
] | 0.8669284 | 0 |
Test translation of one word | def test_word_translation(self):
self.assertEqual(translator.translate_word("hour"), "ourhay")
self.assertEqual(translator.translate_word(""), "")
self.assertEqual(translator.translate_word("aaa"), "aaayay") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_translation_smoke():\n english_to_morse = get_translator(\"english\", \"morse\")\n morse_to_english = get_translator(\"morse\", \"english\")\n morse = english_to_morse.translate(\"hello world\")\n english = morse_to_english.translate(morse)\n assert english == \"HELLO WORLD\"",
"def translate_leet(phrase):",
"def test_language_sensitivity(self): \n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(get_wording_text('test_1'), lang)",
"def correctWord (w):\r\n if len(re.findall(r\"[а-я]\",w))>len(re.findall(r\"[a-z]\",w)):\r\n return w.translate(eng_rusTranslateTable)\r\n else:\r\n return w.translate(rus_engTranslateTable)",
"def correctWord (w):\n\n if len(re.findall(ur\"[а-я]\",w))>len(re.findall(ur\"[a-z]\",w)):\n return w.translate(eng_rusTranslateTable)\n else:\n return w.translate(rus_engTranslateTable)",
"def try_translation(text):\n try:\n trad = Translator().translate(text = text, dest = 'fr')\n except:\n sleep(random() * 60)\n return False\n \n return trad.text",
"def translate():\n pass",
"def test_content(google_translator):\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string\n assert google_translator.translate(text='좋은') == \"good\"",
"def translate(text):\n start = dt.datetime.now()\n words = find_words(text)\n start = time_elapsed(\"Find words\", start)\n results = check_words(words)\n start = time_elapsed(\"Check words\", start)\n return results",
"def test_find_word(self):\n mic = mi.MicrophoneToText()\n\n teststring = 'x transcript\": ort lautet testort }x'\n\n word = mic.find_word(teststring)\n\n self.assertEqual(word, ' ort lautet testort ')",
"def test_first_equal(self):\n self.assertEqual(heaviest_word(\"man i need a taxi up to ubud\"), \"taxi\")",
"def question_new_translate():",
"def test_issue7306(en_lookup_nlp):\n doc = Doc(en_lookup_nlp.vocab, words=[\"singing\"])\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert doc[0].lemma_ == \"sing\"",
"def is_stopword(self, word, language):",
"def test_functionallity(self):\n\n pp = Lexpp(external_dict=pkg_resources.resource_filename(\"lexpp\", \"tests/test.dict\"))\n\n test_word = \"キャプテン\"\n entries = list(pp.lookup(test_word))\n\n self.assertEqual(len(entries), 4)\n\n for e in entries:\n self.assertEqual(type(e), Entry)\n rep = pp.get_representative_form(e)\n self.assertEqual(rep, test_word)",
"def translate_wrapper(atext):\n print(\"translating:\",atext)\n res=\"\"\n res=translate(atext,\"pl\",\"fr\")\n time.sleep(0.5)\n print(\"translation:\",res)\n return res",
"def test_translate_command(command, expected):\n assert translate_command(command) == expected",
"def translate_query(sentence):\n try:\n query = translator.translate(sentence, lang_tgt='en')\n return query\n except Exception:\n return False",
"def test_simple_translation_using_get(self):\n pass",
"def test_translate_unique_langs(self):\n\n trans_msgs_dict = MessageController.translate_unique_langs({'2': 'es', '4': 'fr'}, \n 'hi', 'en', False, False)\n\n self.assertEqual(trans_msgs_dict, {'es': u'{hola}', 'fr': u'salut'})",
"def translate(self):\n\t\tvowels = \"aeiou\"\n\n\t\tif (self.word[0] not in vowels) and (self.word[1] in vowels):\n\t\t\tnew_word = self.word[1:] + self.word[0] + \"ay\"\n\t\telif self.word[0] in vowels:\n\t\t\tnew_word = self.word + \"way\"\n\t\telse:\n\t\t\tnew_word = self.word[2:] + self.word[:2] + \"ay\"\n\n\t\tprint(new_word)",
"def test_issue4104(en_lookup_nlp):\n words = [\"dry\", \"spun\", \"spun-dry\"]\n doc = Doc(en_lookup_nlp.vocab, words=words)\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert [token.lemma_ for token in doc] == [\"dry\", \"spin\", \"spin-dry\"]",
"def ugettext(self, text):\r\n if text == \"There was a problem with the staff answer to this problem.\":\r\n text = \"TRANSLATED!\"\r\n return text",
"def test_i18n(self):\n # If you want to modify this code, please get acquainted with\n # Python's locale module. In particular:\n # http://docs.python.org/library/locale.html#locale.getdefaultlocale\n\n # Set the standard C locale.\n os.environ['LANG'] = 'C'\n os.environ['LC_ALL'] = 'C.UTF-8'\n\n #must be after above\n out1 = tr('Hello!')\n expected1 = 'Hello!'\n msg = 'Expected %s, got %s' % (expected1, out1)\n assert out1 == expected1, msg\n\n # Set the Indonesian locale to test translations.\n os.environ['LANG'] = 'id'\n os.environ['LC_ALL'] = 'id_ID.UTF-8'\n\n #must be after above\n #indoout1 = tr('Hello!') # translate as 'Hi'\n #indoexpected1 = 'Hi!'\n #msg = 'Expected %s, got %s' % (indoexpected1, indoout1)\n #assert indoout1 == indoexpected1, msg",
"def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new",
"def test_score_word(self):\n self.assertEqual(1, score_word('a', 'a'))\n self.assertEqual(1, score_word('aa', 'ab'))\n self.assertEqual(1, score_word('ba', 'bb'))\n self.assertEqual(0, score_word('a', 'b'))\n self.assertEqual(0, score_word('ab', 'ba'))\n self.assertEqual(2, score_word('aba', 'cba'))\n self.assertEqual(2, score_word('abc', 'abd'))",
"def test_contains_returns_true_for_words_with_spaces(multi_trie):\n assert multi_trie.contains(\"hi you\") is True",
"def test_output_get_word(self):\n actual = get_words('../corpus/alice.txt')\n expected = [\"alice\"]\n self.assertEqual(actual, expected)",
"def test_create_one_word(self, one_word_with_translations):\n positive_data = dict(\n name='haus',\n definition='test',\n language_id='de',\n )\n new_word = Word.objects.create(**positive_data)\n new_word.translation.set(one_word_with_translations)\n\n assert Word.objects.filter(**positive_data).exists()\n assert list(new_word.translation.all()) == one_word_with_translations",
"def test_no_cyrillic_string(self):\n string = \"test\"\n expected = \"test\"\n self.assertEqual(transliterate(string), expected)"
] | [
"0.7597297",
"0.74136245",
"0.73001736",
"0.7175371",
"0.7151106",
"0.7067673",
"0.6857479",
"0.6792205",
"0.6749835",
"0.66606575",
"0.6636238",
"0.6636003",
"0.6418098",
"0.6397569",
"0.6391174",
"0.63768405",
"0.63734776",
"0.6352836",
"0.6352223",
"0.63490295",
"0.63399535",
"0.63188183",
"0.62998694",
"0.6298174",
"0.62725633",
"0.6271324",
"0.6265433",
"0.6251981",
"0.62455",
"0.62299114"
] | 0.86208665 | 0 |
Makes the CWP and pprof file names consistent. For the same function, it may happen for some file paths to differ slightly in the CWP data compared to the pprof output. In a file name, for each tuple element of the list, we substitute the first element with the second one. | def MakeCWPAndPprofFileNamesConsistent(file_name):
file_name = file_name.replace(', ', '; ')
for replacing_pair_string in FILE_NAME_REPLACING_PAIR_STRINGS:
file_name = file_name.replace(replacing_pair_string[0],
replacing_pair_string[1])
return file_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MakePprofFunctionKey(function_and_file_name):\n # TODO(evelinad): Use pprof --topproto instead of pprof --top to parse\n # protobuffers instead of text output. Investigate if there is an equivalent\n # for pprof --tree that gives protobuffer output.\n #\n # In the CWP output, we replace the , with ; as a workaround for parsing\n # csv files. We do the same for the pprof output.\n #\n # TODO(evelinad): Use dremel --csv_dialect=excel-tab in the queries for\n # replacing the , delimiter with tab.\n function_and_file_name = function_and_file_name.replace(', ', '; ')\n # If the function and file name sequence contains the FUNCTION_FILE_SEPARATOR,\n # we normalize the path name of the file and make the string subtitutions\n # to make the CWP and pprof data consistent. The returned key is composed\n # from the function name and normalized file path name, separated by a comma.\n # If the function and file name does not contain the FUNCTION_FILE_SEPARATOR,\n # we just do the strings substitution.\n if FUNCTION_FILE_SEPARATOR in function_and_file_name:\n function_name, file_name = \\\n function_and_file_name.split(FUNCTION_FILE_SEPARATOR)\n file_name = \\\n MakeCWPAndPprofFileNamesConsistent(os.path.normpath(\"/\" + file_name))\n return ','.join([function_name, file_name])\n\n return MakeCWPAndPprofFileNamesConsistent(function_and_file_name)",
"def fname_tsk1(tup): #Task 1 & 2\n fname1 = f\"file_{str(tup[0]).zfill(3):}: {tup[1]:.2f}, {tup[2]:.2e}, {tup[3]:.2e}\"\n return(fname1)",
"def fname_tsk3(tup):\n t_size = str(len(tup))\n fname3 = \"The \" + t_size + \" numbers are: \"\n for t in tup:\n fname3 = fname3 + \"{:d}, \"\n return fname3[:-2].format(*tup)",
"def fname_tsk5(lst):\n lst[0] = del_lst(lst[0])\n lst[2] = del_lst(lst[2])\n body = \"The weight of an {0} is {1} and the weight of a \" \\\n \"{2} is {3}\"\n return body.format(*lst)",
"def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):\r\n suffix = '' # for *s after the p-value\r\n try:\r\n x = prob.shape\r\n prob = prob[0]\r\n except:\r\n pass\r\n if prob < 0.001: suffix = ' ***'\r\n elif prob < 0.01: suffix = ' **'\r\n elif prob < 0.05: suffix = ' *'\r\n title = [['Name','N','Mean','SD','Min','Max']]\r\n lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],\r\n [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]\r\n if type(fname)<>StringType or len(fname)==0:\r\n print\r\n print statname\r\n print\r\n pstats.printcc(lofl)\r\n print\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix\r\n print\r\n else:\r\n file = open(fname,writemode)\r\n file.write('\\n'+statname+'\\n\\n')\r\n file.close()\r\n writecc(lofl,fname,'a')\r\n file = open(fname,'a')\r\n try:\r\n if stat.shape == ():\r\n stat = stat[0]\r\n if prob.shape == ():\r\n prob = prob[0]\r\n except:\r\n pass\r\n file.write(pstats.list2string(['\\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\\n\\n']))\r\n file.close()\r\n return None",
"def writeProfile(fname,prof):\n t = np.linspace(0,1,prof.shape[0],endpoint=False)\n fh = open(fname,'w')\n for x in range(prof.shape[0]):\n fh.write('%.7e %.7e\\n' % (t[x],prof[x]))\n fh.close()",
"def generate_name(path_list):\n name = path_list[0]\n for item in path_list[1:]:\n name += \"[\" + item + \"]\"\n return name",
"def Generate_Profile_Name(self, profile_list):\r\n name = \"\"\r\n #Take the first three letters of the word itself-\r\n # the real first three letters are (+) or (-)\r\n for term in profile_list:\r\n name = name + term.Display_()[3:6]\r\n original_length = len(name)\r\n counter = 1\r\n spl = self.system.Profile_List()\r\n #While there are matches, increment the counter and try again\r\n while( name in spl ):\r\n name = name[:original_length]+str(counter)\r\n return name",
"def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename",
"def standardized_name(path, filename):\n path_file = os.path.join(path, filename)\n stat = os.stat(path_file)\n extension = path_file.split('.')[-1]\n creation_time = datetime.fromtimestamp(stat.st_mtime).strftime('%m-%d-%Y_%H:%M:%S')\n return '{}.{}'.format(creation_time, extension)",
"def name_from_file(pth = getattr(modules['__main__'], '__file__', 'optimize.default')):\n\treturn '{0:s}'.format(splitext(basename(pth))[0])",
"def generate_report_file_name(args: Dict[str, Any]) -> str:\n return (\n f\"{args.get('report_type', '').lower().replace(' ', '_')}_fireeye_\"\n f\"{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.\"\n f\"{args.get('type', REPORT_TYPE_ALLOWED_FORMAT[args.get('report_type', '')][0])}\"\n )",
"def genCleanedOutputName(outputFile, paired = False):\n if paired:\n return outputFile.replace(\"cleaned\", \"cleaned_1.fastq\"), \\\n outputFile.replace(\"cleaned\", \"cleaned_2.fastq\")\n else:\n return outputFile.replace(\"cleaned\", \"cleaned_1.fastq\"), \"\"\n # if fastq2 is not None:\n # return fastq1.replace(\".fastq\", \".cleaned.fastq\"), fastq2.replace(\".fastq\", \".cleaned.fastq\")\n # else:\n # return fastq1.replace(\".fastq\", \".cleaned.fastq\"), \"\"",
"def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"",
"def naming_convention(file_dir, file_name):\n long_hash = sha1sum(os.path.join(file_dir, file_name))\n file_prefix, file_sufix = file_name.split('.')\n new_name = '{file_prefix}-{short_hash}.{file_sufix}'.format(\n file_prefix=file_prefix,\n short_hash=long_hash[:8],\n file_sufix=file_sufix)\n return new_name, long_hash",
"def test_fname():\n\n assert fname('data', 'json') == 'data.json'\n assert fname('data.json', 'json') == 'data.json'\n assert fname('pic', 'png') == 'pic.png'\n assert fname('pic.png', 'png') == 'pic.png'\n assert fname('report.pdf', 'pdf') == 'report.pdf'\n assert fname('report.png', 'pdf') == 'report.png'",
"def fname_tsk6(lst):\n max_name = max(len(lst[0][0]), len(lst[1][0]), len(lst[2][0]), len('Name'))\n max_year = max(len(str(lst[0][1])), len(str(lst[1][1])),\n len(str(lst[2][1])), len('Year'))\n max_price = max(len(str(lst[0][2])), len(str(lst[1][2])),\n len(str(lst[2][2])), len('Price'))\n length = [max_name, max_year, max_price]\n report = (\"Name\" + (max_name - len(\"Name\")) * \" \" + \"|\" +\n (max_year - len(\"Year\")) * \" \" + \"Year\" + \"|\" +\n (max_price - len(\"Price\")) * \" \" + \"Price|\\n\")\n for car in lst:\n new_line = \"{nm:{mnm}}|{y:{my}}|{pri:{mpri}}|\".format(\n nm=car[0], y=car[1], pri=car[2], mnm=length[0], my=length[1], mpri=length[2]\n )\n report += new_line + \"\\n\"\n return report",
"def filename(fname='andor'):\n fname = fname.strip()\n pc = max(map(_ctrn,glob.glob(os.path.join(camera.status.path,fname+\"*.fits\")))+[0])\n cc = max(map(_ctrn,glob.glob(\"/data/counters/\"+fname+\"*.cntr\"))+[0])\n fc = max([pc,cc])+1\n #Set the filectr to one higher than the highest existing file with \n #the same basename in the current path or in /data/counters\n camera.status.filename = fname\n filectr(fc)",
"def get_fixed_filename(filename):\n initial_changed_name = filename.replace(\" \", \"_\").replace(\".TXT\", \".txt\")\n previous_character = \"\"\n new_name = \"\"\n for i, character in enumerate(initial_changed_name):\n current_character = character\n if previous_character == \"_\" and current_character.islower():\n new_name += current_character.upper()\n elif previous_character == \"(\" and current_character.islower():\n new_name += current_character.upper()\n elif previous_character.islower() and current_character.isupper():\n new_name += \"_{}\".format(current_character)\n else:\n new_name += current_character\n previous_character = character\n return new_name",
"def get_fixed_filename(filename):\n new_name = \"\"\n for i, char in enumerate(filename):\n if i + 1 != len(filename):\n previous_character = filename[i - 1]\n next_character = filename[i + 1]\n if char.islower() and next_character.isupper():\n new_name += char + \"_\"\n elif previous_character == \".\":\n new_name += char\n elif char.islower() and not previous_character.isalpha():\n new_name += char.upper()\n else:\n new_name += char\n else:\n new_name += char\n new_name = new_name.replace(\" \", \"_\").replace(\".TXT\", \".txt\")\n return new_name",
"def FileNameToFile(files):\n files = files.replace('%20%28ja%29', '.ja')\n if files in up_list:\n if files == 'UserManual':\n return \"index.html\"\n elif files == 'UserManual.ja':\n return \"index.ja.html\"\n else:\n return files.lower() + \".html\"\n else: # modules\n sol = files.replace('.py', '').replace('%2F', '_')\n return 'modules/' + sol + '.html'",
"def generate_file_name(old_file_name: str) -> str:\r\n return old_file_name.split(\".\")[0] + '_features' + '.npy'",
"def fname_tsk4(tup):\n form_string = \"{3:02d} {4:d} {2:d} {0:02d} {1:d}\"\n return form_string.format(*tup)",
"def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]",
"def outfigname(num, ext, char=\"\"):\n return \"f{}{}{}\".format(num, char, ext)",
"def ParsePprofTopOutput(file_name):\n\n pprof_top_statistics = {}\n\n # In the pprof top output, the statistics of the functions start from the\n # 6th line.\n with open(file_name) as input_file:\n pprof_top_content = input_file.readlines()[6:]\n\n for line in pprof_top_content:\n function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)\n flat, flat_p, sum_p, cum, cum_p = function_statistic_match.groups()\n flat_p = str(float(flat_p) / 100.0)\n sum_p = str(float(sum_p) / 100.0)\n cum_p = str(float(cum_p) / 100.0)\n lookup_index = function_statistic_match.end()\n function_and_file_name = line[lookup_index + 2 : -1]\n key = MakePprofFunctionKey(function_and_file_name)\n pprof_top_statistics[key] = (flat, flat_p, sum_p, cum, cum_p)\n return pprof_top_statistics",
"def _build_fname_templ(n):\n parts =[globals.ds_fn_templ.format(i='{i_ref:d}', ds='{ref}', var='{ref_var}')]\n for i in range(1, n):\n parts += [globals.ds_fn_templ.format(i='{i_ds%i:d}' % i, ds='{ds%i}' % i,\n var='{var%i}' % i)]\n return globals.ds_fn_sep.join(parts) + '.nc'",
"def get_coverprofile_filename(self, pkg):\n items = (\"coverage\", \"txt\")\n if self.buildtags:\n items = (\"coverage\", self.buildtags.replace(\",\", \".\").replace(\" \", \"\"), \"txt\")\n return os.path.join(pkg.replace(self.base_pkg + \"/\", \"\"), \".\".join(items))",
"def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name",
"def construct_name(p, prefix):\n name = prefix\n for key in p.keys():\n if (type(p[key]) != tuple) and (type(p[key]) != list):\n name = name + '_' + str(key) + '-' + str(p[key])\n else:\n name = name + '_' + str(key) + '-' + str(p[key][0])\n return name"
] | [
"0.6336867",
"0.6131035",
"0.5696922",
"0.55790573",
"0.55010515",
"0.54772717",
"0.5450916",
"0.5304689",
"0.5298604",
"0.5282132",
"0.52429986",
"0.5242389",
"0.5162972",
"0.51255804",
"0.51252884",
"0.510435",
"0.5082193",
"0.50774354",
"0.5064152",
"0.5051272",
"0.50464123",
"0.50346035",
"0.5032202",
"0.5018317",
"0.50134647",
"0.5007429",
"0.4997297",
"0.49934927",
"0.49753675",
"0.49720255"
] | 0.7195294 | 0 |
Creates the function key from the function and file name. Parsing the pprof top and tree outputs is difficult because it is hard to extract the function and file name (i.e. the function names can contain unexpected characters such as spaces, operators, etc.). For the moment, we use FUNCTION_FILE_SEPARATOR as the delimiter between the function and the file name. However, there are cases where the file name does not start with / and we treat these cases separately (i.e. ../sysdeps, ../nptl, aesx86_64.s). | def MakePprofFunctionKey(function_and_file_name):
# TODO(evelinad): Use pprof --topproto instead of pprof --top to parse
# protobuffers instead of text output. Investigate if there is an equivalent
# for pprof --tree that gives protobuffer output.
#
# In the CWP output, we replace the , with ; as a workaround for parsing
# csv files. We do the same for the pprof output.
#
# TODO(evelinad): Use dremel --csv_dialect=excel-tab in the queries for
# replacing the , delimiter with tab.
function_and_file_name = function_and_file_name.replace(', ', '; ')
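  # For example, 'foo(int, char)' becomes 'foo(int; char)', so commas inside
  # function signatures no longer clash with the CSV delimiter.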
# If the function and file name sequence contains the FUNCTION_FILE_SEPARATOR,
  # we normalize the path name of the file and make the string substitutions
# to make the CWP and pprof data consistent. The returned key is composed
# from the function name and normalized file path name, separated by a comma.
# If the function and file name does not contain the FUNCTION_FILE_SEPARATOR,
  # we just do the string substitution.
if FUNCTION_FILE_SEPARATOR in function_and_file_name:
function_name, file_name = \
function_and_file_name.split(FUNCTION_FILE_SEPARATOR)
file_name = \
MakeCWPAndPprofFileNamesConsistent(os.path.normpath("/" + file_name))
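    # The returned key has the form 'function_name,file_name', e.g. a
    # hypothetical input would map to 'memcpy,/usr/lib64/libc.so'.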
return ','.join([function_name, file_name])
return MakeCWPAndPprofFileNamesConsistent(function_and_file_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_function_key(fn):\n return hashlib.md5(fn.func_code.co_code).hexdigest()",
"def file_key(filename):\n return FILE_PREFIX + filename",
"def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)",
"def generate_key(*args, **kwargs):\n parts = []\n\n parts.append(fn.__module__)\n\n if hasattr(fn, '__self__'):\n parts.append(fn.__self__.__class__.__name__)\n\n parts.append(fn.__name__)\n\n if id:\n if callable(id):\n _id = id\n elif isinstance(id, basestring):\n _id = str(id).format\n else:\n raise TypeError('%s keys are invalid' % id.__class__.__name__)\n\n parts.append(_id(*args, **kwargs))\n\n ## TODO Implement args hashing\n #if args:\n # parts.append(pickle.dumps(args))\n #\n #if kwargs:\n # parts.append(pickle.dumps(sorted(kwargs.items())))\n\n return '.'.join(parts)",
"def makekey(function, *args, **kwargs) -> str:\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(\n str.maketrans('', '', string.punctuation+string.whitespace)\n )\n key = codecs.encode(pickle.dumps(arguments, protocol=0), \"base64\").decode().strip()\n return key",
"def fname(func: Callable) -> str:\n return \"{}.{}\".format(func.__module__, func.__name__)",
"def get_prog_key(func_name, import_file_pk):\n return _get_cache_key(\n PROGRESS_CACHE_PREFIX.format(func_name), import_file_pk\n )",
"def __build_file_name(self, func, args):\n # Build a unique string to hash\n if self.__log:\n self.__logger.info(f\"Building file name for {func.__name__} with {args}\")\n\n # Hash with the specified algorithm and hexdigest\n # to produce a string\n fname = self.algorithm(\n b\"\".join([func.__name__.encode(\"utf8\"), pickle.dumps(args)])\n ).hexdigest()\n\n pathToFile = os.path.join(self.cacheDir, fname)\n if self.__log:\n self.__logger.info(f\"Built path {pathToFile}\")\n return pathToFile",
"def name_of(func):\n folder = func.__code__.co_filename\n file = path.split(folder)[1]\n file = \".\".join(path.splitext(file)[:-1])\n return file",
"def input_name_from_func_name(func_name):\n\treturn os.path.join(INPUTS_DIR, ''.join(func_name.split('make_')[1:])) \\\n\t\t\t+ '.%s' % EXTENSION",
"def _generate_processed_key_name(process_to, upload_name):\n timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')\n name, extension = os.path.splitext(upload_name)\n digest = md5(''.join([timestamp, upload_name])).hexdigest()\n return os.path.join(process_to, '{0}.{1}'.format(digest, extension))",
"def file_key(filename):\n prio = 4\n if filename == 'install.rdf':\n prio = 1\n elif filename in [\"chrome.manifest\", \"icon.png\", \"icon64.png\"]:\n prio = 2\n elif filename in LICENSE_FILENAMES:\n prio = 5\n return (prio, os.path.split(filename.lower()))",
"def fname(key):\n return key.rsplit(\"/\", 1)[-1]",
"def _get_func_fullname(func):\r\n modules, funcname = get_func_name(func)\r\n modules.append(funcname)\r\n return os.path.join(*modules)",
"def _function_sig_key(name: str, *args: Any, **kwargs: Any) -> int:\n function_sig = name\n for arg in args:\n function_sig += str(arg)\n for _, value in kwargs.items():\n function_sig += str(value)\n\n return hash(function_sig)",
"def _filename(self, key):\n return os.path.join(self.root, key[:2], key)",
"def header_from_function_name_and_args(fname, fargs):\n header = \"void {fname}_({fargs_str});\".format(\n fname=fname, fargs_str=args_str_from_args(fargs)\n )\n return header",
"def keys_to_filename(*args,**kwargs):\n\n\t\t\tstrict = kwargs.get('strict',True)\n\t\t\tif not spot in self.toc: raise Exception('need a spotname to look up keys')\n\t\t\t#---! it may be worth storing this as a function a la divy_keys\n\t\t\t#---follow the top,step,part naming convention\n\t\t\ttry:\n\t\t\t\tbackwards = [''.join(['%s' if i[0]=='subpattern' else chr(i[1]) \n\t\t\t\t\tfor i in re.sre_parse.parse(regex)]) \n\t\t\t\t\tfor regex in [self.spots[spot][key] for key in ['top','step','part']]]\n\t\t\t\tfn = os.path.join(\n\t\t\t\t\tself.spots[spot]['rootdir'],\n\t\t\t\t\t'/'.join([backwards[ii]%i for ii,i in enumerate(args)]))\n\t\t\texcept Exception as e: \n\t\t\t\ttracer(e)\n\t\t\t\t#---previously: raise Exception('error making keys: %s,%s'%(str(spotname),str(args)))\n\t\t\t\timport pdb;pdb.set_trace() #---legit\n\t\t\tif strict: assert os.path.isfile(fn)\n\t\t\treturn fn",
"def _get_key(self, file_name, config) -> str:\n pass",
"def _generate_function_specific_name(a, vertices):\n coeff_hash = hash(str(a))\n if coeff_hash < 0:\n # Cannot have minus sign in name\n coeff_hash *= -1\n vertices_hash = hash(str(vertices))\n if vertices_hash < 0:\n # Cannot have minus sign in name\n vertices_hash *= -1\n return str(coeff_hash) + \"_\" + str(vertices_hash)",
"def time_key(file_name):\n splits = file_name.split('/')\n [date] = re.findall(r'(\\d{4}_\\d{2}_\\d{2})', splits[-2])\n date_id = [int(token) for token in date.split('_')]\n recording_id = natural_key(splits[-1])\n session_id = session_key(splits[-2])\n \n return date_id + session_id + recording_id",
"def get_name_line_file(_prefix):\n _keys = {\n \"type\": \"region\",\n \"name\": _prefix,\n }\n _extra = {\"file\": \"<unknown>\", \"line\": \"0\"}\n _pdict = perform_regex(_prefix)\n if _pdict is not None:\n if \"head\" in _pdict:\n _keys[\"name\"] = _pdict[\"head\"].rstrip()\n _extra[\"line\"] = _pdict[\"line\"]\n _extra[\"file\"] = _pdict[\"file\"]\n else:\n _keys[\"name\"] = _pdict[\"func\"]\n _extra[\"file\"] = (\n _pdict[\"file\"] if \"file\" in _pdict else \"<unknown file>\"\n )\n if \"line\" in _pdict:\n _extra[\"line\"] = _pdict[\"line\"]\n if \"tail\" in _pdict:\n _keys[\"name\"] = \"{}/{}\".format(_keys[\"name\"], _pdict[\"tail\"])\n return (_keys, _extra)",
"def function_info_to_file_id_address(self, function_info):\n return (int(function_info[3], 16), int(function_info[4], 16))",
"def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)",
"def extract_function_name():\n tb = sys.exc_info()[-1]\n stk = traceback.extract_tb(tb, 1)\n fname = stk[0][3]\n return fname",
"def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))",
"def tmp_key(filename):\n return TMP_PREFIX + filename",
"def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }",
"def text_for_funcs_in_script(filename, prefix):\n funcs = funcs_in_script(filename)\n\n ###################################################\n # FIND LENGTH OF LONGEST FUNCTION NAME #\n ###################################################\n maxlen = 0\n for func in funcs:\n name, header = func\n length = len(name)\n if length > maxlen:\n maxlen = length\n\n ###################################################\n # CREATE ONE LINE FOR EACH FUNCTION #\n ###################################################\n text = ''\n for func in funcs:\n name, header = func\n namep = name + '()'\n line = prefix + namep.ljust(maxlen + 3) + '> ' + header + '\\n'\n text += line\n\n return text",
"def name_from_file(pth = getattr(modules['__main__'], '__file__', 'optimize.default')):\n\treturn '{0:s}'.format(splitext(basename(pth))[0])"
] | [
"0.6344893",
"0.61733",
"0.61548144",
"0.6029061",
"0.59346366",
"0.5924424",
"0.5907838",
"0.5815705",
"0.58027244",
"0.57330346",
"0.5722702",
"0.56463015",
"0.5637381",
"0.5543911",
"0.5503695",
"0.5477241",
"0.5434116",
"0.54224616",
"0.5410873",
"0.53719246",
"0.53501993",
"0.53459895",
"0.5337211",
"0.52882487",
"0.52645206",
"0.5242627",
"0.52055174",
"0.5193294",
"0.51916987",
"0.5188658"
] | 0.780996 | 0 |
Computes the cumulative inclusive count value of a function. A function might be declared in multiple files or objects. When computing the fraction of the inclusive count value of a child function relative to its parent function, we take into consideration the sum of the inclusive_count values from all the occurrences of that function. | def ComputeCWPCummulativeInclusiveStatistics(cwp_inclusive_count_statistics):
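  # Sketch of the assumed input shape: keys look like 'function,file' and the
  # second element of each statistics tuple is the inclusive count value; the
  # result maps each bare function name to the sum of its inclusive counts
  # across all the files and objects where it appears.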
cwp_inclusive_count_statistics_cumulative = defaultdict(int)
for function_key, function_statistics \
in cwp_inclusive_count_statistics.iteritems():
function_name, _ = function_key.split(',')
cwp_inclusive_count_statistics_cumulative[function_name] += \
function_statistics[1]
return cwp_inclusive_count_statistics_cumulative | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ComputeCWPChildFunctionsFractions(cwp_inclusive_count_statistics_cumulative,\n cwp_pairwise_inclusive_count_statistics):\n\n pairwise_inclusive_count_fractions = {}\n\n for parent_function_key, child_functions_metrics in \\\n cwp_pairwise_inclusive_count_statistics.iteritems():\n child_functions_fractions = {}\n parent_function_inclusive_count = \\\n cwp_inclusive_count_statistics_cumulative.get(parent_function_key, 0.0)\n\n if parent_function_key in cwp_inclusive_count_statistics_cumulative:\n for child_function_key, child_function_inclusive_count \\\n in child_functions_metrics.iteritems():\n child_functions_fractions[child_function_key] = \\\n child_function_inclusive_count / parent_function_inclusive_count\n else:\n for child_function_key, child_function_inclusive_count \\\n in child_functions_metrics.iteritems():\n child_functions_fractions[child_function_key] = 0.0\n pairwise_inclusive_count_fractions[parent_function_key] = \\\n child_functions_fractions\n\n return pairwise_inclusive_count_fractions",
"def ParseCWPPairwiseInclusiveCountFile(file_name):\n pairwise_inclusive_count_statistics = defaultdict(lambda: defaultdict(float))\n\n with open(file_name) as input_file:\n statistics_reader = csv.DictReader(input_file, delimiter=',')\n\n for statistic in statistics_reader:\n parent_function_name, child_function_name = \\\n statistic['parent_child_functions'].split(\n PARENT_CHILD_FUNCTIONS_SEPARATOR)\n child_function_file_name = MakeCWPAndPprofFileNamesConsistent(\n os.path.normpath(statistic['child_function_file']))\n inclusive_count = statistic['inclusive_count']\n\n # There might be situations where a child function appears in\n # multiple files or objects. Such situations can occur when in the\n # Dremel queries are not specified the Chrome OS version and the\n # name of the board (i.e the files can belong to different kernel or\n # library versions), when the child function is a template function\n # that is declared in a header file or there are name collisions\n # between multiple executable objects.\n # If a pair of child and parent functions appears multiple times, we\n # add their inclusive count values.\n child_function_key = ','.join(\n [child_function_name, child_function_file_name])\n pairwise_inclusive_count_statistics[parent_function_name] \\\n [child_function_key] += float(inclusive_count)\n\n return pairwise_inclusive_count_statistics",
"def counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count",
"def count(f):\n def counted(*args):\n counted.call_count += 1\n return f(*args)\n counted.call_count = 0\n return counted",
"def ParseCWPInclusiveCountFile(file_name):\n cwp_inclusive_count_statistics = defaultdict(lambda: ('', 0, 0.0, 0))\n\n with open(file_name) as input_file:\n statistics_reader = csv.DictReader(input_file, delimiter=',')\n for statistic in statistics_reader:\n function_name = statistic['function']\n file_name = MakeCWPAndPprofFileNamesConsistent(\n os.path.normpath(statistic['file']))\n dso_name = statistic['dso']\n inclusive_count = statistic['inclusive_count']\n inclusive_count_fraction = statistic['inclusive_count_fraction']\n\n # We ignore the lines that have empty fields(i.e they specify only the\n # addresses of the functions and the inclusive counts values).\n if all([\n function_name, file_name, dso_name, inclusive_count,\n inclusive_count_fraction\n ]):\n key = '%s,%s' % (function_name, file_name)\n\n # There might be situations where a function appears in multiple files\n # or objects. Such situations can occur when in the Dremel queries there\n # are not specified the Chrome OS version and the name of the board (i.e\n # the files can belong to different kernel or library versions).\n inclusive_count_sum = \\\n cwp_inclusive_count_statistics[key][1] + int(inclusive_count)\n inclusive_count_fraction_sum = \\\n cwp_inclusive_count_statistics[key][2] + \\\n float(inclusive_count_fraction)\n\n # All the functions are initially marked as EXTRA_FUNCTION.\n value = \\\n (dso_name, inclusive_count_sum, inclusive_count_fraction_sum,\n EXTRA_FUNCTION)\n cwp_inclusive_count_statistics[key] = value\n\n return cwp_inclusive_count_statistics",
"def count_frames(f):\n def counted(n):\n counted.open_count += 1\n counted.max_count = max(counted.max_count, counted.open_count)\n result = f(n)\n counted.open_count -= 1\n return result\n counted.open_count = 0\n counted.max_count = 0\n return counted",
"def fn(node):\n if not node: return 0\n if not node.left or not node.right: return 1 + fn(node.left) + fn(node.right)\n return 1 + min(fn(node.left), fn(node.right))",
"def countcalls(f):\n def _f(fn):\n countcalls[fn]",
"def _get_fitness_increes(self, fit_list, func):\n inc = 0\n for index in range(1, len(fit_list)-1):\n inc += fit_list[index] - fit_list[index-1]\n\n return inc",
"def summationRecursion(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return lower + summationRecursion(lower + 1, upper)",
"def parallel_count_calculate_func(cls, parallel_count):\n if parallel_count == 0:\n result = 1\n\n else:\n count = float(0.0)\n\n for number in range(int(parallel_count)):\n count += float(1 / (float(number) + 1))\n\n result = pow(count, (-1))\n\n return result",
"def count_me(fnc):\n def increment(self, *args, **kwargs):\n type(self)._count += 1\n return fnc(self, *args, **kwargs)\n return increment",
"def counter(fn, counters):\r\n\r\n cnt = 0\r\n def inner(*args, **kwargs):\r\n nonlocal cnt\r\n cnt += 1\r\n counters[fn.__name__] = cnt\r\n return fn(*args, **kwargs)\r\n return inner",
"def farey_count(n, a, b, num, denom):\n count = 0\n c, d = n / b, n - 1\n while c != num and d != denom:\n k = int((n + b) / d)\n a, b, c, d = c, d, (k*c-a), (k*d-b)\n count += 1\n return count",
"def get_inc(self):\n return self.inc_min + self.inc_ * self.inc_range",
"def count_cond(condition):\n \"*** YOUR CODE HERE ***\"\n def f(n):\n i, total = 1, 0\n while i <= n:\n if condition(n, i):\n total += 1\n i += 1\n return total\n return f",
"def current_nbc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(nbc_cov_dict[layer.name])\n total = total + np.size(nbc_cov_dict[layer.name])\n return covered / float(total)",
"def counted ( f ):\n def wrapped ( *args, **kwargs ):\n wrapped.calls += 1\n return f( *args , **kwargs )\n wrapped.calls = 0\n return wrapped",
"def summationReduce(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return reduce(lambda x, y: x + y, range(lower, upper + 1))",
"def count_up(n):\n def counter(i):\n \"*** YOUR CODE HERE ***\"\n counter(1)",
"def get_count(self, min_value, max_value):\n index = self.get_bin_index(min_value)\n current_start_value = self.values[index]\n current_stop_value = self.values[index + 1]\n count = 0\n # Add total in this area:\n count += self.counts[index]\n if current_start_value != -float(\"inf\"):\n # Remove proportion before min_value:\n current_total_range = current_stop_value - current_start_value\n percent = (min_value - current_start_value) / current_total_range\n count -= self.counts[index] * percent\n if max_value < current_stop_value:\n # stop is inside this area too, so remove after max\n if current_start_value != -float(\"inf\"):\n percent = (current_stop_value - max_value) / current_total_range\n count -= self.counts[index] * percent\n return count\n # max_value is beyond this area, so loop until last area:\n index += 1\n while max_value > self.values[index + 1]:\n # add the whole count\n count += self.counts[index]\n index += 1\n # finally, add the proportion in last area before max_value:\n current_start_value = self.values[index]\n current_stop_value = self.values[index + 1]\n if current_stop_value != float(\"inf\"):\n current_total_range = current_stop_value - current_start_value\n percent = (max_value - current_start_value) / current_total_range\n count += self.counts[index] * percent\n else:\n count += self.counts[index]\n return count",
"def getCountFiles():\n result = 0\n session = Queries.createSession()\n try:\n result = session.execute(func.count(FileTable.id)).fetchone()[0]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result",
"def global_counter(fn):\n cntr = 0 \n\n def inner(*args, **kwargs):\n nonlocal cntr\n cntr = cntr + 1\n g_counters[fn.__name__] = cntr # counters is global\n return fn(*args, **kwargs)\n\n return inner",
"def compute_contrib_count(contribs, percent, time_from, time_to):\n counts = []\n s = 0\n for c in contribs:\n l = len([v for v in contribs[c] if time_from <= v <= time_to])\n counts.append(l)\n s += l\n counts.sort(reverse=True)\n\n i = 1\n rat = percent/100.0\n while sum(counts[:i]) < rat*s:\n i += 1\n return i",
"def find_numerical_contours(counts):\n\tone_sigma_boundary = sigma_boundary(counts, 68)\n\tone_sigma = counts > one_sigma_boundary\n\ttwo_sigma_boundary = sigma_boundary(counts, 95)\n\ttwo_sigma = (counts > two_sigma_boundary) & (counts < one_sigma_boundary)\n\tthree_sigma_boundary = sigma_boundary(counts, 99)\n\tthree_sigma = (counts > three_sigma_boundary) & (counts < two_sigma_boundary)\n\n\t# Check method: Output actual percentages in each region\n\tprint('total no. samples:')\n\tprint(np.sum(counts))\n\tprint('included in 1st sigma region:')\n\tprint(np.sum(one_sigma * counts) / np.sum(counts))\n\tprint('included in 2 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts)) / np.sum(counts))\n\tprint('included in 3 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts) + np.sum(three_sigma * counts)) / np.sum(counts))\n\n\tfilled_numerical_contours = one_sigma * 1 + two_sigma * 2 + three_sigma * 3\n\n\treturn filled_numerical_contours",
"def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):\n if reverse:\n events = events.sort_index(ascending=False)\n population = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0)\n deaths = events['observed'].shift(1).fillna(0)\n estimate_ = np.cumsum(_additive_f(population, deaths)).ffill().sort_index()\n var_ = np.cumsum(_additive_var(population, deaths)).ffill().sort_index()\n else:\n deaths = events['observed']\n population = events['entrance'].cumsum() - events['removed'].cumsum().shift(1).fillna(0) #slowest line here.\n estimate_ = np.cumsum(_additive_f(population, deaths))\n var_ = np.cumsum(_additive_var(population, deaths))\n\n estimate_ = estimate_.reindex(timeline, method='pad').fillna(0)\n var_ = var_.reindex(timeline, method='pad')\n var_.index.name = 'timeline'\n estimate_.index.name = 'timeline'\n\n return estimate_, var_",
"def count_cond(condition):\n def countDef(n):\n i,count = 1,0\n while i <= n:\n if condition(n, i):\n count += 1;s\n i += 1;\n return count;\n return countDef;",
"def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n freq[ans] += 1\n return ans",
"def get_counts(self, min_value, max_value, span_value):\n counts = []\n\n if max_value == min_value:\n max_value = min_value * 1.1 + 1\n min_value = min_value / 1.1 - 1\n\n bucketPos = 0\n binLeft = min_value\n\n while binLeft < max_value:\n binRight = binLeft + span_value\n count = 0.0\n # Don't include last as bucketLeft, which is infinity:\n while bucketPos < len(self.values) - 1:\n bucketLeft = self.values[bucketPos]\n bucketRight = min(max_value, self.values[bucketPos + 1])\n intersect = min(bucketRight, binRight) - max(bucketLeft, binLeft)\n\n if intersect > 0:\n if bucketLeft == -float(\"inf\"):\n count += self.counts[bucketPos]\n else:\n count += (intersect / (bucketRight - bucketLeft)) * self.counts[\n bucketPos\n ]\n\n if bucketRight > binRight:\n break\n\n bucketPos += 1\n\n counts.append(count)\n binLeft += span_value\n\n return counts",
"def minOperations(n):\n count = 0\n min_val = 2\n if n < 2:\n return 0\n while min_val <= n:\n if (n % min_val == 0):\n count = count + min_val\n n = n / min_val\n else:\n min_val = min_val + 1\n return (count)"
] | [
"0.597441",
"0.55660766",
"0.5475768",
"0.54315376",
"0.5387883",
"0.53473026",
"0.5299066",
"0.52987206",
"0.52914274",
"0.52319247",
"0.5216198",
"0.51691157",
"0.5139399",
"0.51383424",
"0.5124791",
"0.5121849",
"0.50887746",
"0.5069851",
"0.50526094",
"0.5018618",
"0.49693537",
"0.4966524",
"0.49550217",
"0.49501532",
"0.49481454",
"0.493968",
"0.49348012",
"0.49041364",
"0.4890437",
"0.4869524"
] | 0.6362533 | 0 |
Computes the fractions of the inclusive count values for child functions. The fraction represents the inclusive count value of a child function divided by that of its parent function. | def ComputeCWPChildFunctionsFractions(cwp_inclusive_count_statistics_cumulative,
cwp_pairwise_inclusive_count_statistics):
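  # Hypothetical worked example: if the cumulative inclusive count of a parent
  # function is 200 and a child accounts for an inclusive count of 50 under
  # that parent, the stored fraction is 50 / 200 = 0.25.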
pairwise_inclusive_count_fractions = {}
for parent_function_key, child_functions_metrics in \
cwp_pairwise_inclusive_count_statistics.iteritems():
child_functions_fractions = {}
parent_function_inclusive_count = \
cwp_inclusive_count_statistics_cumulative.get(parent_function_key, 0.0)
if parent_function_key in cwp_inclusive_count_statistics_cumulative:
for child_function_key, child_function_inclusive_count \
in child_functions_metrics.iteritems():
child_functions_fractions[child_function_key] = \
child_function_inclusive_count / parent_function_inclusive_count
else:
for child_function_key, child_function_inclusive_count \
in child_functions_metrics.iteritems():
child_functions_fractions[child_function_key] = 0.0
pairwise_inclusive_count_fractions[parent_function_key] = \
child_functions_fractions
return pairwise_inclusive_count_fractions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def denominator(self, ???):",
"def numerator(self, ???):",
"def fractionPassing(self):\n return self.cut.entries / self.entries",
"def calculate_vote_fractions():\n return _calculate_vote_fractions(models.get_candidate_to_vote_count())",
"def fractions():\n\n pi = 22 / 7\n print(\"22/7\\n====\")\n print_as_text(pi)\n\n pi = 333 / 106\n print(\"333/106\\n=======\")\n print_as_text(pi)\n\n pi = 355 / 113\n print(\"355/113\\n=======\")\n print_as_text(pi)\n\n pi = 52163 / 16604\n print(\"52163/16604\\n===========\")\n print_as_text(pi)\n\n pi = 103993 / 33102\n print(\"103993/33102\\n============\")\n print_as_text(pi)\n\n pi = 245850922 / 78256779\n print(\"245850922/78256779\\n==================\")\n print_as_text(pi)",
"def denominator(self):\n return 1",
"def fraction_to_percentage(fraction):\n return fraction * 100.0",
"def ratio_func(a, b):\n return a / b",
"def cont_frac_rat( frac ):\n\n num = frac.numerator\n den = frac.denominator\n\n answer = []\n r1 = num\n r2 = den\n r3 = r1 % r2\n q = r1 / r2\n\n answer.append(q)\n\n while r3 != 0: # euclidean algorithm\n r1 = r2\n r2 = r3\n r3 = r1 % r2\n q = r1/r2\n answer.append(q)\n\n return answer",
"def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)",
"def percentages(self) -> pandas.Series:\n if self._percentages is None:\n scalar = 1 if self.use_fraction else 100\n self._percentages = scalar * self.counts/self.total\n return self._percentages",
"def _reduce(self) -> None:\n divisor = self._gcd(self._numerator, self._denominator)\n self._numerator = self._numerator // divisor\n self._denominator = self._denominator // divisor",
"def computeFraction( poi_messages, all_messages ):\n fraction=0\n if poi_messages != 'NaN' and all_messages != 'NaN':\n\t\tfraction = poi_messages/float(all_messages)\n return fraction",
"def computeFraction( poi_messages, all_messages ):\n fraction = 0.\n\n if (poi_messages != 'NaN') and (all_messages != 'NaN') and (all_messages != 0):\n fraction = 1.0 * poi_messages/all_messages\n\n return fraction",
"def reduce(self):\n import math\n g = math.gcd(self.num, self.den)\n return Fraction(self.num//g, self.den//g)",
"def computeFraction( poi_messages, all_messages ):\n fraction = 0.\n \n if all_messages == 'NaN':\n return fraction\n \n if poi_messages == 'NaN':\n poi_messages = 0\n \n fraction = 1.0*poi_messages/all_messages\n\n return fraction",
"def divide(self, numerator, denominator):\r\n if denominator == 0:\r\n if not self.has_error:\r\n self.has_error = True\r\n self.error += DIVISION_BY_ZERO\r\n return 1\r\n else:\r\n return numerator / denominator",
"def fractionify_and_reduce(n):\n nume, denom = fractionify(n)\n return reduce(nume, denom)",
"def numerator(num):\n require_type(isa(num,fractions.Fraction) or isa(num,int),\n 'parameter of numerator must be a fraction or integer')\n return num.numerator",
"def lowComplexityFraction(self):\n length = len(self)\n if length:\n lowerCount = len(list(filter(str.islower, self.sequence)))\n return float(lowerCount) / length\n else:\n return 0.0",
"def div(self):\n a = self.nums()\n x = LibraryFunctions.per(a, 0.9) - LibraryFunctions.per(a, 0.1)\n return x / 2.58",
"def computeFraction(poi_messages, all_messages):\n fraction = 0.\n if all_messages != \"NaN\":\n fraction = float(poi_messages)/float(all_messages)\n else:\n fraction = 0\n return fraction",
"def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)",
"def getfloat(self, fraction) -> float:\n self.numerator_a = fraction.numerator_a\n self.denominator_b = fraction.denominator_b\n self.fraction = str(self.numerator_a) + '/' + str(self.denominator_b)\n return super().__float__()",
"def divide(value, arg):\n\treturn float(value) / float(arg)",
"def rF(count, total):\n\treturn float(count)/float(total)",
"def test_div(self):\n newvalues= Fraction(7,10)/Fraction(4,5)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"35/40\")",
"def divide(numerator, denominator):\n ensure_divisibility(numerator, denominator)\n return numerator // denominator",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def getPercent(*args):"
] | [
"0.60595304",
"0.58882415",
"0.5839464",
"0.5791325",
"0.57302094",
"0.56365466",
"0.55660135",
"0.5556466",
"0.5539381",
"0.5531349",
"0.54849494",
"0.5468713",
"0.544012",
"0.5428886",
"0.5428331",
"0.54159963",
"0.5400832",
"0.5396846",
"0.53820527",
"0.5370296",
"0.53531057",
"0.5352327",
"0.5337898",
"0.53340673",
"0.5314313",
"0.53039515",
"0.5298871",
"0.5280298",
"0.52730745",
"0.5271949"
] | 0.60914195 | 0 |
Parses the contents of the function groups file. | def ParseFunctionGroups(cwp_function_groups_lines):
# The order of the groups mentioned in the cwp_function_groups file
  # matters. A function declared in a file belongs to the first mentioned
  # group whose path matches the path of that file.
# It is possible to have multiple paths that belong to the same group.
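  # A hypothetical line such as 'group_name /path/prefix' is split on
  # whitespace and returned as the tuple ('group_name', '/path/prefix').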
return [tuple(line.split()) for line in cwp_function_groups_lines] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(options):\n global p_entering_group_block, p_exiting_group_block, p_group_next, p_group_name, p_group_set\n \n in_group_block = False\n \n group_list = []\n group_elem = {}\n \n order_keys = []\n \n with open(options.input_file, mode=fd_read_options) as fd_input:\n for line in fd_input:\n line = line.strip()\n \n # We match a group block\n if p_entering_group_block.search(line):\n in_group_block = True\n \n # We are in a group block\n if in_group_block:\n if p_group_name.search(line):\n group_name = p_group_name.search(line).group('group_name')\n group_elem['name'] = group_name\n if not('name' in order_keys): order_keys.append('name')\n \n # We match a setting\n if p_group_set.search(line):\n group_key = p_group_set.search(line).group('group_key')\n if not(group_key in order_keys): order_keys.append(group_key)\n \n group_value = p_group_set.search(line).group('group_value').strip()\n group_value = re.sub('[\"]', '', group_value)\n \n group_elem[group_key] = group_value\n \n # We are done with the current group id\n if p_group_next.search(line):\n group_list.append(group_elem)\n group_elem = {}\n \n # We are exiting the group block\n if p_exiting_group_block.search(line):\n in_group_block = False\n \n return (group_list, order_keys)",
"def _parse_groups_file(self, group_labels_file):\n groups = None\n if group_labels_file and os.path.exists(group_labels_file):\n groups = pd.read_csv(group_labels_file)\n if 'subject_id' not in groups.columns or 'group' not in groups.columns:\n raise AttributeError(\n \"Please add columns names 'subject_id' and 'group' to the group labels file.\"\n )\n return groups",
"def read_group():\n groups = list()\n with open(current_app.config[\"GROUP_PATH\"]) as group_file:\n for line in group_file:\n (name, _, gid, members) = [e.strip() for e in line.split(\":\")]\n if members:\n members = members.split(\",\")\n else:\n members = []\n groups.append(Group(name, int(gid), members))\n return groups",
"def openFile(self, filename):\n self._tree = ET.parse(filename)\n self._root = self._tree.getroot()\n self._groups = self._root.findall('group')",
"def parseGroupsFileToDictOfChilden(groups_file):\n return parseGroupsFileToDict(groups_file, \"children\")",
"def read_grp(fname):\n global DAYS\n uint_types = [DAYS,\n 'Current crop type', \n 'Current residue on ground type', \n 'Previous residue on ground type', \n 'Old residue on ground type', \n 'Current dead root type', \n 'Previous dead root type', \n 'Old dead root type']\n\n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'rb')\n for i, line in enumerate(fid.readlines()):\n line_as_list = line.strip().split()\n\n if len(line_as_list) == 0:\n continue\n\n elif line_as_list[0][0] == '#':\n continue\n\n elif line_as_list[0] == 'int':\n try:\n meta[line[1]] = int(line[2])\n except:\n pass\n \n elif line_as_list[0] == 'float':\n try:\n meta[line[1]] = float(line[2])\n except:\n pass\n\n elif line_as_list[0] == 'char':\n continue\n\n elif line_as_list[0][0] == '{':\n cname = line.strip()[1:-1].replace(r'kg/m', r'kg*m**-1') \\\n .replace(r'kg/m**2', r'kg*m**-2') \\\n .replace(r'kg/m**3', r'kg*m**-3') \\\n .replace(r'kg/m**4', r'kg*m**-4') \\\n .replace(r'mm/hr', r'mm*hr**-1') \\\n .replace(r'mm/h', r'mm*hr**-1') \\\n .replace(r'm/day', r'm*day**-1') \\\n .replace(r'g/cc', r'g*cc**-1') \\\n .replace(r'kg-s/m**4', r'kg-s*m**-4') \\\n .replace(r's/m', r's*m**-1') \\\n .replace(r'Irrigation_volume_supplied/unit_area',\n r'Irrigation_volume_supplied*unit_area**-1')\n header.append(cname)\n\n else:\n if len(header) == len(line_as_list):\n \n # if we are here and data == None we need to initialize the data dictionary\n if data == None:\n data = {}\n for cname in header:\n typecode = ('f', 'h')[any([cname==s for s in uint_types])]\n data[cname] = array.array(typecode)\n\n for (cname, string) in zip(header, line_as_list):\n if any([cname==s for s in uint_types]):\n value = int(string)\n else:\n value = float(string)\n\n if cname == DAYS:\n\n if value in set(data[DAYS]):\n break\n\n data[cname].append(value)\n\n else:\n raise Exception('Failed to parse line %i, unexpected number of columns.'%(i+1))\n \n fid.close()\n\n # pack the table data into numpy arrays\n for (cname, v) in data.items():\n dtype = (np.float32, np.int16)[any([cname==s for s in uint_types])]\n data[cname] = np.array(v, dtype=dtype)\n\n return (meta, data)",
"def load_groups(filename):\n with open(filename, 'rb') as f:\n for line in f:\n group, users = line.split(':', 1)\n yield group, frozenset(users.split())",
"def readGroups(self):\n\t\tgroups = self._fileSystem.readGroups()\n\t\tif groups is None:\n\t\t\treturn\n\t\treturn groups",
"def _group(codes, group_file):\n \n groups, size = {}, len(codes)\n group_temp = 'oma_temporary_groups.tsv'\n if os.path.isfile(group_temp):\n info('Loading pre-existed temporary OMA ortholog groups (oma_temporary_'\n 'groups.tsv) ...')\n for blocks in _lines(group_temp):\n groups[blocks[0]] = blocks[1:]\n else:\n info('Parsing OMA ortholog groups (oma-groups.txt.gz) ...')\n for blocks in _lines(group_file):\n number, finger, entries = blocks[0], blocks[1], blocks[2:]\n ids = [entry for entry in entries if entry[:5] in codes]\n if size == len(set(i[:5] for i in ids)):\n groups[finger] = ids\n if groups:\n with open(group_temp, 'w') as o:\n o.writelines('{}\\t{}\\n'.format(k, '\\t'.join(v))\n for k, v in groups.items())\n info('Yield {} one-to-one ortholog groups for {} query items.'.format(\n len(groups), size))\n return groups",
"def read_groups_old(handle):\n log(\"Starting pre-pass though the MAF file\")\n seq_tech_strains = set() #will make into a list of 2-tuples\n #handle = open(maf)\n tech = \"\"\n strain = \"\"\n for line in handle:\n if line.startswith(\"RD\"):\n assert not tech and not strain\n elif line.startswith(\"ST\\t\"):\n tech = line[3:].strip()\n elif line.startswith(\"SN\\t\"):\n strain = line[3:].strip()\n elif line.startswith(\"ER\"):\n assert tech or strain, \"Missing read group data!\"\n seq_tech_strains.add((tech, strain))\n tech = \"\"\n strain = \"\"\n #handle.close()\n seq_tech_strains = sorted(list(seq_tech_strains))\n read_group_ids = dict()\n for id, (tech, strain) in enumerate(seq_tech_strains):\n platform = clean_platform(tech)\n assert len(strain.split())<=1, \"Whitespace in strain %r (SN line)\" % strain\n read_group_id = (\"%s_%s\" % (tech, strain)).strip(\"_\")\n read_group_ids[(tech, strain)] = read_group_id\n print \"@RG\\tID:%s\\tPL:%s\\tSM:%s\" % (read_group_id, platform, strain)\n del strain, tech, seq_tech_strains\n return read_group_ids",
"def get_groups(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return []\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n groups = []\n for group in config_json[\"groups\"]:\n groups.append(group[\"name\"])\n return groups",
"def parse(file):\r\n # Check cache before parsing file\r\n global _parsed_file_cache\r\n if file in _parsed_file_cache:\r\n return _parsed_file_cache[file]\r\n \r\n FuncDefnRegexp = r'^def.*\\{'\r\n FuncEndRegexp = r'^\\}.*$'\r\n with open(file, 'r') as f:\r\n data = f.read()\r\n file_lines = data.split(\"\\n\")\r\n all_fns = []\r\n fn_lines = ''\r\n for line in file_lines:\r\n if len(fn_lines) > 0:\r\n if re.match(FuncEndRegexp, line):\r\n fn_lines += line + \"\\n\"\r\n all_fns.append(fn_lines)\r\n fn_lines = ''\r\n else:\r\n fn_lines += line + \"\\n\"\r\n elif re.match(FuncDefnRegexp, line):\r\n fn_lines += line + \"\\n\"\r\n \r\n func_results = []\r\n for fn in all_fns:\r\n func_results += [GroovyFunctionParser.parse(fn)]\r\n \r\n _parsed_file_cache[file] = func_results\r\n return func_results",
"def fromgroups(args):\n from jcvi.formats.bed import Bed\n\n p = OptionParser(fromgroups.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) < 2:\n sys.exit(not p.print_help())\n\n groupsfile = args[0]\n bedfiles = args[1:]\n beds = [Bed(x) for x in bedfiles]\n fp = open(groupsfile)\n groups = [row.strip().split(\",\") for row in fp]\n for b1, b2 in product(beds, repeat=2):\n extract_pairs(b1, b2, groups)",
"def parse_anime_group(filename):\n print_info('Extracting hash from {0}'.format(filename))\n for regex in ANIME_GROUP_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n ep_group = m.group('Group')\n print_info('Extracted Group: {0}'.format(ep_group))\n return ep_group\n \n return None",
"def load_from_file(rfile=\"data_ass6.txt\"):\n datafile = open(rfile, \"r\")\n linenum = 0\n for line in datafile:\n linenum += 1\n if linenum == 1:\n groupdata = line.strip().split()\n group = StudentGroup(groupdata[0])\n group.maxidy = int(groupdata[1])\n else:\n studentdata = line.strip().split()\n student = Student(int(studentdata[0]), studentdata[1])\n student.active = bool(studentdata[2])\n group.studentlist.append(student)\n for i in range(3, len(studentdata), 2):\n student.grades[studentdata[i]] = float(studentdata[i + 1])\n datafile.close()\n print \"Data loaded from \" + rfile\n return group",
"def parseGroupsFileToDict(groups_file, thing_to_map):\n groups = {}\n printVerbose(\"Reading count file: %s\" % groups_file)\n # collect the seed names, and the children sequence names\n i = 0\n nb_lines = 0\n for line in open(groups_file, 'r'):\n nb_lines +=1\n data = line.rstrip().split(\"\\t\")\n seed = data[0]\n children = \"\"\n if thing_to_map == \"children\":\n if len(data) > 1:\n children = ' '.join(list(set(data[1:])))\n groups[seed] = children\n if thing_to_map == \"counts\":\n if len(data) > 1:\n children = data[1]\n groups[seed] = len(children.split(\" \"))\n\n if nb_lines % 100000 == 0:\n printVerbose(\"%s lines processed\" % nb_lines)\n printVerbose(\"Done reading count file.\")\n return groups",
"def load_groups(files):\n groups = defaultdict(list)\n for f in files:\n d = np.load(f, allow_pickle=True)\n gkey = to_group_key(d['args'].item()._get_kwargs())\n groups[gkey].append((f, d))\n return groups",
"def load_group_from_config(self):\n\n group_file_name = \"cicada/config/group.yaml\"\n if os.path.isfile(group_file_name):\n self.group_data = dict()\n with open(group_file_name, 'r') as stream:\n self.group_data = yaml.safe_load(stream)\n self.all_groups = deepcopy(self.group_data)\n if self.group_data:\n keys_to_del = []\n for key, value in self.group_data.items():\n missing_file = False\n for file in value:\n if file not in self.nwb_path_list.values():\n missing_file = True\n if missing_file:\n keys_to_del.append(key)\n for key in keys_to_del:\n self.group_data.pop(key)\n self.grouped_labels = []\n if self.group_data:\n self.grouped = True\n for value in self.group_data.values():\n nwb_file_list = []\n for file in value:\n io = NWBHDF5IO(file, 'r')\n nwb_file = io.read()\n self.data_dict[nwb_file.identifier] = nwb_file\n nwb_file_list.append(nwb_file.identifier)\n self.grouped_labels.append(nwb_file_list)\n self.showGroupMenu.setEnabled(True)\n self.addGroupDataMenu.setEnabled(True)\n self.populate_menu()\n else:\n self.showGroupMenu.setEnabled(False)\n self.addGroupDataMenu.setEnabled(False)\n self.showGroupMenu.clear()\n self.addGroupDataMenu.clear()",
"def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)",
"def load():\n with open('groups.pkl', \"rb\") as f:\n while True:\n try:\n groups = yield pickle.load(f)\n except EOFError:\n break\n return groups",
"def read_fof_group(self, grnr, datasets=None):\n\n offset = self.cat[\"GroupOffset\"][grnr]\n count = self.cat[\"GroupLen\"][grnr]\n return self.read_particle_range(offset, count, datasets)",
"def parseGRO(filename):\n\tprint(\"Cannot parse .gro files yet.\")\n\texit()",
"def _get_group_from_file(self, wanted_group):\n wanted_gid = \"\"\n if (isinstance(wanted_group, int) or\n re.match(\"^\\\\d+$\", wanted_group)):\n wanted_gid = str(wanted_group)\n wanted_group = \"\"\n try:\n ingroup = open(self.group_file)\n except (IOError, OSError):\n return (\"\", \"\", \"\")\n else:\n for line in ingroup:\n (group, dummy, gid, users) = line.strip().split(':')\n if wanted_group and group == wanted_group:\n return (group, gid, users)\n if wanted_gid and gid == wanted_gid:\n return (group, gid, users)\n ingroup.close()\n return (\"\", \"\", \"\")",
"def testNetgroupParser(self):\n parser = linux_file_parser.NetgroupParser()\n dat = u\"\"\"group1 (-,user1,) (-,user2,) (-,user3,)\n#group1 comment\ngroup2 (-,user4,) (-,user2,)\n\nsuper_group (-,user5,) (-,user6,) (-,文德文,) group1 group2\nsuper_group2 (-,user7,) super_group\nsuper_group3 (-,user5,) (-,user6,) group1 group2\n\"\"\"\n dat_fd = StringIO.StringIO(dat)\n\n config_lib.CONFIG.Set(\"Artifacts.netgroup_user_blacklist\", [\"user2\",\n \"user3\"])\n out = list(parser.Parse(None, dat_fd, None))\n users = []\n for result in out:\n if isinstance(result, rdfvalue.Anomaly):\n self.assertTrue(utils.SmartUnicode(u\"文德文\") in result.symptom)\n else:\n users.append(result)\n\n self.assertItemsEqual([x.username for x in users],\n [u\"user1\", u\"user4\", u\"user5\", u\"user6\", u\"user7\"])\n\n dat_fd.seek(0)\n config_lib.CONFIG.Set(\"Artifacts.netgroup_filter_regexes\",\n [r\"^super_group3$\"])\n out = list(parser.Parse(None, dat_fd, None))\n self.assertItemsEqual([x.username for x in out],\n [u\"user5\", u\"user6\"])",
"def parse(self, f):\n \n for line in f:\n self.parse_line(line)",
"def load_grouping():\n if os.path.isfile('grouping.json'):\n logger.debug(\"Grouping file exists. Loading..\")\n with open('grouping.json', 'r+') as f:\n try:\n grouping_json = json.loads(f.read())\n except ValueError:\n grouping_json = json.loads(\"{}\")\n logger.debug(\"Error parsing grouping.json.\")\n else:\n grouping_json = json.loads(\"{}\")\n return grouping_json",
"def _parse_groups(self):\n for root in self.roots:\n for group in root.iter('group'):\n group_name = group.attrib.get('name', '')\n for enum in group.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.group_to_enum_list[group_name].append(enum_name)",
"def advent_9a(file_name):\n with open(file_name) as input_file:\n line = input_file.readline()\n group_scores = []\n get_group(line, 0, 1, group_scores, [])\n return sum([value for (_, value) in group_scores])",
"def all_groups(request):\r\n group = Group()\r\n return HttpResponse(json.dumps(group.parseFile()))",
"def read_pgroups(in_file):\n out = {}\n with open(in_file) as in_handle:\n for line in (l for l in in_handle if not l.startswith(\"#\")):\n locus, alleles, group = line.strip().split(\";\")\n for allele in alleles.split(\"/\"):\n out[\"HLA-%s%s\" % (locus, allele)] = group\n return out"
] | [
"0.66318685",
"0.65091795",
"0.6411748",
"0.62168884",
"0.614106",
"0.613057",
"0.6062225",
"0.60367906",
"0.5948503",
"0.5924105",
"0.59104806",
"0.590227",
"0.586359",
"0.5841075",
"0.5809704",
"0.57882077",
"0.5773083",
"0.568154",
"0.5659379",
"0.5616858",
"0.5614927",
"0.56119347",
"0.55925405",
"0.5575373",
"0.5539356",
"0.55341566",
"0.55300856",
"0.54630244",
"0.54396844",
"0.5437995"
] | 0.71220386 | 0 |
Parses a file that contains the output of the pprof top command. | def ParsePprofTopOutput(file_name):
pprof_top_statistics = {}
# In the pprof top output, the statistics of the functions start from the
# 6th line.
with open(file_name) as input_file:
pprof_top_content = input_file.readlines()[6:]
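  # Each statistics line is expected to look roughly like (hypothetical
  # values): '10ms 1.25% 3.75% 40ms 5.00%  foo /lib/foo.so', where the five
  # numeric columns are captured by FUNCTION_STATISTIC_REGEX and the rest of
  # the line carries the function and file name.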
for line in pprof_top_content:
function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)
flat, flat_p, sum_p, cum, cum_p = function_statistic_match.groups()
flat_p = str(float(flat_p) / 100.0)
sum_p = str(float(sum_p) / 100.0)
cum_p = str(float(cum_p) / 100.0)
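    # The percentage columns are converted to fraction strings, e.g. a value
    # of '50' percent becomes '0.5'.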
lookup_index = function_statistic_match.end()
function_and_file_name = line[lookup_index + 2 : -1]
key = MakePprofFunctionKey(function_and_file_name)
pprof_top_statistics[key] = (flat, flat_p, sum_p, cum, cum_p)
return pprof_top_statistics | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ParsePprofTreeOutput(file_name):\n\n # In the pprof output, the statistics of the functions start from the 9th\n # line.\n with open(file_name) as input_file:\n pprof_tree_content = input_file.readlines()[9:]\n\n pprof_tree_statistics = defaultdict(lambda: defaultdict(float))\n track_child_functions = False\n\n # The statistics of a given function, its parent and child functions are\n # included between two separator marks.\n # All the parent function statistics are above the line containing the\n # statistics of the given function.\n # All the statistics of a child function are below the statistics of the\n # given function.\n # The statistics of a parent or a child function contain the calls, calls\n # percentage, the function name and the file where the function is declared.\n # The statistics of the given function contain the flat, flat percentage,\n # sum percentage, cummulative, cummulative percentage, function name and the\n # name of the file containing the declaration of the function.\n for line in pprof_tree_content:\n separator_match = SEPARATOR_REGEX.search(line)\n\n if separator_match:\n track_child_functions = False\n continue\n\n parent_function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)\n\n if parent_function_statistic_match:\n track_child_functions = True\n lookup_index = parent_function_statistic_match.end()\n parent_function_key_match = \\\n FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)\n lookup_index = parent_function_key_match.end()\n parent_function_key = MakePprofFunctionKey(line[lookup_index:-1])\n continue\n\n if not track_child_functions:\n continue\n\n child_function_statistic_match = \\\n CHILD_FUNCTION_PERCENTAGE_REGEX.search(line)\n child_function_percentage = \\\n float(child_function_statistic_match.group(1))\n lookup_index = child_function_statistic_match.end()\n child_function_key_match = \\\n FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)\n lookup_index = child_function_key_match.end()\n child_function_key = MakePprofFunctionKey(line[lookup_index:-1])\n\n pprof_tree_statistics[parent_function_key][child_function_key] += \\\n child_function_percentage / 100.0\n\n return pprof_tree_statistics",
"def get_top_info(dut, proc_name=None):\n exclude_keys = ['total', 'used', 'free', 'buff_cache']\n if proc_name:\n command = \"top -n 1 b | grep {}\".format(proc_name)\n output = st.show(dut, command)\n rv = [{each_key: each_line[each_key] for each_key in each_line if each_key not in exclude_keys}\n for each_line in output]\n rv = filter_and_select(rv, None, {'command': proc_name})\n else:\n command = \"top -n 1 b\"\n output = st.show(dut, command)\n rv = [{each_key: each_line[each_key] for each_key in each_line if each_key not in exclude_keys}\n for each_line in output[1:]]\n return rv",
"def getTop():\n\n p = Popen([\"top\", \"-n\", \"1\"], stdout=PIPE, close_fds=True)\n f = p.communicate()\n\n lines = f[0].split(\"\\n\")\n mem = lines[0]\n mem = mem.split(\",\")\n mem[0] = mem[0].split(\"\\x1b[H\\x1b[JMem:\")[1]\n\n for i in range(0,len(mem)):\n mem[i] = mem[i].split()\n\n cpu = lines[1]\n cpu = cpu.split(\",\")\n\n for i in range(0,len(cpu)): \n cpu[i] = cpu[i].split() \n \n usedRam = float(mem[0][0].split(\"K\")[0])/10000.0\n usedRam = \"%.2f\" % usedRam\n\n freeRam = float(mem[1][0].split(\"K\")[0])/10000.0\n freeRam = \"%.2f\" % freeRam\n\n cpuUser = int(cpu[0][1].split(\"%\")[0])\n cpuSystem = int(cpu[0][3].split(\"%\")[0])\n cpuIdle = int(cpu[0][7].split(\"%\")[0])\n \n cpuData = { \"user\" : cpuUser, \"system\" : cpuSystem, \"idle\" : cpuIdle}\n ramData = { \"free\" : freeRam, \"used\": usedRam }\n \n data = {\"mem\" : ramData, \"cpu\" : cpuData}\n return data",
"def do_topfile(self, path=None,category='top'):\n s = StringIO()\n with open(self.path) as f:\n for line in f:\n if line.startswith('top') or line.startswith('Cpu') or \\\n line.startswith('Tasks') or line.startswith('Mem') or \\\n line.startswith('Swap'):\n s.write(line)\n s.seek(0)\n oswdata=pd.read_csv(file, comment=\"L\", sep=\"\\n\", names='a')\n raw=oswdata[oswdata.iloc[:,0].str.startswith(\"top\")].dropna(axis=1)\n raw['a']=raw['a'].str.replace('days, ','days ')\n raw['a']=raw['a'].str.replace('top - ','')\n raw['a']=raw['a'].str.replace(' up ',',')\n raw['a']=raw['a'].str.replace('users','')\n raw['a']=raw['a'].str.replace('load average: ','')\n top=raw['a'].str.split(',', 5, expand=True).rename(columns={0:'ts', 1:'uptime', 2:'users', 3:'load1', 4:'load10', 5:'load15'})\n top=top.reset_index().rename(columns={'index': 'pos'})\n data=top.copy()\n #print top\n \n raw=oswdata[oswdata.iloc[:,0].str.startswith(\"Tasks\")].dropna(axis=1)\n raw['a']=raw['a'].str.replace('Tasks:','')\n raw['a']=raw['a'].str.replace(' total,',',')\n raw['a']=raw['a'].str.replace(' running,',',')\n raw['a']=raw['a'].str.replace(' sleeping,',',')\n raw['a']=raw['a'].str.replace(' stopped,',',')\n raw['a']=raw['a'].str.replace(' zombie','')\n task=raw['a'].str.split(',', 4, expand=True).rename(columns={0:'tot', 1:'run', 2:'sleep', 3:'stop', 4:'zom'})\n task=task.reset_index().rename(columns={'index': 'pos'})\n task['pos']=task['pos'].apply(lambda d: int(d)-1)\n data=pd.merge(data,task,how='outer',on='pos')\n #print task\n \n raw=oswdata[oswdata.iloc[:,0].str.startswith(\"Cpu\")].dropna(axis=1)\n raw['a']=raw['a'].str.replace('Cpu\\(s\\):','')\n raw['a']=raw['a'].str.replace('us,',',')\n raw['a']=raw['a'].str.replace('sy,',',')\n raw['a']=raw['a'].str.replace('ni,',',')\n raw['a']=raw['a'].str.replace('id,',',')\n raw['a']=raw['a'].str.replace('wa,',',')\n raw['a']=raw['a'].str.replace('hi,',',')\n raw['a']=raw['a'].str.replace('si,',',')\n raw['a']=raw['a'].str.replace('st','')\n cpu=raw['a'].str.split(',', 7, expand=True).rename(columns={0:'us', 1:'sy', 2:'ni', 3:'id', 4:'wa', 5:'hi', 6:'si', 7:'st'})\n cpu=cpu.reset_index().rename(columns={'index': 'pos'})\n cpu['pos']=cpu['pos'].apply(lambda d: int(d)-2)\n data=pd.merge(data,cpu,how='outer',on='pos')\n #print cpu\n \n \n raw=oswdata[oswdata.iloc[:,0].str.startswith(\"Mem:\")].dropna(axis=1)\n raw['a']=raw['a'].str.replace('Mem:','')\n raw['a']=raw['a'].str.replace('k total,',',')\n raw['a']=raw['a'].str.replace('k used,',',')\n raw['a']=raw['a'].str.replace('k free,',',')\n raw['a']=raw['a'].str.replace('k buffers','')\n mem=raw['a'].str.split(',', 3, expand=True).rename(columns={0:'Memtot', 1:'Memused', 2:'Memfree', 3:'Membuf'})\n mem=mem.reset_index().rename(columns={'index': 'pos'})\n mem['pos']=mem['pos'].apply(lambda d: int(d)-3)\n data=pd.merge(data,mem,how='outer',on='pos')\n #print mem\n \n raw=oswdata[oswdata.iloc[:,0].str.startswith(\"Swap:\")].dropna(axis=1)\n raw['a']=raw['a'].str.replace('Swap:','')\n raw['a']=raw['a'].str.replace('k total,',',')\n raw['a']=raw['a'].str.replace('k used,',',')\n raw['a']=raw['a'].str.replace('k free,',',')\n raw['a']=raw['a'].str.replace('k cached','')\n swap=raw['a'].str.split(',', 3, expand=True).rename(columns={0:'Swaptot', 1:'Swapused', 2:'Swapfree', 3:'cache'})\n swap=swap.reset_index().rename(columns={'index': 'pos'})\n swap['pos']=swap['pos'].apply(lambda d: int(d)-4)\n #print swap\n data=pd.merge(data,swap,how='outer',on='pos')\n print(data)\n return data",
"def readProf(fname, wdir='.'):\n\n fname = path.join(wdir, fname)\n x, y = [], []\n\n with open(fname) as f:\n lines = f.readlines()\n\n for line in lines:\n elements = line.split()\n\n if elements[0] == '#':\n pass\n else:\n x.append(float(elements[0]))\n y.append(float(elements[1]))\n\n return x, y",
"def parse_trace_file(filename):\n f = open(filename, 'r')\n trace_data = f.read()\n\n messages = parse_atm_messages(trace_data) + parse_host_messages(trace_data)\n f.close()\n messages.sort()\n\n return messages",
"def read_cpu_stats(target_file):\n test_line = target_file.readline()\n if \"CPU\" in test_line:\n logical_processors = target_file.readline().strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n if \"logical processors\" in test_line:\n logical_processors = test_line.strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n return CpuStats('', '', '')",
"def parse_output_file(fname):\n d = defaultdict(lambda: [])\n with open(fname, \"r\") as f:\n curr_topic = None\n expected_count = 0\n curr_count = 0\n for line in f:\n line = line.strip()\n if curr_count < expected_count and curr_topic is not None:\n d[curr_topic].append(line)\n curr_count += 1\n elif curr_topic is None:\n m = re.match(r\"Top ([0-9]+) labels for topic ([0-9]+) are:\", line)\n if m:\n expected_count = int(m.group(1))\n curr_topic = int(m.group(2))\n curr_count = 0\n else:\n curr_count = 0\n expected_count = 0\n curr_topic = None\n return d",
"def _memtop_exec(options, user_args):\n if options.outfile is None:\n out = sys.stdout\n else:\n out = open(options.outfile, 'w')\n\n tracemalloc.start()\n\n _load_and_exec(options.file[0], user_args)\n\n snapshot = tracemalloc.take_snapshot()\n display_top(snapshot, limit=options.limit, file=out)",
"def parse_dump_file(filename):\n with open(filename, 'r') as dump_file:\n results = dict()\n for line in dump_file:\n match = re.search(r'Janky frames: (\\d+) \\(([\\d\\.]+)%\\)', line)\n if match is not None:\n results['jankNum'] = int(match.group(1))\n results['jank_percent'] = float(match.group(2))\n return results",
"def main():\n parse_file(sys.argv[1])",
"def parseFile(self, file):\n return_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip()\n\n if line:\n if line.startswith('Left'):\n return_dict['Left'] = self.getStats(f)\n elif line.startswith('Right'):\n return_dict['Right'] = self.getStats(f)\n elif line.startswith('Aligned'):\n return_dict['Aligned'] = self.getStats(f, line)\n elif line.startswith('Reads'):\n return_dict['Reads'] = self.getStats(f)\n else:\n matched_summary = re.search('([\\d|.%]+)', line)\n return_dict['Overall'] = matched_summary.group(1)\n\n #return_dict['Summary'] = re.search('(\\d+\\.\\d+%)', line).group(1)\n\n return return_dict",
"def parsing(pfam_file):\n pfam_file = open(pfam_file)\n outfile = open(\"parsed_out_pfam.txt\", \"w\")\n pfam_dict = {}\n counter_proteins = 0\n for line in pfam_file:\n if not line.startswith(\"#\"):\n columns = line.strip()\n columns = columns.split()\n if columns[2] not in pfam_dict:\n pfam_dict[columns[2]] = columns[2],columns[0], columns[4]\n counter_proteins += 1\n line = \"%-20s: %-20s %-20s \\n\" %(columns[2], columns[0], columns[4])\n outfile.write(line)\n pfam_file.close()\n print counter_proteins\n return None",
"def loadProfile(fname):\n \n x = np.loadtxt(fname)\n return x[:,1]",
"def read_profiles(filename):\n profiles = []\n with gzip.open(filename, mode='rt', encoding='utf8') as infile:\n for line in infile:\n profiles.append(Counter(line.split()))\n return profiles",
"def parse_stats(output):\n lines = [line for line in output if \"[Stats]\" in line]\n stats = {\n 'totals': {'time': 0, 'tasks': 0, 'avg': 0}\n }\n for line in lines:\n m = re.search(r'\\((\\d+) ms\\).+\\((\\d+)\\).+\\((\\d+) us.+\\)', line)\n if not m:\n continue\n dt, tasks, avg = map(int, m.groups())\n if 'totals' in line:\n stats['totals'] = {'time': dt, 'tasks': tasks, 'avg': avg}\n return stats",
"def main(output_file):\n with open(output_file, 'w+') as fl:\n poor_perf_stats = pstats.Stats('poor_perf.log', stream=fl)\n good_perf_stats = pstats.Stats('good_perf.log', stream=fl)\n\n poor_perf_stats.sort_stats('cumtime')\n\n fl.write('--------------------------------------------\\n')\n fl.write('POOR PERFORMANCE STATS\\n')\n fl.write(f\"Time: {poor_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {poor_perf_stats.total_calls}\\n\")\n fl.write(f\"Top cumulative times\\n\")\n poor_perf_stats.print_stats(20)\n\n fl.write('--------------------------------------------\\n')\n fl.write('GOOD PERFORMANCE STATS\\n')\n fl.write(f\"Time: {good_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {good_perf_stats.total_calls}\\n\")\n fl.write(f\"Top 20 cumulative times\\n\")\n good_perf_stats.print_stats(20)",
"def _memtop_setup_parser(parser):\n parser.add_argument('file', nargs=1, help='Python script to check for memory usage.')\n parser.add_argument('-o', default=None, action='store', dest='outfile',\n help='Name of output file. By default, output goes to stdout.')\n parser.add_argument('-l', '--limit', action='store', type=int, default=20, dest='limit',\n help='Limit the number of lines in the output.')",
"def get_top():\n print(\"This processes are using the cpu the most:\")\n print(os.system(\"ps axo %cpu,pid,euser,cmd | sort -nr | head -n 5\"))",
"def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)",
"def parse_file(filename='activity_1321393768.tcx'):\n\n # tree = etree.parse(filename)\n tree = getattr(etree, 'parse')(filename)\n xmlstr = '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}'\n points = tree.findall('//'+xmlstr+'Trackpoint')\n # dist = [r.find(xmlstr+'DistanceMeters').text for r in points]\n heart = [int(r.find(xmlstr+'HeartRateBpm')[0].text) for r in points]\n speed = [float(r.find(xmlstr+'Extensions')[0][0].text) for r in points]\n pace = [float(curr)*3600/1000 for curr in speed]\n time = range(0, len(speed))\n return (time, heart, pace)",
"def parse_file_with_protobuf(path_to_file, db):\r\n stats = None\r\n # Perhaps an Android Q protobuf file\r\n try:\r\n stats = read_usage_stats_pb_file(path_to_file)\r\n\r\n except:\r\n print('Parse error - Non XML and Non Protobuf file? at: ' + path_to_file)\r\n\r\n add_entries_to_db(stats, db)",
"def parse_scores_file(filename):\n scores = []\n with open(filename, \"r\") as scores_file:\n for line in scores_file:\n line = line.rstrip()\n info = line.split(\":\")\n scores.append(Score(info[0], int(info[1]), float(info[2])))\n return scores",
"def parser(filename):\n\n regex = re.compile(\n # prolog\n r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info",
"def exec_parser(src_file: str):\n stdin = open(src_file)\n result = subprocess.run(['php', 'parse.php'], check=True, stdout=subprocess.PIPE, stdin=stdin)\n return result.stdout",
"def parse():\n file = open(INPUT, 'r')\n\n expect_eff = False\n expect_vout = False\n\n eff_dict = {}\n vout_dict = {}\n\n for line in file:\n if line.startswith('PCC'):\n id = line.strip()\n expect_eff = True\n elif expect_eff:\n if line.startswith('efficiency'):\n eff_str = line.strip().split(':')[1]\n # get rid of % symbol\n eff = int(eff_str.split('%')[0])\n eff_dict[id] = .01 * eff\n\n expect_vout = True\n\n expect_eff = False\n elif expect_vout:\n if line.startswith('output voltage'):\n vout_str = line.strip().split(':')[1]\n vout = int(vout_str)\n vout_dict[id] = vout\n\n expect_vout = False\n\n with open(EFF_OUTPUT, 'w') as f:\n json.dump(eff_dict, f)\n\n with open(VOUT_OUTPUT, 'w') as f:\n json.dump(vout_dict, f)\n\n # plot stats of eff and vout\n plot_hist(eff_dict.values(), 'Efficiency', 'eff', bins=50)\n plot_hist(vout_dict.values(), 'V_out', 'vout', bins=50)",
"def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n proc_stat.close()\n return ret",
"def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')",
"def parse_trace_file(file_path):\n\n if not os.path.isfile(file_path):\n LOGGER.error(\"Trace file '%s' doesn't exist.\", file_path)\n return None\n\n try:\n with open(file_path, \"r\", newline=\"\\n\") as f:\n data_points = f.readlines()\n except IOError:\n LOGGER.error(\"Exception while reading trace file '%s'. Terminating.\", file_path)\n sys.exit(0)\n\n data_point_tuple_list = []\n for value in range(0, len(data_points)):\n split_string = data_points[value].split(\" \")\n\n memory_address = split_string[0].strip()\n read_or_write = split_string[1].strip()\n\n current_tuple = (memory_address, read_or_write)\n data_point_tuple_list.append(current_tuple)\n\n return data_point_tuple_list",
"def parse_tcode_outfile(fname,offset=0):\n command = \"cat %s | grep -v '#' | sed '/^$/d' | sed 1d\" % (fname) \n ci,co = popen2(command)\n ci.close()\n out = co.readlines()\n co.close()\n retlist = []\n for line in out:\n line = line.strip()\n while \" \" in line: line = line.replace(\" \",\" \")\n parts = line.split(\" \",3)\n retlist.append( (int(parts[0])+offset, int(parts[1])+offset, float(parts[2]), parts[3] ) )\n return retlist"
] | [
"0.66381496",
"0.60057384",
"0.59760386",
"0.57892656",
"0.5726717",
"0.56621575",
"0.5601552",
"0.5589054",
"0.55685717",
"0.5450823",
"0.54446125",
"0.5419087",
"0.5403299",
"0.53546697",
"0.5340686",
"0.53404295",
"0.5312534",
"0.5310855",
"0.5305642",
"0.53044057",
"0.5302372",
"0.52751046",
"0.5266251",
"0.52146655",
"0.5211778",
"0.5189808",
"0.5168835",
"0.51531434",
"0.514821",
"0.5137994"
] | 0.7867616 | 0 |
Parses a file that contains the output of the pprof tree command. | def ParsePprofTreeOutput(file_name):
# In the pprof output, the statistics of the functions start from the 9th
# line.
with open(file_name) as input_file:
pprof_tree_content = input_file.readlines()[9:]
pprof_tree_statistics = defaultdict(lambda: defaultdict(float))
track_child_functions = False
# The statistics of a given function, its parent and child functions are
# included between two separator marks.
# All the parent function statistics are above the line containing the
# statistics of the given function.
# All the statistics of a child function are below the statistics of the
# given function.
# The statistics of a parent or a child function contain the calls, calls
# percentage, the function name and the file where the function is declared.
# The statistics of the given function contain the flat, flat percentage,
  # sum percentage, cumulative, cumulative percentage, function name and the
# name of the file containing the declaration of the function.
for line in pprof_tree_content:
separator_match = SEPARATOR_REGEX.search(line)
if separator_match:
track_child_functions = False
continue
parent_function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)
if parent_function_statistic_match:
track_child_functions = True
lookup_index = parent_function_statistic_match.end()
parent_function_key_match = \
FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)
lookup_index = parent_function_key_match.end()
parent_function_key = MakePprofFunctionKey(line[lookup_index:-1])
continue
if not track_child_functions:
continue
child_function_statistic_match = \
CHILD_FUNCTION_PERCENTAGE_REGEX.search(line)
child_function_percentage = \
float(child_function_statistic_match.group(1))
lookup_index = child_function_statistic_match.end()
child_function_key_match = \
FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)
lookup_index = child_function_key_match.end()
child_function_key = MakePprofFunctionKey(line[lookup_index:-1])
pprof_tree_statistics[parent_function_key][child_function_key] += \
child_function_percentage / 100.0
return pprof_tree_statistics | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ParsePprofTopOutput(file_name):\n\n pprof_top_statistics = {}\n\n # In the pprof top output, the statistics of the functions start from the\n # 6th line.\n with open(file_name) as input_file:\n pprof_top_content = input_file.readlines()[6:]\n\n for line in pprof_top_content:\n function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)\n flat, flat_p, sum_p, cum, cum_p = function_statistic_match.groups()\n flat_p = str(float(flat_p) / 100.0)\n sum_p = str(float(sum_p) / 100.0)\n cum_p = str(float(cum_p) / 100.0)\n lookup_index = function_statistic_match.end()\n function_and_file_name = line[lookup_index + 2 : -1]\n key = MakePprofFunctionKey(function_and_file_name)\n pprof_top_statistics[key] = (flat, flat_p, sum_p, cum, cum_p)\n return pprof_top_statistics",
"def main():\n parse_file(sys.argv[1])",
"def parse_tree_file(filename):\n f = open(filename)\n data = f.read()\n f.close()\n return parser.parse(data, lexer=lexer)",
"def parse_file():\r\n if len(sys.argv) < 2:\r\n print(\"Need a file\")\r\n sys.exit(1)\r\n\r\n data_input = open(sys.argv[1])\r\n\r\n data = []\r\n for line in data_input: #for each of these lines\r\n if(len(line) == 0): pass #skip empty lines\r\n split_within_line = line.split(\"\\t\") #split by tabs\r\n new_datum = Datum(split_within_line[0], split_within_line[1], split_within_line[2]) #feed splits into a Datum object\r\n data.append(new_datum) #add Datum to list of data\r\n\r\n #make a list of characters representing the issues\r\n for i in range(len(data[0].dat_votes)-1): #from 0 to the end of the list of issues from the first datum\r\n original_issues.append(chr(i+97))\r\n\r\n\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n pair = _count_parties(training_set)\r\n\r\n unpruned = induce_node_tree(training_set, original_issues,\"D\",-1)\r\n # print(\"\\n#### UNPRUNED TREE ####\\n\")\r\n # print(unpruned)\r\n\r\n unprune_acc = calc_accuracy(unpruned, tuning_set)\r\n\r\n pruned = prune_tree(unpruned, tuning_set)\r\n print(\"\\n#### PRUNED TREE ####\\n\")\r\n print(pruned)\r\n\r\n acc = calc_accuracy(pruned, training_set)\r\n\r\n # print(\"Accuracy of unpruned tree with tuning_set: \" + str(unprune_acc))\r\n print(\"Accuracy of pruned tree with tuning_set: \" + str(acc))\r\n leave_one_out_cross_validation(data)",
"def Parse(source, filename):\n lexer = Lexer(filename)\n parser = Parser(lexer, source, filename)\n\n lex.lex(object=lexer)\n yacc.yacc(module=parser, debug=0, write_tables=0)\n\n tree = yacc.parse(source)\n return tree",
"def parser(filename):\n\n regex = re.compile(\n # prolog\n r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info",
"def parse_file():\r\n # Open the text file as read only\r\n file = open(\"formulas.txt\", \"r\")\r\n\r\n # Iterate through each line in the file\r\n for formula in file:\r\n # Create a new tree based on the formula\r\n tree = parse_formula(formula.rstrip())\r\n # Formatting\r\n print(\"Formula: {}\".format(formula.rstrip()))\r\n print(\"Tree:\")\r\n tree.display()\r\n print(\"-----------------------------\")",
"def parse_file(filename='activity_1321393768.tcx'):\n\n # tree = etree.parse(filename)\n tree = getattr(etree, 'parse')(filename)\n xmlstr = '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}'\n points = tree.findall('//'+xmlstr+'Trackpoint')\n # dist = [r.find(xmlstr+'DistanceMeters').text for r in points]\n heart = [int(r.find(xmlstr+'HeartRateBpm')[0].text) for r in points]\n speed = [float(r.find(xmlstr+'Extensions')[0][0].text) for r in points]\n pace = [float(curr)*3600/1000 for curr in speed]\n time = range(0, len(speed))\n return (time, heart, pace)",
"def parse(fname):\n\n tree = pbsXml.parse(fname)\n\n if not tree:\n tools.error('failed to parse pbsdump xml file ' + fname)\n return 0\n\n root = tree.getroot()\n\n nodes = dict() # Hold list of nodes\n\n # Iterate on all Node items\n for child in root.findall('Node'):\n # Get node name\n name = child.find('name').text\n\n # Build new entry for the given node\n nodes[name] = dict()\n node = nodes[name]\n matches = GET_NODE_ID.match(name)\n node[id] = int(matches.group(1))\n\n # Collect data\n node['name'] = name\n node['np'] = int(child.find('np').text)\n node['state'] = child.find('state').text\n node['power_state'] = child.find('power_state').text\n data = child.find('jobs')\n if data is not None:\n node['jobs'] = data.text\n else:\n node['jobs'] = None\n\n node['nb_sockets'] = child.find('total_sockets').text\n node['nb_numa_nodes'] = child.find('total_numa_nodes').text\n props = child.find('properties').text\n node['properties'] = props.split(',')\n\n # Get the status entries\n node['status'] = dict()\n data = child.find('status')\n if data is None:\n tools.error('Node ' + name + \" has no status entry! Skipped.\")\n continue\n\n status = data.text\n status_list = status.split(',')\n for entry in status_list:\n data = entry.split('=')\n matches = IS_BYTE_SIZE.match(data[1])\n if matches:\n # Convert whatever size in GB\n data[1] = tools.size_convert(matches.group(1), matches.group(2), 'gb')\n\n # Keep the data\n node['status'][data[0]] = data[1]\n\n return nodes",
"def exec_parser(src_file: str):\n stdin = open(src_file)\n result = subprocess.run(['php', 'parse.php'], check=True, stdout=subprocess.PIPE, stdin=stdin)\n return result.stdout",
"def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)",
"def parse():\n file = open(INPUT, 'r')\n\n expect_eff = False\n expect_vout = False\n\n eff_dict = {}\n vout_dict = {}\n\n for line in file:\n if line.startswith('PCC'):\n id = line.strip()\n expect_eff = True\n elif expect_eff:\n if line.startswith('efficiency'):\n eff_str = line.strip().split(':')[1]\n # get rid of % symbol\n eff = int(eff_str.split('%')[0])\n eff_dict[id] = .01 * eff\n\n expect_vout = True\n\n expect_eff = False\n elif expect_vout:\n if line.startswith('output voltage'):\n vout_str = line.strip().split(':')[1]\n vout = int(vout_str)\n vout_dict[id] = vout\n\n expect_vout = False\n\n with open(EFF_OUTPUT, 'w') as f:\n json.dump(eff_dict, f)\n\n with open(VOUT_OUTPUT, 'w') as f:\n json.dump(vout_dict, f)\n\n # plot stats of eff and vout\n plot_hist(eff_dict.values(), 'Efficiency', 'eff', bins=50)\n plot_hist(vout_dict.values(), 'V_out', 'vout', bins=50)",
"def parse_dump_file(filename):\n with open(filename, 'r') as dump_file:\n results = dict()\n for line in dump_file:\n match = re.search(r'Janky frames: (\\d+) \\(([\\d\\.]+)%\\)', line)\n if match is not None:\n results['jankNum'] = int(match.group(1))\n results['jank_percent'] = float(match.group(2))\n return results",
"def parse(klass, f):\n members = []\n for line in f:\n line = line.strip()\n if not line:\n continue # skip empty lines\n members.append(PedigreeMember.parse_line(line))\n return Pedigree(members)",
"def parse(klass, f):\n members = []\n for line in f:\n line = line.strip()\n if not line:\n continue # skip empty lines\n members.append(PedigreeMember.parse_line(line))\n return Pedigree(members)",
"def parse_file(self, path, max_resolution, threshold, proteins={}):\n\n \"\"\"\n create regex pattern here so it is not done repeatedly while parsing file\n\n groups:\n 0 - Protein ID\n 1 - Chain ID\n 2 - Length of protein chain\n 3 - Exptl.\n 4 - Resolution\n 5 - R-factor\n 6 - FreeRValue\n \"\"\"\n regex_str = '(\\w{4})(\\w)\\s+(\\d+)\\s+(\\w+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d\\.]+)'\n regex_pattern = re.compile(regex_str)\n\n printc('Processing: %s' % path)\n\n raw = None\n try:\n _file = gzip.open(path, 'r')\n\n #first line is labels, discard it\n _file.readline()\n\n for line in _file:\n match = regex_pattern.match(line)\n if match:\n groups = match.groups()\n\n if groups[0] in proteins:\n # if protein already exists just update the additional\n # chain information. The properties should not change\n # between records in the selection file.\n protein = proteins[groups[0]]\n if not groups[1] in protein['chains']:\n protein['chains'].append(groups[1])\n #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold)\n\n else:\n # protein is not in proteins dict yet create initial\n # structure from parsed properties.\n resolution = float(groups[4])\n if resolution > 0 and resolution <= max_resolution:\n proteins[groups[0]] = {\n 'code':groups[0],\n 'chains':[groups[1]],\n 'resolution':groups[4],\n 'rfactor':groups[5],\n 'rfree':groups[6],\n 'threshold':threshold\n }\n\n #print 'Selecting Protein: %s Chain: %s Threshold: %s' % (groups[0],groups[1], threshold)\n\n finally:\n if _file:\n _file.close()\n\n return proteins",
"def parse(self, fp):\n\n # create the plex scanner for fp\n self.create_scanner(fp)\n\n # call parsing logic\n self.stmt_list()\n print('Parsing successful!')",
"def stat_parser():\n from tools import file_importer, file_outporter\n from math import log\n \n print(\"this is stat parser\")\n \n relPath = \"bob/processed/24h_bobdata_ed2.csv\"\n outPathUp = \"bob/processed/24h_bobprots_up_full.csv\"\n outPathDown = \"bob/processed/24h_bobprots_down_full.csv\"\n inpF = file_importer(relPath)\n outFUp = file_outporter(outPathUp)\n outFDown = file_outporter(outPathDown)\n \n \n skipFlag = True\n \n for inpLine in inpF:\n if skipFlag:\n skipFlag = False\n outFDown.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n outFUp.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n curLine.append(inpI.strip(\"\\\"\\n\"))\n try: \n curLine[-1] = float(curLine[-1])\n except ValueError:\n curLine[-1] = 1 \n if curLine[-1] < 0.05 and int(curLine[3]) > 1: # check if protein has at least 2 unique peptides and has a significant p value\n curLine[4:10] = [int(x) for x in curLine[4:10]]\n enrScore = log((sum(curLine[4:7]) / 3.0)/(sum(curLine[7:10]) / 3.0),2) # calculate log2 enrichment score\n # print int(sum(curLine[4:7]) / 3.0), int(sum(curLine[7:10]) / 3.0)\n if sum(curLine[4:7]) / 3.0 > sum(curLine[7:10]) / 3.0: # if the mean of the KO intensities is higher than the wt \n for outI in curLine:\n outFDown.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFDown.write(\",\")\n if outI is curLine[-2]:\n outFDown.write(str(enrScore)+ \",\")\n else:\n outFDown.write(\"\\n\")\n # outFDown.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n else:\n # outFUp.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n for outI in curLine:\n outFUp.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFUp.write(\",\")\n if outI is curLine[-2]:\n outFUp.write(str(enrScore)+ \",\")\n else:\n outFUp.write(\"\\n\")\n \n inpF.close()\n outFUp.close()\n outFDown.close()\n print(\"stat_parser completed\")",
"def parseFile(self,filename):\n\n name = '[0-9a-zA-Z_]+'\n string = '\\\\\"(.+)\\\\\"'\n\n testclass = None\n functionName = None\n\n fin = open(filename, 'r')\n for line in fin:\n # testclass starts\n res = re.match('class ('+name+')', line)\n if res != None:\n testclass = res.group(1)\n\n # end of testclass \n if re.match('};', line) != None:\n testclass = None\n\n # function start\n res = re.match('\\\\s+void ('+name+')\\\\(\\\\)', line)\n if res != None:\n functionName = res.group(1)\n\n elif re.match('\\\\s+}', line) != None:\n functionName = None\n\n if functionName == None:\n continue\n\n # check\n res = re.match('\\s+check.*\\('+string, line)\n if res != None:\n code = res.group(1)\n\n # code..\n res = re.match('\\\\s+'+string, line)\n if res != None:\n code = code + res.group(1)\n\n # assert\n res = re.match('\\\\s+ASSERT_EQUALS\\\\(\\\\\"([^\"]*)\\\\\",', line)\n if res != None and len(code) > 10:\n node = { 'testclass':testclass,\n 'functionName':functionName,\n 'code':code,\n 'expected':res.group(1) }\n self.nodes.append(node)\n code = ''\n\n # close test file\n fin.close()",
"def parse_tsp_file(file):\n # define regular expressions for the fields to parse\n regexes = {'name': re.compile(\"NAME : (.*)\"),\n 'comment': re.compile(\"COMMENT : (?!STARTNODE :|STARTNODES : |CLUSTERS :)(.*)\"),\n 'single_start': re.compile(\"COMMENT : STARTNODE : ([0-9])+\"),\n 'multi_start': re.compile(\"COMMENT : STARTNODES : (.*)\"),\n 'nodes':\n re.compile(\n r\"([0-9]+)\\ *([0-9]*\\.?[0-9]*)\\ *([0-9]*\\.?[0-9]*)\",\n re.MULTILINE),\n 'groups': re.compile(\"COMMENT : CLUSTERS : (.*)\")}\n # initialize results\n result = {'name': 'No Name', 'comment': '', 'startnodes': [],\n 'nodes': [], 'groups': []}\n # Define application rules\n\n def apply_match(regex_name, match):\n \"\"\"Applies a specific processing rule for each regex sperately as the\n fields vary in data types and structures\"\"\"\n if regex_name is 'name':\n result['name'] = match.group(1)\n elif regex_name is 'single_start':\n result['startnodes'] = [int(match.group(1))]\n elif regex_name is 'multi_start':\n result['startnodes'] = ast.literal_eval(match.group(1))\n elif regex_name is 'groups':\n result['groups'] = ast.literal_eval(\n match.group(1).replace(\" \", \"\"))\n elif regex_name is 'comment':\n result['comment'] += match.group(1) + \"\\n\"\n elif regex_name is 'nodes':\n result['nodes'].append([int(float(match.group(2))),\n int(float(match.group(3)))])\n # Process the lines in the file and check for matches for each regular\n # expression\n _file = open(file, 'r')\n lines = _file.readlines()\n for line in lines:\n if len(line):\n for regex_name in regexes:\n match = re.match(regexes[regex_name], line)\n if match:\n apply_match(regex_name, match)\n _file.close()\n return result",
"def parse_problem(path_to_file):\n with open(path_to_file, 'r') as f:\n lines = f.readlines()\n return parse_problem_lines(lines)",
"def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')",
"def file_parse():\n\n\tfilename = input(\"Enter the file path for your graph: \")\n\ttarget = open(filename, 'r')\n\n\ttarget_lines = [] \t# List of lines from target file\n\t\n\t# Grab the graph count and node/edge count for the first graph\n\ti = 0\n\tfor line in target:\n\t\tif i == 0:\n\t\t\tgraph_count = int(line)\n\t\telif i == 1:\n\t\t\tnode_count = int(line)\n\t\telif i == 2:\n\t\t\tedge_count = int(line)\n\t\telse:\t\n\t\t\ttarget_lines.append(line.strip('\\n'))\n\t\ti += 1\n\n\treturn graph_create(target_lines, graph_count, node_count, edge_count)",
"def parse_file(self, file):\n return self.parse(file.read())",
"def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')",
"def parseProgram(inputFile):\n print(\"Program\")\n parseStatements(inputFile)",
"def readProf(fname, wdir='.'):\n\n fname = path.join(wdir, fname)\n x, y = [], []\n\n with open(fname) as f:\n lines = f.readlines()\n\n for line in lines:\n elements = line.split()\n\n if elements[0] == '#':\n pass\n else:\n x.append(float(elements[0]))\n y.append(float(elements[1]))\n\n return x, y",
"def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)",
"def read_nodes_dmp(fname):\n df = pd.read_csv(fname, sep=\"|\", header=None, index_col=False,\n names=['tax_id', \n 'parent_tax_id',\n 'rank', \n 'embl_code',\n 'division_id', \n 'inherited_div_flag', # 1 or 0\n 'genetic_code_id', \n 'inherited_GC_flag', # 1 or 0\n 'mitochondrial_genetic_code_id', \n 'inherited_MGC_flag', # 1 or 0\n 'GenBank_hidden_flag',\n 'hidden_subtree_root_flag', # 1 or 0 \n 'comments'])\n return df.assign(rank = lambda x: x['rank'].str.strip(),\n embl_code = lambda x: x['embl_code'].str.strip(),\n comments = lambda x: x['comments'].str.strip())",
"def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)"
] | [
"0.66067183",
"0.6017137",
"0.59014267",
"0.58663404",
"0.5841529",
"0.5826687",
"0.5749238",
"0.5748941",
"0.5733292",
"0.5695029",
"0.5656529",
"0.56446993",
"0.55805326",
"0.55548406",
"0.55548406",
"0.55536187",
"0.55310863",
"0.5505214",
"0.55027777",
"0.5502458",
"0.5498558",
"0.5494872",
"0.5477688",
"0.5474041",
"0.54648143",
"0.54460925",
"0.5423553",
"0.5421917",
"0.54164046",
"0.5391422"
] | 0.79150075 | 0 |
Parses the CWP inclusive count files. Each line should contain the name of the function, the name of the file containing its declaration, the inclusive count, and the inclusive count fraction out of the total extracted inclusive count values. | def ParseCWPInclusiveCountFile(file_name):
cwp_inclusive_count_statistics = defaultdict(lambda: ('', 0, 0.0, 0))
with open(file_name) as input_file:
statistics_reader = csv.DictReader(input_file, delimiter=',')
for statistic in statistics_reader:
function_name = statistic['function']
file_name = MakeCWPAndPprofFileNamesConsistent(
os.path.normpath(statistic['file']))
dso_name = statistic['dso']
inclusive_count = statistic['inclusive_count']
inclusive_count_fraction = statistic['inclusive_count_fraction']
      # We ignore the lines that have empty fields (i.e. they specify only the
      # addresses of the functions and the inclusive count values).
if all([
function_name, file_name, dso_name, inclusive_count,
inclusive_count_fraction
]):
key = '%s,%s' % (function_name, file_name)
        # There might be situations where a function appears in multiple files
        # or objects. Such situations can occur when the Chrome OS version and
        # the name of the board are not specified in the Dremel queries (i.e.
        # the files can belong to different kernel or library versions).
inclusive_count_sum = \
cwp_inclusive_count_statistics[key][1] + int(inclusive_count)
inclusive_count_fraction_sum = \
cwp_inclusive_count_statistics[key][2] + \
float(inclusive_count_fraction)
# All the functions are initially marked as EXTRA_FUNCTION.
value = \
(dso_name, inclusive_count_sum, inclusive_count_fraction_sum,
EXTRA_FUNCTION)
cwp_inclusive_count_statistics[key] = value
return cwp_inclusive_count_statistics | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ParseCWPPairwiseInclusiveCountFile(file_name):\n pairwise_inclusive_count_statistics = defaultdict(lambda: defaultdict(float))\n\n with open(file_name) as input_file:\n statistics_reader = csv.DictReader(input_file, delimiter=',')\n\n for statistic in statistics_reader:\n parent_function_name, child_function_name = \\\n statistic['parent_child_functions'].split(\n PARENT_CHILD_FUNCTIONS_SEPARATOR)\n child_function_file_name = MakeCWPAndPprofFileNamesConsistent(\n os.path.normpath(statistic['child_function_file']))\n inclusive_count = statistic['inclusive_count']\n\n # There might be situations where a child function appears in\n # multiple files or objects. Such situations can occur when in the\n # Dremel queries are not specified the Chrome OS version and the\n # name of the board (i.e the files can belong to different kernel or\n # library versions), when the child function is a template function\n # that is declared in a header file or there are name collisions\n # between multiple executable objects.\n # If a pair of child and parent functions appears multiple times, we\n # add their inclusive count values.\n child_function_key = ','.join(\n [child_function_name, child_function_file_name])\n pairwise_inclusive_count_statistics[parent_function_name] \\\n [child_function_key] += float(inclusive_count)\n\n return pairwise_inclusive_count_statistics",
"def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found",
"def gcovParser(filePath):\n\t\tresult = 0\n\t\twith open(filePath + \".c.gcov\", \"r\") as file:\n\t\t\tfor line in file:\n\t\t\t\tnumber = line.split(':')[0]\n\t\t\t\tnumber = number.strip()\n\t\t\t\tif number.isdigit():\n\t\t\t\t\tresult += int(number)\n\n\t\treturn [result]",
"def parse_gcov_file(gcov_file):\n count = {}\n with open(gcov_file) as fh:\n for line in fh:\n tag, value = line.split(':')\n if tag == 'file':\n src_file = value.rstrip()\n elif tag == 'lcount':\n line_num, exec_count = value.split(',')\n count[int(line_num)] = int(exec_count)\n\n return src_file, count",
"def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if = re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)",
"def parameter_count(file):\n param=[]\n param_count=[]\n with open(file,encoding=\"utf-8\",errors='ignore') as f:\n\n for cnt, line in enumerate(f):\n #Check if a line begins with def \n if re.search(\"def\", line):\n #Get the function name and parameter\n r=re.search('(?:def\\s).*', line)\n else:\n continue\n if r:\n l=str(r.group())\n p=l.split(',')\n p[-1]=p[-1][:-1]\n r= p[0].split('(')\n \n if len(p)>1:\n \n first=r[-1]\n last=p[-1][:-1]\n param.append(first)\n param.append(last)\n \n for i in p[1:-1]:\n param.append(i)\n else:\n param.append(r[-1].split('(')[-1][:-1])\n if not param:\n param_count.append(0)\n else: \n param_count.append(len(param))\n param=[]\n\n return param_count",
"def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])",
"def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)",
"def fileCounter(directory):",
"def loadfile(self,fd):\n pat=re.compile(r'!')\n f=self.files.index(fd)\n index=0\n newstack=0\n fnc={}\n inc={}\n thisline=[]\n for line in fd:\n line=line.strip()\n if pat.search(line):\n if newstack>0 and index>1:\n count=int(thisline[index-1])\n for i in range(index-1):\n fn=thisline[i]\n fn=re.sub('^.*(: |`)','',fn)\n fn=re.sub('\\/.*$','',fn)\n inc[fn]=inc.get(fn,0)+1\n fn=re.sub('\\+.*$','',fn)\n fnc[fn]=fnc.get(fn,0)+1\n if i==0:\n self.excl[f][fn]=self.excl[f].get(fn,0)+count\n else:\n fn=fn+\"+\"+prefunc\n prefunc=fn\n self.total[f]+=count\n for i in fnc:\n self.incl[f][i]=self.incl[f].get(i,0)+count*fnc[i]\n for i in inc:\n self.inst[f][i]=self.inst[f].get(i,0)+count*inc[i]\n self.caller_callee[f][fn]=self.caller_callee[f].get(fn,0)+count\n fnc.clear()\n inc.clear()\n del thisline[:]\n index=0\n\n newstack+=1\n continue\n\n if newstack>0:\n thisline += [line]\n index+=1",
"def analyzePythonCode(self, sourceFile):\n numLines = 0 # Number of lines of code\n numDocStr = 0 # Number of doc strings in code\n numComments = 0 # Number of comments in the code\n numDefs = 0 # Number of functions\n numClasses = 0 # Number of classes\n f=self.openFile(sourceFile)\n for line in f:\n numLines += 1;\n loc = 0\n while (loc != -1): #count the # of times the '#' characters appears\n loc = line.find(\"#\", loc)\n if (loc != -1):\n loc += 1\n numComments += 1\n loc = 0\n while (loc != -1):\n loc = line.find('\"#', loc) #discount the # of times the '#' char appears as the 1st char in double quotes (skip hex constants)\n if (loc != -1):\n loc += 1\n numComments -= 1\n loc = 0\n while (loc != -1):\n loc = line.find(\"'#\", loc) #discount the # of times the '#' char appears as the 1st char in single quotes (skip hex constants)\n if (loc != -1):\n loc += 1\n numComments -= 1\n loc = 0\n while (loc != -1): #count the # of ''' found\n loc = line.find(\"'''\", loc)\n if (loc != -1):\n loc += 1\n numDocStr += 1\n loc = 0\n while (loc != -1): #count the # of \"\"\" found\n loc = line.find('\"\"\"', loc)\n if (loc != -1):\n loc += 1\n numDocStr += 1\n\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES) != '':\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES).split()[0] == 'def': #count # of defs\n numDefs += 1\n if line.strip(AutoGrader.Const.PYTHON_WHITE_SPACES).split()[0] == 'class': #count # of classes\n numClasses += 1\n \n f.close()\n numDocStr /= 2 #assume that the \"\"\" and ''' chars appear in pairs \n return numLines, numDocStr, numComments, numDefs, numClasses",
"def countLines(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_lines = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_lines += 1\r\n\r\n return counter_lines",
"def line_count(file):\n with open(file, \"r\") as f:\n return sum(1 for line in f)",
"def ComputeCWPCummulativeInclusiveStatistics(cwp_inclusive_count_statistics):\n cwp_inclusive_count_statistics_cumulative = defaultdict(int)\n\n for function_key, function_statistics \\\n in cwp_inclusive_count_statistics.iteritems():\n function_name, _ = function_key.split(',')\n cwp_inclusive_count_statistics_cumulative[function_name] += \\\n function_statistics[1]\n\n return cwp_inclusive_count_statistics_cumulative",
"def main():\n\n args = get_args()\n file_arg = args.file\n\n # print('file_arg = \"{}\"'.format(file_arg.name if file_arg else ''))\n\n result= {}\n result['<20'] = 0\n result['20-30'] = 0\n result['30-40'] = 0\n result['40-50'] = 0\n result['50-60'] = 0\n result['>=60'] = 0\n \n for line in file_arg:\n val = int(line.strip())\n if ( val < 20):\n result['<20'] += 1\n if ( val >= 20 and val < 30):\n result['20-30'] += 1\n if ( val >= 30 and val < 40):\n result['30-40'] += 1\n if ( val >= 40 and val < 50):\n result['40-50'] += 1\n if ( val >= 50 and val < 60):\n result['50-60'] += 1\n if ( val >= 60 ):\n result['>=60'] += 1\n \n \n print(result)",
"def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()",
"def _load_and_count(self,\n filename,\n min_date=None,\n max_date=None,\n n_samples=None,\n randomized=False,\n return_post_counts=False\n ):\n ## Load User Data\n fn_data = self._loader.load_user_data(filename,\n min_date=min_date,\n max_date=max_date,\n n_samples=n_samples,\n randomized=randomized)\n ## Count Number of Posts\n n_posts = len(fn_data)\n ## Early Return for Zero Data\n if n_posts == 0:\n if not return_post_counts:\n return Counter()\n else:\n return Counter(), n_posts\n ## Identify Tokens\n fn_tokens = [i[\"text_tokenized\"] for i in fn_data]\n ## Get Counts\n fn_counts = self._count_tokens(fn_tokens)\n if not return_post_counts:\n return fn_counts\n else:\n return fn_counts, n_posts",
"def countWords(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_words = 0\r\n\r\n for line in islice(file, start, end):\r\n res = len(line.split())\r\n counter_words += res\r\n\r\n return counter_words",
"def get_regions_counts(fname, seglen, mincounts):\n counts = defaultdict(int)\n seglen=int(seglen)\n with open(fname) as fin:\n infile = csv.DictReader(fin, delimiter='\\t')\n for line in infile:\n if int(line['interactions']) < mincounts:\n continue\n t_reg = (\n line['RNA1 chromosome'],int(int(line['Start of RNA1 first read'])/seglen)*seglen,\n line['RNA1 strand'], \n line['RNA2 chromosome'],int(int(line['Start of RNA2 last read'])/seglen)*seglen,\n line['RNA2 strand'])\n\n counts[t_reg] = int(line['interactions'])\n return counts",
"def wcount(lines, topn=10):\n '''a=[]\n for line in lines:\n word = line.strip()\n a.append(word)\n def histogram(s):\n d = dict()\n for i in s:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\n return d'''\n def process_line(lines,diction):\n lines = lines.replace('-',' ')\n for word in lines.split():\n word=word.strip(string.punctuation+string.whitespace)\n word.lower()\n diction[word]=diction.get(word,0)+1\n\n def process_file(lines):\n diction = {}\n process_line(lines,diction)\n return diction\n diction=process_file(lines)\n x=list(diction.values())\n x.sort()\n x.reverse()\n count = 0\n for i in range(topn):\n for key in list(diction.keys()):\n if diction[key]==x[i] and count<topn:\n print(\"%s %d\"%(key,diction[key]))\n count +=1\n del diction[key]\n pass",
"def analyze(filename):\r\n start = datetime.datetime.now()\r\n\r\n ao_count = 0\r\n\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\r\n year_count = {\r\n \"2013\": 0,\r\n \"2014\": 0,\r\n \"2015\": 0,\r\n \"2016\": 0,\r\n \"2017\": 0,\r\n \"2018\": 0\r\n }\r\n for row in reader:\r\n l_row = list(row)\r\n print(f\"\\n{row}\")\r\n year = l_row[5][6:]\r\n if year in year_count.keys():\r\n year_count[year] += 1\r\n if \"ao\" in l_row[6]:\r\n ao_count += 1\r\n\r\n end = datetime.datetime.now()\r\n return start, end, year_count, ao_count",
"def clauses_from_file(filename):\n with open(filename, \"r\") as fin:\n #remove comments from beginning\n line = fin.readline()\n while(line.lstrip()[0] == 'c'):\n line = fin.readline()\n\n header = line.split(\" \")\n num_literals = int(header[2].rstrip())\n\n lines = fin.readlines()\n\n for i in range(len(lines)):\n lines[i] = lines[i].split(\" \")[:-1]\n lines[i] = [int(x) for x in lines[i]]\n\n return (lines, num_literals)",
"def _setup_n_ints_in_file(self):\n self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)",
"def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri",
"def analysis(file):\n\n fields = []\n\n with open(file) as f:\n lines = f.readlines()\n rows = len(lines)\n filesize = sum([len(line) for line in lines])\n\n tmp = []\n\n for line in lines[8:len(lines)-1]:\n fs = line.strip().split('\\t')\n\n \"\"\"\n fields:\n ts\n uid\n id.orig_h\n id.orig_p\n id.resp_h\n id.resp_p\n proto\n trans_id\n query\n qclass\n qclass_name\n qtype\n qtype_name\n rcode\n rcode_name\n AA\n TC\n RD\n RA\n Z\n answersTTLs\n rejected\n \"\"\"\n\n tmp.append(fs[N])\n\n #print(log, rows, ','.join(methods))\n\n # time intervals\n #tss_sorted = sorted(map(float,tmp))\n #tss_sorted = map(float, tmp)\n #intervals = map(int,[tss_sorted[i+1]-tss_sorted[i] for i in range(len(tss_sorted)-1)])\n #print('%s %s' % (log, ' '.join(map(str,intervals))))\n #file = urlparse(fs[N]).path.split('/')[-1].split('.')\n #if len(file)>1:\n # tmp.append(file[-1])\n #tmp.append(urlparse(fs[N]).path.split('/')[-1])\n #tmp.append(urlparse(fs[N]).path)\n\n #fields.append(set(tmp))\n #fields.append(intervals)\n fields.append(tmp)\n\n\n dic = {}\n for i in fields:\n for j in i:\n if j in dic:\n dic[j] += 1\n else:\n dic[j] = 1\n ls = sorted(dic.items(), lambda x,y: cmp(x[1], y[1]), reverse = True)\n for i in range(len(ls)):\n print('%s\\t%s' %(ls[i][0], ls[i][1]))\n #print('%s' % join(ls[i][1]))",
"def countCharacters(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_chars = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_chars += len(line)\r\n\r\n return counter_chars",
"def contentcheck_numerical():\n filename = \"Analysis.txt\"\n temp_line = \"\"\n count = 0\n for line in open(filename, 'r'):\n temp_line = temp_line + line\n if \"DATA INFORMATION\" in temp_line:\n count = count + 1\n if \"MEAN, MEDIAN AND MODE:\" in temp_line:\n count = count + 1\n if \"Correlation\" in temp_line:\n count = count + 1\n if \"Normality Tests\" in temp_line:\n count = count + 1\n return count",
"def fileLineCount(fPath):\n\twith open(fPath) as f:\n\t\tfor i, li in enumerate(f):\n\t\t\tpass\n\treturn (i + 1)",
"def get_rec_count(files: List[str],\n dialect: csv.Dialect) -> Tuple[Optional[int], int]:\n rec_cnt = -1\n for _ in csv.reader(fileinput.input(files), dialect):\n rec_cnt += 1\n fileinput.close()\n return rec_cnt",
"def getTotalCaseAndControlCounts(genotypesFilename):\r\n\r\n\tcomphetSuffix = \"\"\r\n\tif \"comphet\" in genotypesFilename:\r\n\t\tcomphetSuffix = \" (#1)\"\r\n\r\n\t# We read through the whole file. Might take a while, but easier than dealing with all edge cases.\r\n\tmaxCoveredCasePercentage = 0\r\n\tmaxCoveredControlPercentage = 0\r\n\treader = csv.reader(open(genotypesFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor variant in reader:\r\n\r\n\t\tvariant = dict(zip(header, variant))\r\n\t\tcasePercentage = float(variant[\"Covered Case Percentage\" + comphetSuffix])/100.0\r\n\t\tif casePercentage > maxCoveredCasePercentage:\r\n\t\t\tmaxCoveredCasePercentage = casePercentage\r\n\t\t\tcoveredCases = int(variant[\"Covered Case\" + comphetSuffix])\r\n\t\t\ttotalCases = int(round(coveredCases/casePercentage))\r\n\r\n\t\tcontrolPercentage = float(variant[\"Covered Ctrl Percentage\" + comphetSuffix])/100.0\r\n\t\tif controlPercentage > maxCoveredControlPercentage:\r\n\t\t\tmaxCoveredControlPercentage = controlPercentage\r\n\t\t\tcoveredControls = int(variant[\"Covered Ctrl\" + comphetSuffix])\r\n\t\t\ttotalControls = int(round(coveredControls/controlPercentage))\r\n\treturn totalCases, totalControls"
] | [
"0.65325326",
"0.5770873",
"0.5255738",
"0.5147463",
"0.5132285",
"0.5097598",
"0.50919753",
"0.50912225",
"0.507354",
"0.5055528",
"0.5017052",
"0.50100046",
"0.4995706",
"0.49907556",
"0.49840093",
"0.49835533",
"0.4940733",
"0.49401423",
"0.49015447",
"0.48846525",
"0.48541293",
"0.48453942",
"0.48432696",
"0.4824248",
"0.4816764",
"0.47995722",
"0.47974828",
"0.47945026",
"0.4788498",
"0.4778643"
] | 0.71798396 | 0 |
Parses the CWP pairwise inclusive count files. Each line of the file should contain a parent and a child function, concatenated by the PARENT_CHILD_FUNCTIONS_SEPARATOR, the name of the file where the child function is declared, and the inclusive count fraction of the pair of functions out of the total inclusive count values. | def ParseCWPPairwiseInclusiveCountFile(file_name):
pairwise_inclusive_count_statistics = defaultdict(lambda: defaultdict(float))
with open(file_name) as input_file:
statistics_reader = csv.DictReader(input_file, delimiter=',')
for statistic in statistics_reader:
parent_function_name, child_function_name = \
statistic['parent_child_functions'].split(
PARENT_CHILD_FUNCTIONS_SEPARATOR)
child_function_file_name = MakeCWPAndPprofFileNamesConsistent(
os.path.normpath(statistic['child_function_file']))
inclusive_count = statistic['inclusive_count']
      # There might be situations where a child function appears in
      # multiple files or objects. Such situations can occur when the
      # Chrome OS version and the name of the board are not specified in
      # the Dremel queries (i.e. the files can belong to different kernel
      # or library versions), when the child function is a template
      # function that is declared in a header file, or when there are
      # name collisions between multiple executable objects.
# If a pair of child and parent functions appears multiple times, we
# add their inclusive count values.
child_function_key = ','.join(
[child_function_name, child_function_file_name])
pairwise_inclusive_count_statistics[parent_function_name] \
[child_function_key] += float(inclusive_count)
return pairwise_inclusive_count_statistics | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ParseCWPInclusiveCountFile(file_name):\n cwp_inclusive_count_statistics = defaultdict(lambda: ('', 0, 0.0, 0))\n\n with open(file_name) as input_file:\n statistics_reader = csv.DictReader(input_file, delimiter=',')\n for statistic in statistics_reader:\n function_name = statistic['function']\n file_name = MakeCWPAndPprofFileNamesConsistent(\n os.path.normpath(statistic['file']))\n dso_name = statistic['dso']\n inclusive_count = statistic['inclusive_count']\n inclusive_count_fraction = statistic['inclusive_count_fraction']\n\n # We ignore the lines that have empty fields(i.e they specify only the\n # addresses of the functions and the inclusive counts values).\n if all([\n function_name, file_name, dso_name, inclusive_count,\n inclusive_count_fraction\n ]):\n key = '%s,%s' % (function_name, file_name)\n\n # There might be situations where a function appears in multiple files\n # or objects. Such situations can occur when in the Dremel queries there\n # are not specified the Chrome OS version and the name of the board (i.e\n # the files can belong to different kernel or library versions).\n inclusive_count_sum = \\\n cwp_inclusive_count_statistics[key][1] + int(inclusive_count)\n inclusive_count_fraction_sum = \\\n cwp_inclusive_count_statistics[key][2] + \\\n float(inclusive_count_fraction)\n\n # All the functions are initially marked as EXTRA_FUNCTION.\n value = \\\n (dso_name, inclusive_count_sum, inclusive_count_fraction_sum,\n EXTRA_FUNCTION)\n cwp_inclusive_count_statistics[key] = value\n\n return cwp_inclusive_count_statistics",
"def ComputeCWPChildFunctionsFractions(cwp_inclusive_count_statistics_cumulative,\n cwp_pairwise_inclusive_count_statistics):\n\n pairwise_inclusive_count_fractions = {}\n\n for parent_function_key, child_functions_metrics in \\\n cwp_pairwise_inclusive_count_statistics.iteritems():\n child_functions_fractions = {}\n parent_function_inclusive_count = \\\n cwp_inclusive_count_statistics_cumulative.get(parent_function_key, 0.0)\n\n if parent_function_key in cwp_inclusive_count_statistics_cumulative:\n for child_function_key, child_function_inclusive_count \\\n in child_functions_metrics.iteritems():\n child_functions_fractions[child_function_key] = \\\n child_function_inclusive_count / parent_function_inclusive_count\n else:\n for child_function_key, child_function_inclusive_count \\\n in child_functions_metrics.iteritems():\n child_functions_fractions[child_function_key] = 0.0\n pairwise_inclusive_count_fractions[parent_function_key] = \\\n child_functions_fractions\n\n return pairwise_inclusive_count_fractions",
"def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri",
"def ParsePprofTreeOutput(file_name):\n\n # In the pprof output, the statistics of the functions start from the 9th\n # line.\n with open(file_name) as input_file:\n pprof_tree_content = input_file.readlines()[9:]\n\n pprof_tree_statistics = defaultdict(lambda: defaultdict(float))\n track_child_functions = False\n\n # The statistics of a given function, its parent and child functions are\n # included between two separator marks.\n # All the parent function statistics are above the line containing the\n # statistics of the given function.\n # All the statistics of a child function are below the statistics of the\n # given function.\n # The statistics of a parent or a child function contain the calls, calls\n # percentage, the function name and the file where the function is declared.\n # The statistics of the given function contain the flat, flat percentage,\n # sum percentage, cummulative, cummulative percentage, function name and the\n # name of the file containing the declaration of the function.\n for line in pprof_tree_content:\n separator_match = SEPARATOR_REGEX.search(line)\n\n if separator_match:\n track_child_functions = False\n continue\n\n parent_function_statistic_match = FUNCTION_STATISTIC_REGEX.search(line)\n\n if parent_function_statistic_match:\n track_child_functions = True\n lookup_index = parent_function_statistic_match.end()\n parent_function_key_match = \\\n FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)\n lookup_index = parent_function_key_match.end()\n parent_function_key = MakePprofFunctionKey(line[lookup_index:-1])\n continue\n\n if not track_child_functions:\n continue\n\n child_function_statistic_match = \\\n CHILD_FUNCTION_PERCENTAGE_REGEX.search(line)\n child_function_percentage = \\\n float(child_function_statistic_match.group(1))\n lookup_index = child_function_statistic_match.end()\n child_function_key_match = \\\n FUNCTION_KEY_SEPARATOR_REGEX.search(line, pos=lookup_index)\n lookup_index = child_function_key_match.end()\n child_function_key = MakePprofFunctionKey(line[lookup_index:-1])\n\n pprof_tree_statistics[parent_function_key][child_function_key] += \\\n child_function_percentage / 100.0\n\n return pprof_tree_statistics",
"def parse(filehandle):\n\n intervalstart = False\n candidatestart = False\n candidates = []\n for line in filehandle:\n elems = [x.strip() for x in line.split('=')]\n # Filter useless lines\n if len(elems) < 2:\n continue\n if not intervalstart:\n if elems[0] == 'Object class':\n objectclass = elems[1]\n\n elif elems[0] == 'xmin':\n xmin = float(elems[1])\n\n elif elems[0] == 'xmax':\n xmax = float(elems[1])\n\n elif elems[0] == 'nx':\n size = int(elems[1])\n\n elif elems[0] == 'dx':\n shift = float(elems[1])\n\n elif elems[0] == 'x1':\n start = float(elems[1])\n\n elif elems[0] == 'ceiling':\n ceiling = int(elems[1])\n\n elif elems[0] == 'maxnCandidates':\n maxnCandidates = int(elems[1])\n\n elif elems[0] == 'intensity':\n # Iteration never returns here because intervalstart is True\n intervalstart = True\n intTier = Tier(xmin, xmax, size, '\"Intensity\"')\n text = '\"' + elems[1] + '\"'\n intTier.shift = shift\n\n # Set begin and end so they can be used in the next iteration\n # to set the first Interval for the pitch Tier\n begin = 0.0\n end = start\n intTier.addInterval(Interval(begin, end, text))\n pitchTier = Tier(xmin, xmax, size, objectclass)\n pitchTier.shift = shift\n # Prepare candidate list for first Interval\n # First iteration skips the intensity condition below\n candidates.append((0, 0))\n\n elif intervalstart:\n\n if elems[0] == 'intensity':\n begin = intTier[-1].xmax\n end = begin + shift\n text = '\"' + elems[1] + '\"'\n intTier.addInterval(Interval(begin, end, text))\n candidates = []\n\n elif elems[0] == 'nCandidates':\n nc = int(elems[1])\n candidatestart = True\n\n elif candidatestart:\n\n if elems[0] == \"frequency\":\n freq = float(elems[1])\n elif elems[0] == \"strength\":\n strength = float(elems[1])\n candidates.append((freq, strength))\n\n if len(candidates) == nc:\n # Candidate are ranked according to a decoding algorithm\n # First candidate is most likely, but we parse them all\n pitchTier.addInterval(\n Interval(begin, end, '\"' + str(candidates[0][0]) + '\"'))\n candidatestart = False\n\n return (pitchTier, intTier)",
"def counterCompute(line, nodes, rowname):\n counter = 0\n if nodes != 1: #node has parents \n parent = line[1: nodes] \n for par in parent:\n if (\"Not \" + par) in rowname: #one parent is \"Not par\"\n counter = counter + math.pow(2, nodes - 2 - parent.index(par))\n return counter",
"def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)",
"def elements(filelist, start, end):\n process = False\n n = 1\n with open(filelist) as fh:\n for line in fh:\n fname = line.strip()\n if n == start:\n process = True\n if n > end:\n return\n if process:\n yield (n, fname)\n n += 1",
"def parse_infiles(self):\n\n errs = 0\n # check file existence first\n for ifile in self.infiles:\n if ifile in ['-', 'stdin']: pass\n elif not os.path.isfile(ifile):\n print('** input file not found: %s' % ifile)\n errs += 1\n if errs: return 1\n \n # check for existence separately\n for ifile in self.infiles:\n if self.verb > 2: print('++ processing %s ...' % ifile)\n\n # open, read, close\n if ifile in ['-', 'stdin']: fp = sys.stdin\n else:\n try: fp = open(ifile)\n except:\n print(\"** failed to open input file %s\" % ifile)\n return 1\n ilines = fp.readlines()\n if ifile != sys.stdin: fp.close()\n\n # empty should be a terminal failure\n if len(ilines) < 1:\n print('** empty input for file %s' % ifile)\n return 1\n\n if len(self.labels) == 0:\n rv, self.labels = self.make_labels(ilines)\n self.parents = [self.find_parent_label(lab) for lab in self.labels]\n if rv: return 1\n\n rv, ldict = self.make_dict(ilines)\n if rv: return 1\n\n self.ldict.append(ldict)\n\n return 0",
"def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found",
"def parse_sequence_lengths(filepath, base_pair_limit):\n\n total_count = 0\n limit_count = 0\n with open(filepath) as f:\n line = f.readline()\n while line:\n if line.startswith('@'):\n total_count += 1\n seq = f.readline()\n sep = f.readline()\n qual = f.readline()\n if len(seq.strip()) > base_pair_limit:\n limit_count += 1\n line = f.readline()\n\n return limit_count / total_count",
"def parse_files(procs):\n xsections = {} # {(proc, mass): cross-section}\n qcd_unc = {} # {(proc, mass): (unc_down, unc_up)}\n pdf_unc = {} # {(proc, mass): (unc_down, unc_up)}\n branchings = {} # {mass: branching}\n br_unc = {} # {mass: (unc_down, unc_up)}\n\n # parse text files with cross-sections and their uncertainties\n for proc in procs:\n with open('SM_Higgs_14TeV_{0}.txt'.format(proc), 'r') as f:\n for line in f.readlines():\n if line.startswith('#'):\n continue\n\n (mass, xs, scup, scdn, pdfup, pdfdn) = line.split()\n ind = (proc, mass)\n\n xsections[ind] = float(xs)\n qcd_unc[ind] = (1 + 0.01 * float(scdn), 1 + 0.01 * float(scup))\n pdf_unc[ind] = (1 + 0.01 * float(pdfdn), 1 + 0.01 * float(pdfup))\n\n # parse text file with branching ratios and their uncertainties\n with open('SM_Higgs_branching_ratios.txt', 'r') as f:\n for line in f.readlines():\n if line.startswith('#'):\n continue\n\n # take branching ratios for the HZg analysis\n (mass, _, _, _, _, _, _, br, uncup, uncdn) = line.split()\n\n branchings[mass] = float(br)\n br_unc[mass] = (1 + 0.01 * float(uncdn), 1 + 0.01 * float(uncup))\n\n # sorted list with mass values\n masses = sorted(branchings.keys(), key=lambda x: float(x))\n\n return {'masses': masses, 'procs': procs, 'xs': xsections, 'br': branchings,\n 'qcd_unc': qcd_unc, 'pdf_unc': pdf_unc, 'br_unc': br_unc}",
"def vasp_file_lines(vasp_file, line_continuation=False):\n vasp_file_stripped = (line.rstrip(\"\\n\") for line in vasp_file)\n line_nr = 0\n for line in vasp_file_stripped:\n line_nr += 1\n while line_continuation and line.endswith(\"\\\\\"):\n line = line[:-1] + next(vasp_file_stripped)\n line_nr += 1\n yield line_nr, line",
"def ComputeCWPCummulativeInclusiveStatistics(cwp_inclusive_count_statistics):\n cwp_inclusive_count_statistics_cumulative = defaultdict(int)\n\n for function_key, function_statistics \\\n in cwp_inclusive_count_statistics.iteritems():\n function_name, _ = function_key.split(',')\n cwp_inclusive_count_statistics_cumulative[function_name] += \\\n function_statistics[1]\n\n return cwp_inclusive_count_statistics_cumulative",
"def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if = re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)",
"def loadfile(self,fd):\n pat=re.compile(r'!')\n f=self.files.index(fd)\n index=0\n newstack=0\n fnc={}\n inc={}\n thisline=[]\n for line in fd:\n line=line.strip()\n if pat.search(line):\n if newstack>0 and index>1:\n count=int(thisline[index-1])\n for i in range(index-1):\n fn=thisline[i]\n fn=re.sub('^.*(: |`)','',fn)\n fn=re.sub('\\/.*$','',fn)\n inc[fn]=inc.get(fn,0)+1\n fn=re.sub('\\+.*$','',fn)\n fnc[fn]=fnc.get(fn,0)+1\n if i==0:\n self.excl[f][fn]=self.excl[f].get(fn,0)+count\n else:\n fn=fn+\"+\"+prefunc\n prefunc=fn\n self.total[f]+=count\n for i in fnc:\n self.incl[f][i]=self.incl[f].get(i,0)+count*fnc[i]\n for i in inc:\n self.inst[f][i]=self.inst[f].get(i,0)+count*inc[i]\n self.caller_callee[f][fn]=self.caller_callee[f].get(fn,0)+count\n fnc.clear()\n inc.clear()\n del thisline[:]\n index=0\n\n newstack+=1\n continue\n\n if newstack>0:\n thisline += [line]\n index+=1",
"def generate_transition_counts(file_path):\r\n\ttransition_unigram_counts = dict()\r\n\ttransition_bigram_counts = dict()\r\n\ttransition_bigram_counts_two_start_tokens = dict()\r\n\ttransition_trigram_counts = dict()\r\n\r\n\twith open(file_path) as f:\r\n\t\tline_count = 0\r\n\t\tfor line in f:\r\n\t\t\tline_count+=1\r\n\t\t\tif line_count%3 != 0:\r\n\t\t\t\tcontinue\r\n\t\t\ttag_set = [\"<START>\"] + line.lower().split()\r\n\t\t\ti = 0\r\n\t\t\twhile(i<len(tag_set)):\r\n\t\t\t\ttag = tag_set[i]\r\n\t\t\t\tif(tag in transition_unigram_counts):\r\n\t\t\t\t\ttransition_unigram_counts[tag]+=1\r\n\t\t\t\telse:\r\n\t\t\t\t\ttransition_unigram_counts[tag]=1\r\n\t\t\t\tif(i>0):\r\n\t\t\t\t\ttag_bigram = (tag_set[i-1], tag_set[i])\r\n\t\t\t\t\tif tag_bigram not in transition_bigram_counts:\r\n\t\t\t\t\t\ttransition_bigram_counts[tag_bigram] = 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttransition_bigram_counts[tag_bigram]+=1\r\n\t\t\t\ti+=1\r\n\t\t\ttag_set_2 = [\"<START>\"] + tag_set\r\n\t\t\ti = 1\r\n\t\t\twhile(i<len(tag_set_2)):\r\n\t\t\t\ttag_bigram = (tag_set_2[i-1], tag_set_2[i])\r\n\t\t\t\tif tag_bigram not in transition_bigram_counts_two_start_tokens:\r\n\t\t\t\t\ttransition_bigram_counts_two_start_tokens[tag_bigram] = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\ttransition_bigram_counts_two_start_tokens[tag_bigram]+=1\r\n\t\t\t\t\r\n\t\t\t\tif(i>1):\r\n\t\t\t\t\ttag_trigram = (tag_set_2[i-2], tag_set_2[i-1], tag_set_2[i])\r\n\t\t\t\t\tif tag_trigram not in transition_trigram_counts:\r\n\t\t\t\t\t\ttransition_trigram_counts[tag_trigram] = 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttransition_trigram_counts[tag_trigram]+=1\r\n\r\n\t\t\t\ti+=1\r\n\treturn (transition_unigram_counts, transition_bigram_counts, transition_bigram_counts_two_start_tokens, transition_trigram_counts)",
"def parser(in_file,verbose):\n\n # perform high-level parsing into sections\n res_file_lines = [row for row in in_file]\n tokenized_lines = tools.split_and_prune_lines(res_file_lines)\n sections = tools.extracted_sections(tokenized_lines)\n\n # split out common sections and subsequent groups of results sections\n def is_results_sentinel_section(section):\n \"\"\" Identify mesh point separator \"pseudo-section\" header.\n\n (Helper function for res_parser_spncci.)\n \"\"\"\n (section_name,_) = section\n return (section_name == \"RESULTS\")\n\n grouped_sections = tools.split_when(is_results_sentinel_section,sections)\n common_sections = list(next(grouped_sections))\n grouped_results_sections = [list(section_group) for section_group in grouped_sections]\n\n if (verbose):\n print(\"Section counts\")\n print(\" Common sections:\",len(common_sections))\n for results_section_group in grouped_results_sections:\n print(\" Results sections (by group):\",len(results_section_group))\n\n # generate results objects by mesh point\n mesh_data = []\n if (grouped_results_sections):\n # there are results sections: actual mesh, not counting run\n for results_section_group in grouped_results_sections:\n full_section_group = common_sections + results_section_group\n results = spncci_results_data.SpNCCIResultsData()\n parse_mesh_point(results,full_section_group,section_handlers)\n mesh_data.append(results)\n else:\n # no results sections: counting run\n results = spncci_results_data.SpNCCIResultsData()\n parse_mesh_point(results,common_sections,section_handlers)\n mesh_data.append(results)\n\n return mesh_data",
"def fileCounter(directory):",
"def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines",
"def _combineLines(junctionList, leftEdge, rightEdge):\n\n # the only things we have to adjust are the score and the name \n scoreSoFar = 0\n previousNumBasesCovered = 0\n minStart = -1\n maxStop = -1\n # keep a count of all the left/right edges so we can pick the most common \n leftEdgeCount = {}\n rightEdgeCount = {}\n\n # first we need to sort the junctionList by score, in reverse (highest score first) \n junctionList.sort(reverse=True)\n name = junctionList[0][1].split(\"\\t\")[3]\n countPlus = 0\n countMinus = 0\n numJunctions = 0\n for (score, line, leftEdge, rightEdge) in junctionList:\n pieces = line.split(\"\\t\")\n #print \"previous start: %s, this start: %s\" % (minStart, pieces[1]) \n start = int(pieces[1])\n stop = int(pieces[2])\n strand = pieces[5]\n if strand == \"+\":\n countPlus += 1\n elif strand == \"-\":\n countMinus += 1\n else:\n print line\n raise hmmErrors.InvalidInputException(\"ERROR! strand value %s not valid. \" % strand)\n\n name = pieces[3]\n if pieces[3].find(\"|junc=\") > 0:\n numCollapsed = int(pieces[3].split(\"junc=\")[-1])\n numJunctions += numCollapsed\n else:\n numJunctions += 1\n\n if previousNumBasesCovered == 0:\n previousNumBasesCovered = (leftEdge - start) + (stop - rightEdge)\n scoreSoFar = float(pieces[4])\n else:\n # we only want to count bases grown on the outer sides (start and stop) and ignore bases on the inner edges \n newBases = max(0, minStart-start) + max(0, stop-maxStop)\n scoreSoFar = scoreSoFar + (newBases / float(newBases+previousNumBasesCovered) ) * float(pieces[4])\n previousNumBasesCovered = previousNumBasesCovered + newBases\n\n\n if minStart > 0:\n minStart = min(minStart, start)\n else:\n minStart = start\n\n maxStop = max(maxStop, stop)\n\n if not leftEdgeCount.has_key(leftEdge):\n leftEdgeCount[leftEdge] = 0\n leftEdgeCount[leftEdge] += 1\n if not rightEdgeCount.has_key(rightEdge):\n rightEdgeCount[rightEdge] = 0\n rightEdgeCount[rightEdge] += 1\n\n maxLeft = max(leftEdgeCount.values())\n for k, v in leftEdgeCount.iteritems():\n if v == maxLeft:\n useLeft = k\n break\n maxRight = max(rightEdgeCount.values())\n for k, v in rightEdgeCount.iteritems():\n if v == maxRight:\n useRight = k\n break\n if countPlus >= countMinus:\n strand = \"+\"\n else:\n strand = \"-\"\n\n pieces = junctionList[0][1].split(\"\\t\")\n namePieces = pieces[3].split(\"|\")\n rootName = \"\"\n for piece in namePieces:\n if not piece.startswith(\"junc=\"):\n rootName += piece + \"|\"\n finalName = rootName + (\"junc=%s\" % numJunctions)\n blockStarts = \"0,%s,\" % (useRight - minStart)\n blockSizes = \"%s,%s,\" % ( (useLeft-minStart), (maxStop-useRight) )\n\n return \"\\t\".join(str(x) for x in [pieces[0], minStart, maxStop, finalName, scoreSoFar,\n strand, minStart, maxStop, \"0\", \"2\", blockSizes, blockStarts\n ])",
"def crapome_parser():\n import os.path\n \n # contTreshold = 30 # set this to the desired contamination score\n resD = {}\n \n # crapFile = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"1503486016360_gp-1.txt\"),\"rU\")\n crapFile = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"Crapome-all-proteins-ptpn22-ip-ctl.txt\"),\"rU\")\n \n headerFlag = True\n \n fileLength = 0\n for inpLine in crapFile: # parse crapome output file\n if headerFlag:\n headerFlag = False\n continue\n fileLength += 1\n lineList = inpLine.split(\"\\t\")\n if lineList[2] == \"\": continue\n elif len(lineList) > 2: contScore = int(lineList[2].split(\"/\")[0])\n else: contScore = 0\n \n # if contScore < contTreshold:\n resD[lineList[0]] = contScore\n \n # print \"Contaminant treshold: \" + str(contTreshold)\n \n print(\"lines parsed: \" + str(fileLength))\n print(\"Number of results: \" + str(len(resD)))\n \n # inpFile = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1_no0.csv\"),\"r\")\n # outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1_no0_crapome.csv\"),\"w\")\n inpFile = open(os.path.join(\"/home/mate/workspace/katamari/src/ed/bob/processed\", \"OST-24-05-2017_combined_ttest_ed_2.csv\"),\"rU\")\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"processed\", \"OST-24-05-2017_combined_ttest_ed_2_cr.csv\"),\"w\")\n\n \n \n headerFlag = True\n for inpLine in inpFile: # parse the input file for crapome and add crapome results to it\n inpList = inpLine.rstrip(\"\\n\").split(\",\")\n for inpI in inpList:\n outF.write(inpI + \",\")\n \n if headerFlag: \n outF.write(\"Crapome score\")\n headerFlag = False\n elif inpList[2].upper() in resD: outF.write(str(resD[inpList[2].upper()]))\n else: outF.write(\"0\")\n \n outF.write(\"\\n\")\n print(\"results written to file\")",
"def scan_P__(P__):\n\n for _P_, P_ in pairwise(P__): # Iterate through pairs of lines.\n _iter_P_, iter_P_ = iter(_P_), iter(P_) # Convert to iterators.\n try:\n _P, P = next(_iter_P_), next(iter_P_) # First pair to check.\n except StopIteration: # No more up_fork-down_fork pair.\n continue # To next pair of _P_, P_.\n while True:\n isleft, olp = comp_edge(_P, P) # Check for 4 different cases.\n if olp and _P['sign'] == P['sign']:\n _P['down_fork_'].append(P)\n P['up_fork_'].append(_P)\n try: # Check for stopping:\n _P, P = (next(_iter_P_), P) if isleft else (_P, next(iter_P_))\n except StopIteration: # No more up_fork - down_fork pair.\n break # To next pair of _P_, P_.\n\n return [*flatten(P__)] # Flatten P__ before return.",
"def relative_minpair_fl(corpus_context, segment,\n relative_count_to_relevant_sounds=False, relative_count_to_whole_corpus=True,\n distinguish_homophones=False, minimal_pair_definition=False,\n output_filename=None, environment_filter=None,\n prevent_normalization=False, stop_check=None, call_back=None):\n all_segments = corpus_context.inventory\n segment_pairs = [(segment, other.symbol) for other in all_segments\n if other.symbol != segment and other.symbol != '#']\n\n results = []\n to_output = []\n for sp in segment_pairs:\n res = minpair_fl(corpus_context, [sp],\n relative_count_to_relevant_sounds=relative_count_to_relevant_sounds,\n relative_count_to_whole_corpus=relative_count_to_whole_corpus,\n distinguish_homophones=distinguish_homophones,\n environment_filter=environment_filter,\n prevent_normalization=prevent_normalization,\n stop_check=stop_check, call_back=call_back)\n results.append(res[0])\n\n if output_filename is not None:\n to_output.append((sp, res[1]))\n if output_filename is not None:\n save_minimal_pairs(output_filename, to_output)\n return sum(results) / len(segment_pairs)",
"def gcovParser(filePath):\n\t\tresult = 0\n\t\twith open(filePath + \".c.gcov\", \"r\") as file:\n\t\t\tfor line in file:\n\t\t\t\tnumber = line.split(':')[0]\n\t\t\t\tnumber = number.strip()\n\t\t\t\tif number.isdigit():\n\t\t\t\t\tresult += int(number)\n\n\t\treturn [result]",
"def stat_parser():\n from tools import file_importer, file_outporter\n from math import log\n \n print(\"this is stat parser\")\n \n relPath = \"bob/processed/24h_bobdata_ed2.csv\"\n outPathUp = \"bob/processed/24h_bobprots_up_full.csv\"\n outPathDown = \"bob/processed/24h_bobprots_down_full.csv\"\n inpF = file_importer(relPath)\n outFUp = file_outporter(outPathUp)\n outFDown = file_outporter(outPathDown)\n \n \n skipFlag = True\n \n for inpLine in inpF:\n if skipFlag:\n skipFlag = False\n outFDown.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n outFUp.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n curLine.append(inpI.strip(\"\\\"\\n\"))\n try: \n curLine[-1] = float(curLine[-1])\n except ValueError:\n curLine[-1] = 1 \n if curLine[-1] < 0.05 and int(curLine[3]) > 1: # check if protein has at least 2 unique peptides and has a significant p value\n curLine[4:10] = [int(x) for x in curLine[4:10]]\n enrScore = log((sum(curLine[4:7]) / 3.0)/(sum(curLine[7:10]) / 3.0),2) # calculate log2 enrichment score\n # print int(sum(curLine[4:7]) / 3.0), int(sum(curLine[7:10]) / 3.0)\n if sum(curLine[4:7]) / 3.0 > sum(curLine[7:10]) / 3.0: # if the mean of the KO intensities is higher than the wt \n for outI in curLine:\n outFDown.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFDown.write(\",\")\n if outI is curLine[-2]:\n outFDown.write(str(enrScore)+ \",\")\n else:\n outFDown.write(\"\\n\")\n # outFDown.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n else:\n # outFUp.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n for outI in curLine:\n outFUp.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFUp.write(\",\")\n if outI is curLine[-2]:\n outFUp.write(str(enrScore)+ \",\")\n else:\n outFUp.write(\"\\n\")\n \n inpF.close()\n outFUp.close()\n outFDown.close()\n print(\"stat_parser completed\")",
"def extract(lines):\n prefix = 'Note: including file: '\n for line in lines:\n if line.startswith(prefix):\n line = os.path.normpath(line[len(prefix):])\n # Determine the depth by counting the number of spaces starting the line.\n depth = len(line) - len(line.lstrip()) + 1\n yield (depth, line.strip())",
"def count_seqs_in_filepaths(fasta_filepaths, seq_counter=count_seqs):\r\n total = 0\r\n counts = []\r\n inaccessible_filepaths = []\r\n # iterate over the input files\r\n for fasta_filepath in fasta_filepaths:\r\n # if the file is actually fastq, use the fastq parser.\r\n # otherwise use the fasta parser\r\n if fasta_filepath.endswith('.fastq'):\r\n parser = parse_fastq\r\n elif fasta_filepath.endswith('.tre') or \\\r\n fasta_filepath.endswith('.ph') or \\\r\n fasta_filepath.endswith('.ntree'):\r\n # This is clunky, but really convenient bc\r\n # it lets us count tree tips with count_seqs.py\r\n def parser(f):\r\n t = DndParser(f, constructor=PhyloNode)\r\n return zip(t.iterTips(), repeat(''))\r\n else:\r\n parser = parse_fasta\r\n\r\n try:\r\n # get the count of sequences in the current file\r\n current_count = seq_counter(fasta_filepath, parser=parser)\r\n # store it\r\n counts.append((current_count, fasta_filepath))\r\n # and increment the total count\r\n total += current_count[0]\r\n except IOError:\r\n # if the file couldn't be open, keep track of the filepath\r\n inaccessible_filepaths.append(fasta_filepath)\r\n\r\n return counts, total, inaccessible_filepaths",
"def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])",
"def createProcesses(arguments, file_list, start, end):\r\n NUM_PROCESS = 1 # Check variables\r\n NUM_FILE = len(file_list)\r\n NUM_ARGS = len(arguments)\r\n\r\n proc_info = {}\r\n\r\n sem.acquire()\r\n processNumber.value += 1\r\n sem.release()\r\n\r\n for file in file_list:\r\n statinfo = os.stat(file)\r\n size = str(statinfo.st_size)\r\n proc_info.update({os.getpid():[file, size[:-1]]})\r\n\r\n result_list = [] # List that keeps all the results for each file\r\n PSTRING = \"\" # String that will be printed with the result of each read file\r\n\r\n date_beg = datetime.datetime.now()\r\n\r\n if \"-w\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(countWords(file, start, end)))\r\n else:\r\n result_list.append(str(countWords(file, None, None)))\r\n\r\n elif \"-l\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(countLines(file, start, end)))\r\n else:\r\n result_list.append(str(countLines(file, None, None)))\r\n\r\n if \"-L\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(biggestLine(file, start, end)))\r\n else:\r\n result_list.append(str(biggestLine(file, None, None)))\r\n\r\n elif \"-c\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(countCharacters(file, start, end)))\r\n else:\r\n result_list.append(str(countCharacters(file, None, None)))\r\n\r\n if int(NUM_PROCESS) < int(NUM_FILE):\r\n if NUM_FILE == 1:\r\n if NUM_ARGS > 1:\r\n for number in range(len(result_list)):\r\n division = int(number) % 2\r\n\r\n if division == 0:\r\n total[0] += int(result_list[number])\r\n else:\r\n if total[1] > int(result_list[number]):\r\n continue\r\n else:\r\n total[1] = int(result_list[number])\r\n\r\n else:\r\n if NUM_ARGS > 1:\r\n total[0] += int(result_list[-2])\r\n result_number = int(result_list[-1])\r\n\r\n if total[1] > int(result_number):\r\n pass\r\n else:\r\n total[1] = int(result_number)\r\n\r\n else:\r\n total[0] += int(result_list[-1])\r\n\r\n elif int(NUM_PROCESS) == int(NUM_FILE):\r\n if NUM_ARGS > 1:\r\n for number in range(len(result_list)):\r\n division = int(number) % 2\r\n\r\n if division == 0:\r\n total[0] += int(result_list[number])\r\n else:\r\n if total[1] > int(result_list[number]):\r\n continue\r\n else:\r\n total[1] = int(result_list[number])\r\n\r\n else:\r\n for number in result_list:\r\n total[0] += int(number)\r\n\r\n for number in result_list:\r\n PSTRING += str(number) + \" \"\r\n\r\n if \"-l\" in arguments:\r\n proc_info[os.getpid()].append([\"linhas\", result_list[0]])\r\n elif \"-c\" in arguments:\r\n proc_info[os.getpid()].append([\"caracteres\", result_list[0]])\r\n elif \"-w\" in arguments:\r\n proc_info[os.getpid()].append([\"palavras\", result_list[0]])\r\n\r\n PSTRING += file\r\n print(PSTRING)\r\n\r\n date_end = datetime.datetime.now()\r\n duration = date_end - date_beg\r\n hours, minutes, seconds, microseconds = convert_timedelta(duration)\r\n time_str = str(hours) + \":\" + str(minutes) + \":\" + str(seconds) + \":\" + str(microseconds)\r\n\r\n proc_info[os.getpid()].append(time_str)\r\n\r\n queue_proc.put(proc_info)"
] | [
"0.65458274",
"0.56343454",
"0.49421835",
"0.48282015",
"0.47991893",
"0.47809947",
"0.46508983",
"0.46498537",
"0.46464705",
"0.46307102",
"0.46042892",
"0.45846727",
"0.45683825",
"0.45669442",
"0.45614496",
"0.45445207",
"0.4517693",
"0.45140862",
"0.4451195",
"0.44409293",
"0.44400764",
"0.4420526",
"0.4406386",
"0.44031885",
"0.43902597",
"0.43883875",
"0.43878835",
"0.4381414",
"0.43403247",
"0.43198374"
] | 0.70118713 | 0 |
Given the block size and the desired block index, return the slice of bytes from 0 to the end of the given block. | def bytes_to_block(block_size: int, i: int) -> slice:
return slice(0, block_size * (i + 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bytes_in_block(block_size: int, i: int) -> slice:\n return slice(block_size * i, block_size * (i + 1))",
"def get_block(self, idx):\n self.input_file.seek(idx * self.blocksize)\n return self.input_file.read(self.blocksize)",
"def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]",
"def read_block(file, block_size):\n block = b\"\"\n for i in range(block_size):\n this_byte = file.read(1)\n # If the last block consumed the last char in file:\n if this_byte == b\"\" and i == 0:\n return (-1, False)\n # If we reach EOF prematurely:\n elif this_byte == b\"\":\n block += chr(0).encode()*(block_size - i)\n return (block, False)\n else:\n block += this_byte\n return (block, True)",
"def ith_byte_block(block_size: int, i: int) -> int:\n assert block_size > 0\n assert i >= 0\n return i // block_size",
"def fetch_block(path, offset, block_size):\n with open(path, 'rb') as file:\n file.seek(offset)\n return bz2.decompress(file.read(block_size))",
"def get_data_block_contents_bytes(self):\n bb = self.volume.blkdev.block_bytes\n if self.volume.is_ffs:\n return bb\n else:\n return bb - 24",
"def slice(self, size=None, offset=None):\n offset = offset or 0\n size = size or len(self)\n\n data = []\n data_size = 0\n\n # dequeue chunks\n size += self.offset + offset\n while self.chunks:\n if data_size >= size:\n break\n chunk = self.chunks.popleft()\n data.append(chunk)\n data_size += len(chunk)\n\n # re-queue merged chunk\n data = b''.join(data)\n self.chunks.appendleft(data)\n\n return data[self.offset + offset:size]",
"def __read_block(self, size):\n buf = b\"\"\n if len(self.__read_buffer):\n limit = (\n size if size <= len(self.__read_buffer) else\n len(self.__read_buffer)\n )\n buf = self.__read_buffer[:limit]\n self.__read_buffer = self.__read_buffer[limit:]\n size -= limit\n if not size:\n return buf\n try:\n buf += self.sock.recv(size)\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read %d bytes from the server\" % size)\n self.__dprint(buf)\n return buf",
"def read(self, size: int = -1) -> bytes:\n if self.size_read >= self.chunksize:\n return b''\n if size < 0:\n size = self.chunksize - self.size_read\n if size > self.chunksize - self.size_read:\n size = self.chunksize - self.size_read\n data = self.file.read(size)\n self.size_read = self.size_read + len(data)\n if self.size_read == self.chunksize and (self.chunksize & 1):\n dummy = self.file.read(1)\n self.size_read = self.size_read + len(dummy)\n return data",
"def __ReadBytes(self, size, block=True):\n need_more_data = True\n result = b''\n while need_more_data:\n read_bytes = self.__input_stream.read(size)\n if read_bytes:\n result += read_bytes\n elif read_bytes is not None:\n return (result, False)\n elif not block:\n return (result, True)\n need_more_data = (len(result) < size)\n\n return (result, True)",
"def read(self, block_no):\n with open(self.file_path, 'r+') as f:\n f.seek(block_no * config.block_size)\n return f.read(config.block_size)",
"def read(self, addr, size):\n ret = []\n\n while size > 0:\n block = self._load_block(addr)\n offset = addr - block.base\n n_read = self.block_size - offset\n\n less = size if size < n_read else n_read\n end = offset + less\n addr += n_read\n size -= n_read\n this = block.read(offset, end)\n\n if not ret:\n ret = this\n else:\n ret.extend(this)\n\n if len(ret) == 1:\n return ret[0]\n return ret",
"def naive_block_padding(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n l = len(b)\n if l > 0 and l % size == 0:\n return b\n\n return b + b'\\x00' * (size - (l % size))",
"def block_split(stream, block_size=BLOCK_SIZE_IN_BYTES):\n # TODO: this could possibly be a generator\n return [stream[i:i + BLOCK_SIZE_IN_BYTES]\n for i in range(0, len(stream), BLOCK_SIZE_IN_BYTES)]",
"def read_bytes(self, number_of_bytes):\n\n self.index = -1\n data = self.buf[self.offset:self.offset + number_of_bytes]\n self.offset += number_of_bytes\n\n return data",
"def getBytes(self, addr: ghidra.program.model.address.Address, dest: List[int], destIndex: int, size: int) -> int:\n ...",
"def byte_slices(self) -> Iterator[slice]:\n for byte_index in islice(self.byte_offsets, 0, self.max_bytes, 8):\n yield slice(byte_index, byte_index + 8)",
"def read(self, size=-1):\n\n if size < 0:\n raise NotImplementedError(\"Don't be greedy, that could be massive!\")\n elif size == 0:\n if self._text:\n return \"\"\n else:\n return b\"\"\n elif self._within_block_offset + size <= len(self._buffer):\n # This may leave us right at the end of a block\n # (lazy loading, don't load the next block unless we have too)\n data = self._buffer[self._within_block_offset:self._within_block_offset + size]\n self._within_block_offset += size\n assert data # Must be at least 1 byte\n return data\n else:\n # if read data overflows to next block\n # pull in rest of data in current block\n data = self._buffer[self._within_block_offset:]\n\n # decrement size so that we only pull the rest of the data\n # from next block\n size -= len(data)\n self._load_block() # will reset offsets\n\n if not self._buffer:\n return data # EOF\n\n # if there is still more to read\n elif size:\n # pull rest of data from next block\n return data + self.read(size)\n else:\n # Only needed the end of the last block\n return data",
"def read_bytes(path, s3=None, delimiter=None, not_zero=False, blocksize=2**27,\n sample=True, compression=None, **kwargs):\n bucket = kwargs.pop('host', '')\n s3_path = bucket + path\n if s3 is None:\n s3 = _get_s3(**kwargs)\n\n if '*' in path:\n filenames = sorted(s3.glob(s3_path))\n if not filenames:\n raise IOError(\"No such files: '%s'\" % s3_path)\n sample, first = read_bytes(filenames[0], s3, delimiter, not_zero,\n blocksize, sample=sample,\n compression=compression)\n rest = [read_bytes(f, s3, delimiter, not_zero, blocksize,\n sample=False, compression=compression)[1]\n for f in filenames[1:]]\n return sample, [first] + rest\n else:\n if blocksize is None:\n offsets = [0]\n else:\n size = getsize(s3_path, compression, s3)\n offsets = list(range(0, size, blocksize))\n if not_zero:\n offsets[0] = 1\n\n info = s3.ls(s3_path, detail=True)[0]\n\n token = tokenize(info['ETag'], delimiter, blocksize, not_zero, compression)\n\n s3_storage_options = s3.get_delegated_s3pars()\n\n logger.debug(\"Read %d blocks of binary bytes from %s\", len(offsets), s3_path)\n\n delayed_read_block_from_s3 = delayed(read_block_from_s3)\n values = [delayed_read_block_from_s3(s3_path, offset, blocksize,\n delimiter=delimiter,\n compression=compression,\n dask_key_name='read-block-s3-%s-%d' % (token, offset),\n **s3_storage_options)\n for offset in offsets]\n\n if sample:\n if isinstance(sample, int) and not isinstance(sample, bool):\n nbytes = sample\n else:\n nbytes = 10000\n sample = read_block_from_s3(s3_path, 0, nbytes, s3,\n delimiter, compression, **kwargs)\n\n return sample, values",
"def read_block(f, offset, length, delimiter=None):\n if delimiter:\n f.seek(offset)\n seek_delimiter(f, delimiter, 2**16)\n start = f.tell()\n length -= start - offset\n\n f.seek(start + length)\n seek_delimiter(f, delimiter, 2**16)\n end = f.tell()\n\n offset = start\n length = end - start\n\n f.seek(offset)\n b = f.read(length)\n return b",
"def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf",
"def slice_generator(\n sequence_length,\n n_blocks):\n return ((int(round((b - 1) * sequence_length/n_blocks)),\n int(round(b * sequence_length/n_blocks)))\n for b in range(1, n_blocks+1))",
"def read(self, file_offset: int, buffer):\n bytes_read = 0\n block_to_read = file_offset // self.fs.block_size # local to inode\n # print(block_to_read, file_offset)\n if block_to_read == 0:\n offset_in_block = file_offset\n else:\n offset_in_block = file_offset % block_to_read\n read_buffer = bytearray(self.fs.block_size)\n\n while bytes_read < len(buffer):\n block_addr = self.getDiskAddrOfBlock(self.fs, block_to_read)\n if block_addr == -1: # no more data to read\n break\n\n if self.fs.block_map[block_addr] == 1: # skip if block isn't allocated\n # Get block: check cache first, otherwise read in\n cached_block = self.fs.blockCache.get(block_addr)\n if cached_block != None:\n read_buffer = cached_block\n else:\n self.fs.block_device.read_block(block_addr, read_buffer)\n \n # Read to end of block, or read to end of buffer, whichever's shorter\n bytes_to_read = min(self.fs.block_size-offset_in_block, len(buffer)-bytes_read)\n start = bytes_read\n stop = start + bytes_to_read\n read_start = offset_in_block\n read_stop = read_start + bytes_to_read\n # print(start,stop,read_start,read_stop)\n buffer[start:stop] = read_buffer[read_start:read_stop]\n \n bytes_read += bytes_to_read\n # Remaining blocks will be (left-)aligned\n if offset_in_block != 0:\n offset_in_block = 0\n\n block_to_read += 1\n\n return bytes_read",
"def getBytes(memory: ghidra.program.model.mem.Memory, startAddress: ghidra.program.model.address.Address, length: int) -> List[int]:\n ...",
"def read_bytes(self) -> bytes:\n t = self.pc\n while self.data[self.pc] != 0:\n self.pc += 1\n result = self.data[t:self.pc]\n self.pc += 1 # jump '\\0'\n return result",
"def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0",
"def seek(self, virtual_offset):\n\n # Do this inline to avoid a function call,\n # start_offset, within_block = split_virtual_offset(virtual_offset)\n start_offset = virtual_offset >> 16\n within_block = virtual_offset ^ (start_offset << 16)\n if start_offset != self._block_start_offset:\n # Don't need to load the block if already there\n # (this avoids a function call since _load_block would do nothing)\n self._load_block(start_offset)\n assert start_offset == self._block_start_offset\n if within_block > len(self._buffer):\n if not (within_block == 0 and len(self._buffer) == 0):\n raise ValueError(\"Within offset %i but block size only %i\"\n % (within_block, len(self._buffer)))\n self._within_block_offset = within_block\n return virtual_offset",
"def _read_raw_bytes_multiple(self, size, maxread=512, verbose=0):\n ret = []\n instr = self.visa_handle\n with self.visa_handle.ignore_warning(pyvisa.constants.VI_SUCCESS_MAX_CNT):\n nread = 0\n while nread < size:\n nn = min(maxread, size - nread)\n chunk, status = instr.visalib.read(instr.session, nn)\n ret += [chunk]\n nread += len(chunk)\n if verbose:\n print('_read_raw: %d/%d bytes' % (len(chunk), nread))\n ret = b''.join(ret)\n return ret",
"def _read_bytes(self, start, num_bytes):\n with self._fp_lock:\n self._fp.seek(start)\n return self._fp.read(num_bytes)"
] | [
"0.80001956",
"0.638865",
"0.63013935",
"0.6273106",
"0.6159458",
"0.6123575",
"0.5852839",
"0.5841076",
"0.5801677",
"0.57806903",
"0.5606504",
"0.5605544",
"0.5587722",
"0.55432785",
"0.55123276",
"0.549337",
"0.5320378",
"0.53183264",
"0.5302764",
"0.52887833",
"0.5285994",
"0.5271814",
"0.52349013",
"0.5216103",
"0.5212085",
"0.51863676",
"0.51818717",
"0.51737624",
"0.5136523",
"0.51299393"
] | 0.7709877 | 1 |
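A minimal usage sketch of the `bytes_to_block` helper from the record above, assuming standard Python slicing; the helper body is copied from the document column, while the sample buffer and block size are invented for illustration.

# Usage sketch for bytes_to_block; the buffer and block size are illustrative.
def bytes_to_block(block_size: int, i: int) -> slice:
    # Slice from the start of the buffer through the end of block i (0-indexed).
    return slice(0, block_size * (i + 1))

buf = bytes(range(32))                                  # four 8-byte blocks
assert buf[bytes_to_block(8, 0)] == bytes(range(8))     # block 0 only
assert buf[bytes_to_block(8, 1)] == bytes(range(16))    # blocks 0 and 1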
Given the block size and the desired block index, return the slice selecting just that block's bytes. | def bytes_in_block(block_size: int, i: int) -> slice:
return slice(block_size * i, block_size * (i + 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bytes_to_block(block_size: int, i: int) -> slice:\n return slice(0, block_size * (i + 1))",
"def get_block(self, idx):\n self.input_file.seek(idx * self.blocksize)\n return self.input_file.read(self.blocksize)",
"def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]",
"def ith_byte_block(block_size: int, i: int) -> int:\n assert block_size > 0\n assert i >= 0\n return i // block_size",
"def get_data_block_contents_bytes(self):\n bb = self.volume.blkdev.block_bytes\n if self.volume.is_ffs:\n return bb\n else:\n return bb - 24",
"def fetch_block(path, offset, block_size):\n with open(path, 'rb') as file:\n file.seek(offset)\n return bz2.decompress(file.read(block_size))",
"def read_block(file, block_size):\n block = b\"\"\n for i in range(block_size):\n this_byte = file.read(1)\n # If the last block consumed the last char in file:\n if this_byte == b\"\" and i == 0:\n return (-1, False)\n # If we reach EOF prematurely:\n elif this_byte == b\"\":\n block += chr(0).encode()*(block_size - i)\n return (block, False)\n else:\n block += this_byte\n return (block, True)",
"def __read_block(self, size):\n buf = b\"\"\n if len(self.__read_buffer):\n limit = (\n size if size <= len(self.__read_buffer) else\n len(self.__read_buffer)\n )\n buf = self.__read_buffer[:limit]\n self.__read_buffer = self.__read_buffer[limit:]\n size -= limit\n if not size:\n return buf\n try:\n buf += self.sock.recv(size)\n except (socket.timeout, ssl.SSLError):\n raise Error(\"Failed to read %d bytes from the server\" % size)\n self.__dprint(buf)\n return buf",
"def read_bytes(self, number_of_bytes):\n\n self.index = -1\n data = self.buf[self.offset:self.offset + number_of_bytes]\n self.offset += number_of_bytes\n\n return data",
"def slice(self, size=None, offset=None):\n offset = offset or 0\n size = size or len(self)\n\n data = []\n data_size = 0\n\n # dequeue chunks\n size += self.offset + offset\n while self.chunks:\n if data_size >= size:\n break\n chunk = self.chunks.popleft()\n data.append(chunk)\n data_size += len(chunk)\n\n # re-queue merged chunk\n data = b''.join(data)\n self.chunks.appendleft(data)\n\n return data[self.offset + offset:size]",
"def byte_slices(self) -> Iterator[slice]:\n for byte_index in islice(self.byte_offsets, 0, self.max_bytes, 8):\n yield slice(byte_index, byte_index + 8)",
"def getBytes(self, addr: ghidra.program.model.address.Address, dest: List[int], destIndex: int, size: int) -> int:\n ...",
"def _read_raw_bytes_multiple(self, size, maxread=512, verbose=0):\n ret = []\n instr = self.visa_handle\n with self.visa_handle.ignore_warning(pyvisa.constants.VI_SUCCESS_MAX_CNT):\n nread = 0\n while nread < size:\n nn = min(maxread, size - nread)\n chunk, status = instr.visalib.read(instr.session, nn)\n ret += [chunk]\n nread += len(chunk)\n if verbose:\n print('_read_raw: %d/%d bytes' % (len(chunk), nread))\n ret = b''.join(ret)\n return ret",
"def read(self, addr, size):\n ret = []\n\n while size > 0:\n block = self._load_block(addr)\n offset = addr - block.base\n n_read = self.block_size - offset\n\n less = size if size < n_read else n_read\n end = offset + less\n addr += n_read\n size -= n_read\n this = block.read(offset, end)\n\n if not ret:\n ret = this\n else:\n ret.extend(this)\n\n if len(ret) == 1:\n return ret[0]\n return ret",
"def read(self, size: int = -1) -> bytes:\n if self.size_read >= self.chunksize:\n return b''\n if size < 0:\n size = self.chunksize - self.size_read\n if size > self.chunksize - self.size_read:\n size = self.chunksize - self.size_read\n data = self.file.read(size)\n self.size_read = self.size_read + len(data)\n if self.size_read == self.chunksize and (self.chunksize & 1):\n dummy = self.file.read(1)\n self.size_read = self.size_read + len(dummy)\n return data",
"def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0",
"def _read_raw_bytes_direct(self, size):\n with(self.visa_handle.ignore_warning(pyvisa.constants.VI_SUCCESS_MAX_CNT)):\n data, statuscode = self.visa_handle.visalib.read(\n self.visa_handle.session, size)\n\n return data",
"def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf",
"def naive_block_padding(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n l = len(b)\n if l > 0 and l % size == 0:\n return b\n\n return b + b'\\x00' * (size - (l % size))",
"def build(self, block_size):",
"def __ReadBytes(self, size, block=True):\n need_more_data = True\n result = b''\n while need_more_data:\n read_bytes = self.__input_stream.read(size)\n if read_bytes:\n result += read_bytes\n elif read_bytes is not None:\n return (result, False)\n elif not block:\n return (result, True)\n need_more_data = (len(result) < size)\n\n return (result, True)",
"def read(self, size=-1):\n\n if size < 0:\n raise NotImplementedError(\"Don't be greedy, that could be massive!\")\n elif size == 0:\n if self._text:\n return \"\"\n else:\n return b\"\"\n elif self._within_block_offset + size <= len(self._buffer):\n # This may leave us right at the end of a block\n # (lazy loading, don't load the next block unless we have too)\n data = self._buffer[self._within_block_offset:self._within_block_offset + size]\n self._within_block_offset += size\n assert data # Must be at least 1 byte\n return data\n else:\n # if read data overflows to next block\n # pull in rest of data in current block\n data = self._buffer[self._within_block_offset:]\n\n # decrement size so that we only pull the rest of the data\n # from next block\n size -= len(data)\n self._load_block() # will reset offsets\n\n if not self._buffer:\n return data # EOF\n\n # if there is still more to read\n elif size:\n # pull rest of data from next block\n return data + self.read(size)\n else:\n # Only needed the end of the last block\n return data",
"def read_bytes(self, size):\n return self.read('bytes:'+str(size))",
"def read(self, block_no):\n with open(self.file_path, 'r+') as f:\n f.seek(block_no * config.block_size)\n return f.read(config.block_size)",
"def getInstructionBytes(self, offset: int, bytestart: int, size: int) -> int:\n ...",
"def __read(self, i: int) -> bytes:\r\n b = self.data[self.idx: self.idx + i]\r\n self.idx += i\r\n if len(b) != i:\r\n raise DecodingError(\r\n \"Incorrect byte length returned between indexes of {0} and {1}. Possible unexpected End of File.\"\r\n .format(str(self.idx), str(self.idx - i)))\r\n return b",
"def getBytes(memory: ghidra.program.model.mem.Memory, startAddress: ghidra.program.model.address.Address, length: int) -> List[int]:\n ...",
"def get_blocks_before(self, hash_bytes: bytes, num_blocks: int = 100) -> list[Block]:\n raise NotImplementedError",
"def readBytes(self, size=1):\n return self.bytes",
"def read_bytes(self) -> bytes:\n t = self.pc\n while self.data[self.pc] != 0:\n self.pc += 1\n result = self.data[t:self.pc]\n self.pc += 1 # jump '\\0'\n return result"
] | [
"0.7297683",
"0.64004445",
"0.61794794",
"0.6134597",
"0.58951896",
"0.586929",
"0.58357203",
"0.58036876",
"0.575804",
"0.5738425",
"0.5701645",
"0.5659119",
"0.5560989",
"0.55597043",
"0.5514207",
"0.5506305",
"0.55037373",
"0.5469516",
"0.5417936",
"0.54173195",
"0.5410381",
"0.5376962",
"0.5352557",
"0.5351906",
"0.53075254",
"0.52966416",
"0.5288053",
"0.5265433",
"0.52501476",
"0.5238396"
] | 0.79082745 | 0 |
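A minimal usage sketch of the `bytes_in_block` helper from the record above, again with an invented buffer and block size; it pairs naturally with `bytes_to_block`, since both simply return ordinary Python slice objects.

# Usage sketch for bytes_in_block; the buffer and block size are illustrative.
def bytes_in_block(block_size: int, i: int) -> slice:
    # Half-open slice covering only block i (0-indexed).
    return slice(block_size * i, block_size * (i + 1))

buf = bytes(range(32))
assert buf[bytes_in_block(8, 0)] == bytes(range(8))       # first block
assert buf[bytes_in_block(8, 3)] == bytes(range(24, 32))  # last block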
Given a buffer and the key length, split the buffer into blocks, grouping bytes by their position modulo the key length. | def split_blocks(b: bytes, k_len: int) -> tuple:
assert len(b) >= k_len
return tuple(
bytes(
b[j] for j in range(i, len(b), k_len)
) for i in range(0, k_len)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _buff_split(self, upload_buffer):\n if upload_buffer.intent_count() == 0:\n return\n tail_buffer = upload_buffer\n while True:\n if tail_buffer.length < self.recommended_upload_part_size + self.min_part_size:\n # `EmergePlanner_buff_partition` can split in such way that tail part\n # can be smaller than `min_part_size` - to avoid unnecessary download of possible\n # incoming copy intent, we don't split further\n yield tail_buffer\n return\n head_buff, tail_buffer = self._buff_partition(tail_buffer)\n yield head_buff",
"def block_split(stream, block_size=BLOCK_SIZE_IN_BYTES):\n # TODO: this could possibly be a generator\n return [stream[i:i + BLOCK_SIZE_IN_BYTES]\n for i in range(0, len(stream), BLOCK_SIZE_IN_BYTES)]",
"def chunk_bytes(buf):\n assert len(buf) >= CHUNK_SIZE / 2\n n = len(buf)\n if n < CHUNK_SIZE:\n yield buf[: CHUNK_SIZE // 2] + buf[-CHUNK_SIZE // 2 :], n\n return\n\n for i in range(0, len(buf), CHUNK_SIZE):\n if i + CHUNK_SIZE <= n:\n yield buf[i : i + CHUNK_SIZE], CHUNK_SIZE\n else:\n yield buf[n - CHUNK_SIZE :], n - i",
"def split_file(self, input_file, buffer=1024) -> str:\n file_size = os.stat(input_file).st_size\n with create_pg(total=file_size, leave=False, unit='B', unit_scale=True, unit_divisor=1024,\n desc='Splitting file') as t:\n\n with open(input_file, 'rb') as src:\n while True:\n with tempfile.NamedTemporaryFile() as f:\n with open(f.name, 'wb') as dest:\n written = 0\n while written < self.max_size:\n data = src.read(buffer)\n if data:\n dest.write(data)\n written += buffer\n t.update(len(data))\n else:\n if written == 0:\n return # file has ended on split size - don't yield\n\n break\n\n yield f.name",
"def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf",
"def testSplit(self):\n\n protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()\n bigstring = \"\".join(chr(byte) for byte in range(ord(\"a\"), ord(\"z\")+1))\n\n databuf = TTransport.TMemoryBuffer()\n prot = protocol_factory.getProtocol(databuf)\n prot.writeI32(42)\n prot.writeString(bigstring)\n prot.writeI16(24)\n data = databuf.getvalue()\n cutpoint = len(data)/2\n parts = [ data[:cutpoint], data[cutpoint:] ]\n\n framed_buffer = TTransport.TMemoryBuffer()\n framed_writer = TTransport.TFramedTransport(framed_buffer)\n for part in parts:\n framed_writer.write(part)\n framed_writer.flush()\n self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)\n\n # Recreate framed_buffer so we can read from it.\n framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())\n framed_reader = TTransport.TFramedTransport(framed_buffer)\n prot = protocol_factory.getProtocol(framed_reader)\n self.assertEqual(prot.readI32(), 42)\n self.assertEqual(prot.readString(), bigstring)\n self.assertEqual(prot.readI16(), 24)",
"def chunks(cipher, size):\n\treturn [cipher[i*size:(i+1)*size] for i in range(int(math.ceil(len(cipher)*1.0/size)))]",
"def _buff_partition(self, upload_buffer):\n left_buff = UploadBuffer(upload_buffer.start_offset)\n buff_start = upload_buffer.start_offset\n for idx, (intent, fragment_end) in enumerate(upload_buffer.iter_items()):\n candidate_size = fragment_end - buff_start\n if candidate_size > self.recommended_upload_part_size:\n right_fragment_size = candidate_size - self.recommended_upload_part_size\n left_buff.append(intent, fragment_end - right_fragment_size)\n return left_buff, upload_buffer.get_slice(\n start_idx=idx, start_offset=left_buff.end_offset\n )\n else:\n left_buff.append(intent, fragment_end)\n if candidate_size == self.recommended_upload_part_size:\n return left_buff, upload_buffer.get_slice(start_idx=idx + 1)\n\n return left_buff, UploadBuffer(left_buff.end_offset)",
"def splitInBlocks (l, n):\n k = len(l) / n\n r = len(l) % n\n\n i = 0\n blocks = []\n while i < len(l):\n if len(blocks)<r:\n blocks.append(l[i:i+k+1])\n i += k+1\n else:\n blocks.append(l[i:i+k])\n i += k\n\n return blocks",
"def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk",
"def bytes_to_block(block_size: int, i: int) -> slice:\n return slice(0, block_size * (i + 1))",
"def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]",
"def _validate_and_split_key(self, key):\n if self._len_keys == 1:\n return self._validate_and_split_len_one(key)\n else:\n return self._validate_and_split_len(key)",
"def _split_chunk(self, collection_name: str, key: int):\n def split_command():\n self._mongo_client.admin.command('split', collection_name, middle={SHARD_KEY: key})\n self._try_until_done(split_command)\n self._chunks[collection_name][key] = MAIN_MONGO_SHARD_NAME\n logging.info(f\"MongoAgent: Split chunk of {collection_name} at {key}\")",
"def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]",
"def split_in_half(keys_56bits):\n left_keys, right_keys = keys_56bits[:28], keys_56bits[28:]\n return left_keys, right_keys",
"def chunker(bitstream, chunk_size):\n\n for chunk in nslice(lazy_pad(bitstream, 2 * chunk_size), 2 * chunk_size):\n yield chunk[:chunk_size], chunk[chunk_size:]",
"def make_chunks(l, chunk_length):\n for i in range(0, len(l), chunk_length):\n yield l[i:i + chunk_length]",
"def build_chunks(read_bytes, file_size, chunk_size):\n\n chunks = []\n\n index = 0\n start = 0\n\n while start < file_size:\n end = min(start + chunk_size, file_size)\n size = end - start\n\n chunk = FileChunk(index, size, partial(read_bytes, start, size))\n chunks.append(chunk)\n\n index += 1\n start += chunk_size\n\n return chunks",
"def chunk_split(cls, text):\n parts = []\n current = []\n for line in text.splitlines():\n size = sum(len(part) + 1 for part in current)\n extra = len(line)\n if size + extra >= 2000:\n if current:\n # The message is full, split here.\n parts.append(\"\\n\".join(current))\n current.clear()\n if extra >= 2000:\n # The line itself is too long, split on whitespace instead.\n *lines, line = wrap(line, 2000, expand_tabs=False, replace_whitespace=False)\n parts.extend(lines)\n current.append(line)\n if current:\n parts.append(\"\\n\".join(current))\n return parts",
"def text_splitter_bytes(text,splitter=\"\\n\", split_every=4096):\n #divide il testo\n text = [elem+splitter for elem in text.split(splitter) if elem]\n\n res=[]\n to_append=\"\"\n for elem in text:\n #se la lunghezza del testo in bytes del to_append + elem supera il parametrp\n if len((to_append+elem).encode('utf-8'))>=split_every:\n #appendi e resetta to_append\n res.append(to_append)\n to_append=\"\"\n # altrimenti aggiungi elem\n to_append+=elem\n #se la lunghezza del text è inferiore a split_every appendi al res\n res.append(to_append)\n return res",
"def _filter_chunk(chunk_buffer, filter_mask, filter_pipeline, itemsize):\n num_filters = len(filter_pipeline)\n for i, pipeline_entry in enumerate(filter_pipeline[::-1]):\n\n # A filter is skipped is the bit corresponding to its index in the\n # pipeline is set in filter_mask\n filter_index = num_filters - i - 1 # 0 to num_filters - 1\n if filter_mask & (1 << filter_index):\n continue\n\n filter_id = pipeline_entry['filter_id']\n if filter_id == GZIP_DEFLATE_FILTER:\n chunk_buffer = zlib.decompress(chunk_buffer)\n\n elif filter_id == SHUFFLE_FILTER:\n buffer_size = len(chunk_buffer)\n unshuffled_buffer = bytearray(buffer_size)\n step = buffer_size // itemsize\n for j in range(itemsize):\n start = j * step\n end = (j+1) * step\n unshuffled_buffer[j::itemsize] = chunk_buffer[start:end]\n chunk_buffer = unshuffled_buffer\n elif filter_id == FLETCH32_FILTER:\n _verify_fletcher32(chunk_buffer)\n # strip off 4-byte checksum from end of buffer\n chunk_buffer = chunk_buffer[:-4]\n else:\n raise NotImplementedError(\n \"Filter with id: %i import supported\" % (filter_id))\n return chunk_buffer",
"def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)",
"def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size",
"def split_on_chunks(sequence, length: int, no_rest=False):\n\n if not isinstance(sequence, (list, tuple)):\n raise TypeError('Support only an instance of list or tuple')\n\n sequence_len = len(sequence)\n\n if length < 1:\n raise ValueError(\"Length of a chunk must be at least 1\")\n elif length >= sequence_len:\n return sequence\n\n chuncks = list()\n for i in range(0, sequence_len, length):\n chuncks.append(tuple(sequence[i: i + length]))\n\n if no_rest is True and len(chuncks[-1]) < length and len(chuncks) > 1:\n chuncks[-2] = list(chuncks[-2])\n chuncks[-2].extend(chuncks[-1])\n chuncks[-2] = tuple(chuncks[-2])\n chuncks = chuncks[:-1]\n\n return chuncks",
"def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]",
"def ssplit(str_, length=420):\n buf = list()\n for line in str_.split('\\n'):\n buf.extend(textwrap.wrap(line.rstrip('\\r'), length))\n return buf",
"def _get_chunks(l, n = 10):\n \n for i in range(0, len(l), n): yield l[i: i + n]",
"def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]",
"def split_chunk(chunk, *a, **kw):\n return split_chunk(chunk, *a, **kw)"
] | [
"0.61693007",
"0.611693",
"0.59779125",
"0.5802576",
"0.5779824",
"0.5730315",
"0.5620192",
"0.5557428",
"0.55036813",
"0.54738647",
"0.5424786",
"0.53956705",
"0.5385404",
"0.53824395",
"0.5314768",
"0.52782834",
"0.52711946",
"0.52539396",
"0.5253796",
"0.523956",
"0.52367216",
"0.5211237",
"0.5192952",
"0.5192448",
"0.51853734",
"0.5164654",
"0.5150032",
"0.514835",
"0.51239944",
"0.5123078"
] | 0.71291816 | 0 |
A naive padding implementation. Given the block size, pad the input buffer with '\x00' bytes so that the result is a multiple of the specified size. If the buffer is non-empty but its length is already a multiple of `size`, it is returned unmodified. | def naive_block_padding(b: bytes, size: int) -> bytes:
assert size <= 0xff
l = len(b)
if l > 0 and l % size == 0:
return b
return b + b'\x00' * (size - (l % size)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Pad(value, block_size):\n assert block_size < 256\n pad_length = block_size - (len(value) % block_size)\n if not pad_length:\n # Always pad at least 1 block.\n pad_length = block_size\n padding = [random.randint(0, 255) for _ in range(pad_length)]\n padding[-1] = len(padding)\n padding = struct.pack('B' * len(padding), *padding)\n return value + padding",
"def chunk( seq, size, pad=None ):\n n = len(seq)\n mod = n % size\n for i in xrange(0, n-mod, size):\n yield seq[i:i+size]\n if mod:\n padding = [pad] * (size-mod)\n yield seq[-mod:] + padding",
"def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset",
"def left_zero_pad(s, blocksize):\n if blocksize > 0 and len(s) % blocksize:\n s = (blocksize - len(s) % blocksize) * b('\\000') + s\n return s",
"def pad(size, value):\n return (value + size - 1)/size*size",
"def pkcs7_pad_bytes(input_bytes, block_size):\r\n return pad(input_bytes, block_size)",
"def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)",
"def pad(msg):\n return msg + (BLOCK_SIZE - len(msg)) * PADDING",
"def pad(s):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\treturn s + b\"\\0\" * (AES.block_size - len(s) % AES.block_size)",
"def _zero_pad(self, kernel, size):\n if len(size) != kernel.ndim:\n size = kernel.shape[:1] + tuple(size) + kernel.shape[-1:]\n padsize = np.array(size) - np.array(kernel.shape)\n paddown = padsize // 2\n padup = padsize - paddown\n padarray = np.concatenate((padup[..., None],\n paddown[..., None]), axis=1)\n pads = tuple([tuple(p) for p in padarray])\n kernel_pad = np.pad(kernel, pads, 'constant', constant_values=0)\n return kernel_pad",
"def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))",
"def pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]) ->torch.Tensor:\n new_size = to_tuple(new_size)\n old_size = img.shape[-2:]\n pad_size = (torch.tensor(new_size) - torch.tensor(old_size)) / 2\n padding = torch.cat((torch.floor(pad_size), torch.ceil(pad_size)))\n padding[padding < 0] = 0\n padding = [int(x) for x in padding]\n return F.pad(img, padding=padding, padding_mode='edge')",
"def _Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + util.RepeatByte(pad, pad)",
"def configure_minibatch_by_padded_size(\n *,\n size: Sizing,\n buffer: int,\n discard_oversize: bool,\n get_length: Optional[Callable[[ItemT], int]] = None\n) -> BatcherT:\n # Avoid displacing optional values from the underlying function.\n optionals = {\"get_length\": get_length} if get_length is not None else {}\n return partial(\n minibatch_by_padded_size,\n size=size,\n buffer=buffer,\n discard_oversize=discard_oversize,\n **optionals\n )",
"def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2",
"def space_to_depth_fixed_padding(inputs, kernel_size,\n data_format='channels_last', block_size=2):\n pad_total = kernel_size - 1\n pad_beg = (pad_total // 2 + 1) // block_size\n pad_end = (pad_total // 2) // block_size\n return _padding(inputs, (pad_beg, pad_end), data_format)",
"def _NoPadBufferSize(self, buffer_size):\n no_pad_size = self.block_size * (buffer_size // self.block_size)\n return max(no_pad_size, self.block_size)",
"def pad(plain_text):\n number_of_bytes_to_pad = block_size - len(plain_text) % block_size\n ascii_string = chr(number_of_bytes_to_pad)\n padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text",
"def compute_padding(size, factor):\n if size % factor == 0 and (size/factor) % 2 == 0:\n p = 0\n else:\n p = 1\n while (size + p) % factor != 0 or ((size + p) / factor) % 2 !=0:\n p += 1\n return p",
"def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)",
"def minibatch_by_padded_size(\n seqs: Iterable[ItemT],\n size: Sizing,\n buffer: int = 256,\n discard_oversize: bool = False,\n get_length: Callable = len,\n) -> Iterable[List[ItemT]]:\n if isinstance(size, int):\n size_ = itertools.repeat(size) # type: Iterator[int]\n else:\n size_ = iter(size)\n for outer_batch in minibatch(seqs, size=buffer):\n outer_batch = list(outer_batch)\n target_size = next(size_)\n for indices in _batch_by_length(outer_batch, target_size, get_length):\n subbatch = [outer_batch[i] for i in indices]\n padded_size = max(len(seq) for seq in subbatch) * len(subbatch)\n if discard_oversize and padded_size >= target_size:\n pass\n else:\n yield subbatch",
"def pad(input, pad_size):\n if not pad_size:\n return input\n return tf.pad(input, [[0,0],[pad_size, pad_size],[pad_size, pad_size],[0,0]], 'REFLECT')",
"def nullPad(s):\n padding = chr(0) * (Blowfish.block_size - (len(s) % Blowfish.block_size))\n if padding:\n return s + padding\n else:\n return s",
"def pad(seq, n):\n return",
"def un_pkcs_1_5(b: int, size: int) -> bytes:\n unpadded = b.to_bytes(size, \"big\")\n\n if not (unpadded[0] == 0x00 and unpadded[1] == 0x02):\n raise BadPaddingException\n unpadded = unpadded[2:]\n\n i = 0\n while unpadded[i] == 0xff:\n i += 1\n unpadded = unpadded[i:]\n\n if not (unpadded[0] == 0x00):\n raise BadPaddingException\n\n unpadded = unpadded[1:]\n return unpadded",
"def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs",
"def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if kwargs['data_format'] == 'NCHW':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end],\n [pad_beg, pad_end]],\n mode=mode)\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs",
"def pad_to_blocksize(string, blocksize=AES_BSZ, leftpad=False, pad_char=None):\n if len(string) % blocksize == 0:\n return string\n length = len(string) + blocksize - (len(string) % blocksize)\n return pad_to_len(string, length, leftpad=leftpad, pad_char=pad_char)",
"def pad_with_buffer(b: bytes, pad: bytes) -> bytes:\n assert b\n assert pad\n\n b += pad\n b = pkcs_7(b, 16)\n\n return b",
"def pad_rec(target_size, path, output):\n with open(path, 'rb') as handle, open(output, 'wb') as padded:\n data = handle.read()\n data_length = len(data)\n padded.write(data)\n pad_length = target_size - data_length\n if pad_length < 9:\n raise ValueError('target size too small')\n pad_op = struct.pack('<IIB', 1, pad_length, 0xfe)\n pad_op += b'\\x00' * (pad_length - 9)\n padded.write(pad_op)"
] | [
"0.70165694",
"0.65415794",
"0.65264106",
"0.6474348",
"0.64708847",
"0.6293283",
"0.6265598",
"0.6215768",
"0.61022586",
"0.6072546",
"0.6051565",
"0.5936931",
"0.59051484",
"0.5872826",
"0.5849457",
"0.583683",
"0.58303237",
"0.5825839",
"0.5821051",
"0.57902086",
"0.57723695",
"0.5772278",
"0.5769417",
"0.5724903",
"0.5719188",
"0.5714288",
"0.5711099",
"0.56819373",
"0.5663764",
"0.56401974"
] | 0.83171093 | 0 |
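
A minimal usage sketch for the `naive_block_padding` record above. The function body is copied verbatim from the record so the snippet runs standalone; the block size of 16 is an arbitrary choice for illustration, not something the record specifies.

```python
def naive_block_padding(b: bytes, size: int) -> bytes:
    # Zero-pad b up to the next multiple of size (copied from the record above).
    assert size <= 0xff
    l = len(b)
    if l > 0 and l % size == 0:
        return b
    return b + b'\x00' * (size - (l % size))

# A 5-byte buffer padded to a 16-byte block gains 11 trailing zero bytes.
assert naive_block_padding(b"hello", 16) == b"hello" + b"\x00" * 11

# A buffer that is already block-aligned comes back unchanged.
assert naive_block_padding(b"0123456789abcdef", 16) == b"0123456789abcdef"

# An empty buffer is padded to a full block, since the l > 0 guard does not apply.
assert naive_block_padding(b"", 16) == b"\x00" * 16
```
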
PKCS7 unpadding. Remove the padding from the bytes. If the padding is invalid, an exception is raised. | def un_pkcs_7(b: bytes, size: int) -> bytes:
b = bytearray(b)
padding = b[-1]
if padding <= 0 or padding > size:
raise BadPaddingException
for i in range(-padding, 0):
if b[i] != padding:
raise BadPaddingException
return bytes(b[:-padding]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_padding(paddedMsg, block_size): \n try:\n if not valid_padding(paddedMsg, block_size):\n raise ValueError\n except ValueError:\n print(f\"{ paddedMsg } has invalid PKCS#7 padding.\")\n return\n \n last_byte = paddedMsg[-1]\n unpadded = paddedMsg[:-last_byte]\n print(f\"Padding removed successfully...\")\n print(f\"Before padding removal: { paddedMsg }\")\n print(f\"After padding removal: { unpadded }\")",
"def _UnPad(self, padded):\n pad = bytearray(padded)[-1]\n return padded[:-pad]",
"def remove_padding(self, data):\n pad_len = ord(data[-1])\n return data[:-pad_len]",
"def unpad(padded_data):\r\n num_padded_bytes = ord(padded_data[-1])\r\n return padded_data[:-num_padded_bytes]",
"def __UnPad(self, padded):\n pad = ord(padded[-1])\n return padded[:-pad]",
"def un_pkcs_1_5(b: int, size: int) -> bytes:\n unpadded = b.to_bytes(size, \"big\")\n\n if not (unpadded[0] == 0x00 and unpadded[1] == 0x02):\n raise BadPaddingException\n unpadded = unpadded[2:]\n\n i = 0\n while unpadded[i] == 0xff:\n i += 1\n unpadded = unpadded[i:]\n\n if not (unpadded[0] == 0x00):\n raise BadPaddingException\n\n unpadded = unpadded[1:]\n return unpadded",
"def RemovePadding(value):\n pad_length = struct.unpack('B', value[-1])[0]\n return value[:-pad_length]",
"def decrypt(self, b):\n decrypted = self.__aes.ecbDecrypt(b)\n return unpadPkcs7(decrypted, 16)",
"def _remove_padding(input_str):\r\n num_pad_bytes = ord(input_str[-1])\r\n if num_pad_bytes < 1 or num_pad_bytes > AES.block_size or num_pad_bytes >= len(input_str):\r\n raise UsernameDecryptionException(\"padding\")\r\n return input_str[:-num_pad_bytes]",
"def remove_padding(data: str) -> str:\n\n return data[:data.find('\\0')]",
"def __unpad(self, data):\n return data[0:-ord(data[-1])]",
"def remove_padding(im, pad):\n\n return im[pad:-pad, pad:-pad]",
"def test():\n\n block_size = 16\n\n # Test case 1: incorrect value < required:\n paddedMsg = b'ICE ICE BABY\\x03\\x03\\x03\\x03'\n remove_padding(paddedMsg, block_size)\n\n # Test caes 2: incorrect value > required:\n paddedMsg = b\"ICE ICE BABY\\x05\\x05\\x05\\x05\" \n remove_padding(paddedMsg, block_size)\n\n # Test case 3: incorrect length:\n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 4: variable numbers:\n paddedMsg = b\"ICE ICE BABY\\x01\\x02\\x03\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 5: correct padding \n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)",
"def unpad(self):\n return bytes(self)[:-self[-1]]",
"def unpad(data, *args, **kwargs): # pragma: no cover\n raise NotImplementedError()",
"def unpad(I, pad):\n\tif pad[3] == 0 and pad[1] > 0:\n\t\treturn I[..., pad[2]:, pad[0]:-pad[1]]\n\telif pad[3] > 0 and pad[1] == 0:\n\t\treturn I[..., pad[2]:-pad[3], pad[0]:]\n\telif pad[3] == 0 and pad[1] == 0:\n\t\treturn I[..., pad[2]:, pad[0]:]\n\telse:\n\t\treturn I[..., pad[2]:-pad[3], pad[0]:-pad[1]]",
"def pkcs_7(b: bytes, size: int) -> bytes:\n assert size <= 0xff\n\n b = bytearray(b)\n padding = size - (len(b) % size)\n for _ in range(padding):\n b.append(padding)\n\n return bytes(b)",
"def unset_padding(self):\n if self.metadata.Signal.has_item('pad_tuple'):\n Npy, Npx = self.metadata.Signal.pad_tuple\n else:\n # If no padding was done, return the same signal\n return self\n Nx, Ny = self.axes_manager.signal_shape\n s=self.deepcopy()\n del s.metadata.Signal.pad_tuple\n if self.axes_manager.navigation_dimension == 0:\n s.data = s.data[Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n elif self.axes_manager.navigation_dimension > 0:\n s.data = s.data[..., Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n # copy in case of non-linear defoci\n s.axes_manager.navigation_axes[0].axis = self.axes_manager.navigation_axes[0].axis.copy()\n return s",
"def base64_aes_decrypt(self,data,key):\n cipher = AES.new(key)\n try:\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))\n except Exception, ex:\n return ''",
"def __packet_unstuff(self, data):\n unstuffed = bytearray()\n escape = False\n for count in data:\n if escape == False:\n if count == 0x7e:\n continue\n if count == 0x7f:\n continue\n if count == 0x7d:\n escape = True\n else:\n unstuffed.append(count)\n else:\n unstuffed.append(count + 0x7d)\n escape = False\n\n return(unstuffed)",
"def pkcs5_unpad(self,s):\n return s[0:-ord(s[-1])]",
"def unpad(plain):\n return plain[:-ord(plain[-1])]",
"def remove_padding_from_bb(boxes, x_padding):\n boxes[boxes[:, 0] < x_padding] = x_padding\n boxes[:, 0] -= x_padding\n boxes[:, 2] -= x_padding\n return boxes",
"def base64_aes_decrypt(self,data,key):\r\n cipher = AES.new(key)\r\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))",
"def attention_bias_ignore_padding(memory_padding):\n\tret = tf.multiply(memory_padding, -1e18)\n\treturn tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)",
"def imap4_utf7_decode(data):\n\n if not isinstance(data, bytes):\n return bytearray(data, 'utf-8')\n return imap_utf7_codec.imap4_utf7_decode(data)",
"def _unpad(\r\n s: str,\r\n) -> str:\r\n last_character = s[len(s) - 1:]\r\n bytes_to_remove = ord(last_character)\r\n return s[:-bytes_to_remove]",
"def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass",
"def canonicalPadding(b):\n if b[0] & 0x80 == 0x80:\n raise DecredError(\"negative number\")\n if len(b) > 1 and b[0] == 0x00 and b[1] & 0x80 != 0x80:\n raise DecredError(\"excessive padding\")",
"def _decrypt(self, b, strip_padding=True):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n decryptor = cypher.decryptor()\n result = decryptor.update(b) + decryptor.finalize()\n if strip_padding:\n result = result[:-result[-1]]\n return result"
] | [
"0.7573973",
"0.6920545",
"0.65433896",
"0.63635516",
"0.61938757",
"0.6060388",
"0.5942703",
"0.5744001",
"0.57044625",
"0.5590399",
"0.5486308",
"0.54624707",
"0.54142153",
"0.54136795",
"0.5382624",
"0.5271841",
"0.522418",
"0.51658076",
"0.5146927",
"0.5141305",
"0.49797198",
"0.49562797",
"0.492687",
"0.49069536",
"0.48462418",
"0.4841137",
"0.4836582",
"0.48295617",
"0.48129332",
"0.48029408"
] | 0.78874135 | 0 |
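
A round-trip sketch for the `un_pkcs_7` record above. `BadPaddingException` is referenced but not defined in the record, so a minimal stand-in class is declared here, and the companion `pkcs_7` padder that appears among this record's negatives is inlined so the example runs on its own.

```python
class BadPaddingException(Exception):
    # Stand-in for the exception type the record references but does not define.
    pass

def pkcs_7(b: bytes, size: int) -> bytes:
    # PKCS#7 padder, taken from one of the negatives listed in this record.
    assert size <= 0xff
    b = bytearray(b)
    padding = size - (len(b) % size)
    for _ in range(padding):
        b.append(padding)
    return bytes(b)

def un_pkcs_7(b: bytes, size: int) -> bytes:
    # Unpadder copied from the record's document field.
    b = bytearray(b)
    padding = b[-1]
    if padding <= 0 or padding > size:
        raise BadPaddingException
    for i in range(-padding, 0):
        if b[i] != padding:
            raise BadPaddingException
    return bytes(b[:-padding])

# Pad then unpad: the original plaintext is recovered.
assert un_pkcs_7(pkcs_7(b"ICE ICE BABY", 16), 16) == b"ICE ICE BABY"

# Inconsistent padding bytes are rejected.
try:
    un_pkcs_7(b"ICE ICE BABY\x01\x02\x03\x04", 16)
    raise AssertionError("expected BadPaddingException")
except BadPaddingException:
    pass
```
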
PKCS1.5 unpadding. Check whether the padding is correct, remove it, and return the message. | def un_pkcs_1_5(b: int, size: int) -> bytes:
unpadded = b.to_bytes(size, "big")
if not (unpadded[0] == 0x00 and unpadded[1] == 0x02):
raise BadPaddingException
unpadded = unpadded[2:]
i = 0
while unpadded[i] == 0xff:
i += 1
unpadded = unpadded[i:]
if not (unpadded[0] == 0x00):
raise BadPaddingException
unpadded = unpadded[1:]
return unpadded | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_padding(paddedMsg, block_size): \n try:\n if not valid_padding(paddedMsg, block_size):\n raise ValueError\n except ValueError:\n print(f\"{ paddedMsg } has invalid PKCS#7 padding.\")\n return\n \n last_byte = paddedMsg[-1]\n unpadded = paddedMsg[:-last_byte]\n print(f\"Padding removed successfully...\")\n print(f\"Before padding removal: { paddedMsg }\")\n print(f\"After padding removal: { unpadded }\")",
"def test():\n\n block_size = 16\n\n # Test case 1: incorrect value < required:\n paddedMsg = b'ICE ICE BABY\\x03\\x03\\x03\\x03'\n remove_padding(paddedMsg, block_size)\n\n # Test caes 2: incorrect value > required:\n paddedMsg = b\"ICE ICE BABY\\x05\\x05\\x05\\x05\" \n remove_padding(paddedMsg, block_size)\n\n # Test case 3: incorrect length:\n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 4: variable numbers:\n paddedMsg = b\"ICE ICE BABY\\x01\\x02\\x03\\x04\"\n remove_padding(paddedMsg, block_size)\n\n # Test case 5: correct padding \n paddedMsg = b\"ICE ICE BABY\\x04\\x04\\x04\\x04\"\n remove_padding(paddedMsg, block_size)",
"def _UnPad(self, padded):\n pad = bytearray(padded)[-1]\n return padded[:-pad]",
"def un_pkcs_7(b: bytes, size: int) -> bytes:\n b = bytearray(b)\n padding = b[-1]\n if padding <= 0 or padding > size:\n raise BadPaddingException\n\n for i in range(-padding, 0):\n if b[i] != padding:\n raise BadPaddingException\n\n return bytes(b[:-padding])",
"def RemovePadding(value):\n pad_length = struct.unpack('B', value[-1])[0]\n return value[:-pad_length]",
"def pkcs5_unpad(self,s):\n return s[0:-ord(s[-1])]",
"def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def unpad(padded_data):\r\n num_padded_bytes = ord(padded_data[-1])\r\n return padded_data[:-num_padded_bytes]",
"def __UnPad(self, padded):\n pad = ord(padded[-1])\n return padded[:-pad]",
"def decrypt_message(encrypted_message):",
"def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))",
"def remove_padding(self, data):\n pad_len = ord(data[-1])\n return data[:-pad_len]",
"def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed",
"def decrypt_message(self, env_key, data):\n\n if not env_key or not data:\n raise Exception('Arguments missing.')\n\n key = RSA.importKey(self.private_key)\n try:\n env_key = unquote(env_key).decode('utf8')\n data = unquote(data).decode('utf8')\n except AttributeError:\n # Python 3 compatible\n env_key = unquote(env_key)\n data = unquote(data)\n\n try:\n env_key = base64.b64decode(env_key)\n data = base64.b64decode(data)\n \n cipher = PKCS1_v1_5.new(key)\n\n sentinel = []\n session_key = cipher.decrypt(env_key, sentinel)\n\n rc4_cipher = ARC4.new(session_key)\n\n xml_data = rc4_cipher.decrypt(data)\n\n # TODO: add xml validation\n # schema_root = etree.XML(xml_data)\n # schema = etree.XMLSchema(schema_root)\n # parser = etree.XMLParser(schema=schema)\n\n return xml_data\n except Exception as e:\n if self.developement:\n exception(e)\n\n raise Exception('Could not decrypt message.')",
"def _unseal_message(self, message):\n decrypted_message = self.incoming_handle.update(message)\n return decrypted_message",
"def _remove_padding(input_str):\r\n num_pad_bytes = ord(input_str[-1])\r\n if num_pad_bytes < 1 or num_pad_bytes > AES.block_size or num_pad_bytes >= len(input_str):\r\n raise UsernameDecryptionException(\"padding\")\r\n return input_str[:-num_pad_bytes]",
"def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)",
"def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message",
"def decrypt_msg(msg, query, padding, iv=None, blocksize=16, threads=1):\n # Input validation\n msg = bytearray(msg)\n assert len(msg) % blocksize == 0\n if iv is not None:\n iv = bytearray(iv)\n assert len(iv) == blocksize\n msg = iv + msg\n else:\n assert len(msg) > blocksize\n\n # Split into \"iv\", ciphertext pairs\n blocks = chop(bytearray(msg), blocksize)\n pairs = zip(blocks, blocks[1:])\n\n # Decrypt every pair seperately (to minimize query size)\n logger.info('Decrypting %d block[s] of data using a padding oracle' % len(pairs))\n out = bytearray()\n for n, (iv, block) in enumerate(pairs):\n logger.info('Decrypting block %d' % n)\n out += decrypt(iv, block, query, padding, threads)\n logger.info('Decrypted block: %s' % hex(out[-blocksize:]))\n return out",
"def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])",
"def decrypt():\n request_data = request.get_json()\n\n if ('ciphertext' in request_data and\n 'tag' in request_data and\n 'enc_session_key' in request_data and\n 'nonce' in request_data):\n\n try:\n for key in request_data.keys():\n request_data[key] = b64decode(request_data[key])\n except binascii.Error:\n return Response(\n json.dumps(\n {\n 'error': 'Malformed payload'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n encryption = Decryption(request_data['enc_session_key'], request_data['nonce'])\n try:\n message = encryption.decrypt(\n (request_data['ciphertext'], request_data['tag'])\n ).decode()\n except ValueError as error:\n return Response(\n json.dumps(\n {\n 'error': f'Failed to decrypt the message due to the error: [{error}]'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n return jsonify({'message': message}), 200\n\n return Response(\n json.dumps(\n {\n 'error': (\n 'Tag / Ciphertext / Nonce / Encrypted Session Key'\n ' missing in the request body'\n )\n }\n ),\n 400,\n mimetype='application/json'\n )",
"def decrypt(self, message):\n # message = message.upper().split()\n # message = \"\".join(message)\n # desalting the message to remove 5 characters blocks\n padding = input(\"Have you used 5 characters blocks? y/n \")\n if padding == \"y\":\n message = message.replace(\" \", \"\")\n message = self.desalt_random(message)\n message = \"\".join(message)\n\n message = message.upper()\n message_list = []\n for ch in message:\n message_list.append(self.main_dict[ch][0])\n\n # OTP Encryption / process the message with OTP\n otp = input(\"What is the OTP that was generated for you during \"\n \"encryption process?: \")\n otp = otp.upper()\n random_otp = []\n for ch in otp:\n random_otp.append(self.main_dict[ch][0])\n\n # If OTP is correct, decrypt the message with mod27\n if len(message_list) != len(random_otp):\n print(\"You typed a wrong OTP.\")\n return None\n else:\n math_list = []\n for i, item in enumerate(message_list):\n if message_list[i] >= random_otp[i]:\n x = message_list[i] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n else:\n for key, value in self.main_dict.items():\n if item == value[0]:\n x = value[1] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n return \"\".join(math_list)",
"def decrypt_message(K, iv, ciphertext, tag):\n aes = Cipher(\"aes-128-gcm\")\n plain = aes.quick_gcm_dec(K, iv, ciphertext, tag)\n \n \n return plain.encode(\"utf8\")",
"def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt",
"def decrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)",
"def decrypt(self, cipherText, additionalData=''):\n # warning only valid in the random oracle\n mac_key = sha2(b'Poor Mans Key Extractor'+self._key).digest()\n mac = MessageAuthenticator(mac_key)\n if not mac.verify(cipherText, additionalData=additionalData):\n raise ValueError(\"Invalid mac. Your data was tampered with or your key is wrong\")\n else:\n return super(AuthenticatedCryptoAbstraction, self).decrypt(cipherText['msg'])",
"def unpad(plain):\n return plain[:-ord(plain[-1])]",
"def unpad(self):\n return bytes(self)[:-self[-1]]",
"def unpack(self, pkt):\n if pkt[0]!='$' or pkt[-3]!='#':\n raise ValueError('bad packet')\n if (sum(ord(c) for c in pkt[1:-3]) % 256) != int(pkt[-2:],16):\n raise ValueError('bad checksum')\n pkt = pkt[1:-3]\n return pkt",
"def decrypt_message(self, cipher):\n\t\tmessage = cipher ** self.private_key % self.hidden_primes_product\n\t\treturn message"
] | [
"0.7033483",
"0.67119503",
"0.6185447",
"0.6130692",
"0.6012814",
"0.58991176",
"0.5789447",
"0.57747304",
"0.57602435",
"0.56955993",
"0.5692743",
"0.5626397",
"0.5573706",
"0.5565686",
"0.55546814",
"0.5532581",
"0.5451245",
"0.5402548",
"0.53575903",
"0.53340465",
"0.52554524",
"0.523507",
"0.5228947",
"0.52280045",
"0.51982516",
"0.5190594",
"0.51886946",
"0.5159273",
"0.5153308",
"0.51451135"
] | 0.6907179 | 1 |
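
A happy-path sketch for the `un_pkcs_1_5` record above. The matching padder is not part of the record, so the padded block is assembled by hand using the 00 02 | ff..ff | 00 | message layout the unpadder checks for; filling with 0xff bytes is an assumption of this sketch (PKCS#1 v1.5 encryption padding normally uses random non-zero bytes, which this particular unpadder would reject).

```python
class BadPaddingException(Exception):
    # Stand-in for the exception type the record references but does not define.
    pass

def un_pkcs_1_5(b: int, size: int) -> bytes:
    # Unpadder copied from the record's document field.
    unpadded = b.to_bytes(size, "big")
    if not (unpadded[0] == 0x00 and unpadded[1] == 0x02):
        raise BadPaddingException
    unpadded = unpadded[2:]
    i = 0
    while unpadded[i] == 0xff:
        i += 1
    unpadded = unpadded[i:]
    if not (unpadded[0] == 0x00):
        raise BadPaddingException
    return unpadded[1:]

# Build a 16-byte block by hand: 00 02 | 0xff filler | 00 | message.
size = 16
message = b"hi"
filler = b"\xff" * (size - 3 - len(message))
block = b"\x00\x02" + filler + b"\x00" + message

# The unpadder takes the block as a big-endian integer and recovers the message.
assert un_pkcs_1_5(int.from_bytes(block, "big"), size) == message
```
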
Return a string representation of the variable. | def __str__(self):
return f"Variable(type={self._type}, id={self._id}, value={self.status}, init={self.init})" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n s = \"\"\n for field in self.fields:\n if field.size not in VAR_PREFIXES:\n s += field.name + \": \" + str(field.size) + \" bits with value \" + str(field.value) + \".\\n\"\n else:\n s += field.name + \": variable size: \" + str(field.size) + \", with value \" + str(field.value) + \".\\n\"\n\n return s",
"def to_string(self):\n return str(vars(self))",
"def to_string(self):\n return str(vars(self))",
"def to_string(self):\n return str(vars(self))",
"def to_string(self):\n return str(vars(self))",
"def to_string(self):\n return str(vars(self))",
"def __str__(self):\n return '{}.{} >> {}'.format(self.scope, self.name,\n '/'.join(map(str, self.variables)))",
"def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str",
"def __repr__(self):\n values = ', '.join(f'{k}={v}' for k, v in self.variables.items())\n return f'D({values})'",
"def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string",
"def __str__(self):\n return repr(self.value)",
"def __str__(self):\n return self.identity(default=self.nc_get_variable(\"\"))",
"def var_to_string(var):\n\n var_code = type_codes.get(var.get(\"type\", None), None)\n\n if (not var_code):\n raise ValueError(\"Cannot convert variable to string\")\n\n code = var_code.format(**var)\n\n return code",
"def str_without_type(self):\n str = self.var.name\n if self.is_assign():\n str += ' = {}'.format(self.initializer)\n return str",
"def __str__(self):\n\n\t\treturn str(self.__value)",
"def __repr__(self):\n return pformat(vars(self))",
"def variable_string(self, name):\n return \"$(\" + name + \")\"",
"def name(self):\n return '{} {} {}'.format(self.var_period, self.var_type,\n self.var_detail)",
"def __str__(self):\n return type(self).__name__ + str(vars(self))",
"def literal_to_string(self, literal):\n s = '!' if is_negated(literal) else ''\n return s + self.variables[literal >> 1]",
"def __str__(self) -> str:\n return str(self.getvalue())",
"def __str__(self) -> str:\n return str(self.getvalue())",
"def __str__(self) -> str:\n return str(self.getvalue())",
"def asString(self):\n\n res = []\n for v in list(self.vars.values()):\n res.append(v.asString())\n res.append('')\n for e in list(self.enums.values()):\n res.append(e.asString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.defAsString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.dataAsString())\n\n return '\\n'.join(res)",
"def __str__(self):\n return str(self.value)",
"def __str__(self):\n return str(self.value)",
"def __str__(self):\n return str(self.value)",
"def __str__(self):\n return str(self.value)",
"def __str__(self):\n return str(self.value)",
"def __str__(self):\n return str(self.value)"
] | [
"0.77372766",
"0.7536459",
"0.7536459",
"0.7536459",
"0.7536459",
"0.7536459",
"0.734657",
"0.7323082",
"0.71180147",
"0.70616055",
"0.7044013",
"0.69827473",
"0.69788533",
"0.6974818",
"0.6907385",
"0.6889123",
"0.6874353",
"0.6863303",
"0.6850354",
"0.684109",
"0.6835305",
"0.6835305",
"0.6835305",
"0.68198967",
"0.6757605",
"0.6757605",
"0.6757605",
"0.6757605",
"0.6757605",
"0.6757605"
] | 0.7576983 | 1 |
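
The record above only shows a `__str__` method, so the sketch below wraps it in a hypothetical `Variable` class; the attribute names (`_type`, `_id`, `status`, `init`) are taken from the f-string, while the constructor and everything else about the class are assumptions.

```python
class Variable:
    # Hypothetical host class: only the attributes read by __str__ are modelled.
    def __init__(self, var_type, var_id, status, init):
        self._type = var_type
        self._id = var_id
        self.status = status
        self.init = init

    def __str__(self):
        # Body copied from the record's document field.
        return f"Variable(type={self._type}, id={self._id}, value={self.status}, init={self.init})"

print(Variable("bool", 3, True, False))
# Variable(type=bool, id=3, value=True, init=False)
```
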
Return the last edit time. | def last_edited(self):
return self._last_edited | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last_edit(self) -> datetime.datetime:\n self.update_status()\n return datetime.datetime.fromtimestamp(self._last_edit)",
"def get_last_update_time(self):\n return self.last_update_time",
"def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")",
"def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")",
"def last_update_time(self):\n return self._last_update_time",
"def last_modified_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_time\")",
"def last_modified_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified_time\")",
"def last_updated_time(self) -> str:\n return pulumi.get(self, \"last_updated_time\")",
"def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time",
"def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time",
"def getLastModifiedTime(self): #$NON-NLS-1$\r",
"def time_last_modified(self):\n return self.properties.get(\"TimeLastModified\", None)",
"def last_edited(self, value):\n if self._last_edited != value:\n self._last_edited = value\n return self._last_edited",
"def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")",
"def modification_time(self) -> str:\n return pulumi.get(self, \"modification_time\")",
"def last_updated_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_updated_time\")",
"def get_last_time(self):\n \n return self._last",
"def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")",
"def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")",
"def ModifyTime(self):\n if self.force_auto_sync:\n self.get('ModifyTime')\n return self._ModifyTime",
"def get_last_updated_at(self):\n return self.last_updated",
"def last_modified_at(self):\n return self.viztrail.last_modified_at",
"def last_modified_at(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_at\")",
"def dt_last_update(self):\n return self.last_update",
"def lasttime(self):\n if hasattr(self, \"_lasttime\"):\n return self._lasttime\n else:\n return None",
"def last_modified(self):\n return os.path.getmtime(self.filename)",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")",
"def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")"
] | [
"0.8686858",
"0.7587613",
"0.7560448",
"0.7560448",
"0.7485426",
"0.74718714",
"0.745587",
"0.7413081",
"0.7365227",
"0.7365227",
"0.7359187",
"0.7351504",
"0.7343039",
"0.7298599",
"0.7298599",
"0.72727555",
"0.72685224",
"0.72622776",
"0.7254894",
"0.7254894",
"0.72354203",
"0.7180461",
"0.71122015",
"0.7100772",
"0.70875555",
"0.70468897",
"0.701618",
"0.6997543",
"0.6997543",
"0.6997543"
] | 0.8167432 | 1 |
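
A minimal sketch around the `last_edited` getter above. The `@property` decorator, the host class, and the backing `_last_edited` attribute are assumptions inferred from the snippet and from the companion setter that appears among this record's negatives.

```python
class Note:
    # Hypothetical host class: only the attribute the getter reads is modelled.
    def __init__(self, last_edited=None):
        self._last_edited = last_edited

    @property
    def last_edited(self):
        # Getter copied from the record's document field.
        return self._last_edited

note = Note(last_edited="2021-06-01T10:30:00")
assert note.last_edited == "2021-06-01T10:30:00"
```
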
Function that implements the algorithm for the inversion of an LTI system. The output of the algorithm, uhat, is such that this signal, when filtered by the system, produces the signal y. | def stbinv(A, B, C, D, y, t):
# Description to help the user
# calculate the number of samples of the output
N = np.shape(y)[
1
] # the number of samples is the number of columns of the data matrix y
# calculate system's dimensions: number of states and number of inputs
m = B.shape[1] # number of inputs
n = A.shape[0] # number of states
# initialize the variable v (additional input)
v = np.zeros((n, N)) # it will be important later
# initializing the flag variable
flag = 0
# initializing the flag variable for the vrft method
flag_vr = 0
# initializing the counter of reduction steps done by the algorithm
kround = 0
# starting the loop of the reduction procedure
while flag == 0:
# run a step of the reduction order algorithm
Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)
# increase the counter of reductions
kround = kround + 1
# preallocating the state vector of the inverse system
xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples
# preallocating the calculated input
uhat = np.zeros((m, N - kround))
# defining the reduced time vector
tt = t[:, 0 : N - kround]
# test the conditions of invertibility
if phat < m:
# if this condition is true, then the algorithm has failed and it is not possible to find the inverse
flag = 1
flag_vr = 1
# if this is the case, we print a message and end the execution
# print('The inversion algorithm has failed')
return uhat, tt, flag_vr
else:
if rhat == m:
# ((rhat==m)&(rhat==phat)):
# if this condition is true, then the algorithm is done. We can calculate the signal u
flag = 2
# calculating the inverse of the feedforward matrix
# E=np.linalg.inv(Dhat)
E = np.linalg.pinv(Dhat)
else:
# if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm
A = Ahat
B = Bhat
C = Chat
D = Dhat
y = yhat
v = vhat
# after the reduction procedure is done, then the system can be inverted
# calculating the dynamic matrix of the inverse system
Ainv = Ahat - Bhat @ E @ Chat
# eigenvalues of the inverse system's dynamic matrix
wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors
# calculating the input matrix of the inverse system
Binv = Bhat @ E
# calculating the output matrix of the inverse system
Cinv = -E @ Chat
# calculating the feedforward matrix of the inverse system
Dinv = E
# test if the inverse dynamic system is stable
    wbool = np.abs(wv) > 1  # discrete-time stability requires every eigenvalue magnitude to be below 1
    wsum = np.sum(wbool)
    # test if wsum is greater than zero
    if wsum > 0:
        # if wsum is greater than zero, then the inverse system is unstable, so we end the execution of the algorithm
# print('The inverse system is unstable')
flag_vr = 2
return uhat, tt, flag_vr
else:
# if wsum=0, then the inverse system is stable, and we can calculate the input signal
# calculate the first value for the output (t=0)
uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]
# calculate the states and the output of the inverse system
for k in range(0, N - 1 - kround):
xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]
uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]
return uhat, tt, flag_vr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def I(x, y):\n plug_length = 2\n if isinstance(x, ndarray) and isinstance(y, ndarray):\n # vectorized version\n u = zeros((x.shape[0], y.shape[1]))\n #u1 = less(x, Lx/2.0 - 0.1)\n #u1 += greater(x, Lx/2.0 + 0.1)\n #put(u, u1, 2.0) does not work, u1 should be 2D array\n # loop version:\n for i in range(len(x)):\n if x[i] < Lx/2.0 - plug_length or \\\n x[i] > Lx/2.0 + plug_length:\n u[i,:] = 0.0\n else:\n u[i,:] = 1.0\n else:\n # x and y are floats (assumed)\n if x < Lx/2.0 - plug_length or \\\n x > Lx/2.0 + plug_length:\n u = 0.0\n else:\n u = 1.0\n return u",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = x # default output is all states\n \n return y",
"def sys(self, t, x, u):\n xk = fft(x)\n\n # 4/3 truncation rule\n # dealiasing due to triple nonlinearity\n # note: you could do zero-padding to improve memory\n # efficiency\n xk[self.n_states // 4 : 3 * self.n_states // 4] = 0j\n x = ifft(xk)\n\n yk = (-self.k**2 * xk.ravel() / 2) * 1j\n y = ifft(yk) + 1j * abs(x) ** 2 * x + u\n return y",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n # Using u = ubar to avoid algeabric loops\n \n y = self.plant.h( x , self.plant.ubar , t )\n \n return y",
"def gen_INT_inv_f_wrt_yt(yt_arr, phi_arr, INT_inv_f_arr, f_given, cond_GT):\n re = 0.\n INT_inv_f_arr[0] = re\n tmp_f1 = 1./f_given(phi_arr[0], cond_GT)\n tmp_f2 = tmp_f1\n for i in range(1, size(yt_arr)):\n dy = yt_arr[i] - yt_arr[i-1]\n tmp_f1 = tmp_f2\n tmp_f2 = 1./f_given(phi_arr[i], cond_GT)\n re += 0.5 * dy * (tmp_f1 + tmp_f2)\n INT_inv_f_arr[i] = re\n return 0",
"def __Heun_method(cls, diff, x, Y0, u=None, deltaT=None):\n Y = [0 for x in range(len(x))]\n Y[0] = Y0\n for i in range(len(x) - 1):\n Y[i + 1] = Y[i] + (diff(x[i], Y[i], u[i]) + diff(x[i + 1], Y[i] + deltaT * diff(x[i], Y[i], u[i]),\n u[i + 1])) * deltaT / 2\n return Y",
"def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)",
"def rhs_fenics(y,t):\n #print \"time: \",t\n uprev.vector()[:]=y\n f.t = t #dolfin needs to know the current time for cos(t)\n uprime_solver.solve()\n return uprime_solution.vector().array()",
"def inv(self, y):\n pass",
"def inv_signal(self, y):\n spect = self.inv_spectrogram(y)\n if self.use_complex:\n return self.stft_fn.inverse(spect, complex=self.use_complex)\n return self.stft_fn.inverse(spect, torch.rand(*spect.shape)*2*np.pi)\n # return self.stft_fn.inverse(spect, torch.zeros(*spect.shape))",
"def inverse_transform(self, y: Array2D) -> Array2D:",
"def lowpass1(y, dt, fc=3) :\r\n tau=1/(2*np.pi*fc)\r\n alpha=dt/(tau+dt)\r\n y_filt=np.zeros(y.shape)\r\n y_filt[0]=y[0]\r\n for i in np.arange(1,len(y)):\r\n y_filt[i]=alpha*y[i] + (1-alpha)*y_filt[i-1]\r\n return y_filt",
"def switch_to_untuned_inputs(self):\n\n self.h_e=self.inputs.noise_flat.T\n self.h=np.vstack([self.h_e,self.h_i])",
"def inverseLaplace(Y_s,t=None):\n \n # load the step response as a system in scipy.signal\n num,den = symToTransferFn(Y_s)\n \n # evaluate in time domain\n t,y = sp.impulse((num,den),T=t)\n return t,y",
"def vi1(t):\n u_t = 1*(t>0)\n return (np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t)) * u_t",
"def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. / t.sqrt(self.eig).to(device), self.rot.to(device))",
"def diff(t, y):\n origin = root.vectorize()\n root.tensorize(y)\n self.time = t if not imaginary else None\n self.eom()\n ans = root.vectorize(use_aux=True)\n root.tensorize(origin)\n return ans",
"def _inverse_ops(self, Yl, Yh):\n a = len(Yh) # No of levels.\n device = self.device\n\n # If biort has 6 elements instead of 4, then it's a modified\n # rotationally symmetric wavelet\n # FIXME: there's probably a nicer way to do this\n if len(self.biort) == 4:\n h0o, g0o, h1o, g1o = self.biort\n elif len(self.biort) == 6:\n h0o, g0o, h1o, g1o, h2o, g2o = self.biort\n else:\n raise ValueError('Biort wavelet must have 6 or 4 components.')\n\n # If qshift has 12 elements instead of 8, then it's a modified\n # rotationally symmetric wavelet\n # FIXME: there's probably a nicer way to do this\n if len(self.qshift) == 8:\n h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = self.qshift\n elif len(self.qshift) == 12:\n h0a, h0b, g0a, g0b, h1a, h1b, \\\n g1a, g1b, h2a, h2b, g2a, g2b = self.qshift\n else:\n raise ValueError('Qshift wavelet must have 12 or 8 components.')\n\n level = a - 1\n Z = Yl\n\n # This ensures that for level 1 we never do the following\n while level >= 1:\n if self.complex:\n lh = c2q(tf.real(Yh[level][:,:,0:6:5]),\n tf.imag(Yh[level][:,:,0:6:5]))\n hl = c2q(tf.real(Yh[level][:,:,2:4:1]),\n tf.imag(Yh[level][:,:,2:4:1]))\n hh = c2q(tf.real(Yh[level][:,:,1:5:3]),\n tf.imag(Yh[level][:,:,1:5:3]))\n else:\n lh = c2q(Yh[level].real[:,:,0:6:5],\n Yh[level].imag[:,:,0:6:5])\n hl = c2q(Yh[level].real[:,:,2:4:1],\n Yh[level].imag[:,:,2:4:1])\n hh = c2q(Yh[level].real[:,:,1:5:3],\n Yh[level].imag[:,:,1:5:3])\n\n # Do even Qshift filters on columns.\n y1 = colifilt(Z, g0b, g0a, device=device, name='l%d_ll_col_low' % level) + \\\n colifilt(lh, g1b, g1a, device=device, name='l%d_lh_col_high' % level)\n\n if len(self.qshift) >= 12:\n y2 = colifilt(hl, g0b, g0a, device=device, name='l%d_hl_col_low' % level)\n y2bp = colifilt(hh, g2b, g2a, device=device, name='l%d_hh_col_bp' % level)\n\n # Do even Qshift filters on rows.\n Z = rowifilt(y1, g0b, g0a, device=device, name='l%d_ll_row_low' % level) + \\\n rowifilt(y2, g1b, g1a, device=device, name='l%d_hl_row_high' % level) + \\\n rowifilt(y2bp, g2b, g2a, device=device, name='l%d_hh_row_bp' % level)\n else:\n y2 = colifilt(hl, g0b, g0a, device=device, name='l%d_hl_col_low' % level) + \\\n colifilt(hh, g1b, g1a, device=device, name='l%d_hh_col_high' % level)\n\n # Do even Qshift filters on rows.\n Z = rowifilt(y1, g0b, g0a, device=device, name='l%d_ll_row_low' % level) + \\\n rowifilt(y2, g1b, g1a, device=device, name='l%d_hl_row_high' % level)\n\n # Check size of Z and crop as required\n Z_r, Z_c = Z.get_shape().as_list()[-2:]\n S_r, S_c = Yh[level-1].get_shape().as_list()[-2:]\n # check to see if this result needs to be cropped for the rows\n if Z_r != S_r * 2:\n Z = Z[:,:, 1:-1, :]\n # check to see if this result needs to be cropped for the cols\n if Z_c != S_c * 2:\n Z = Z[:,:, :, 1:-1]\n\n # Assert that the size matches at this stage\n Z_r, Z_c = Z.get_shape().as_list()[-2:]\n if Z_r != S_r * 2 or Z_c != S_c * 2:\n raise ValueError(\n 'Sizes of highpasses {}x{} are not '.format(Z_r, Z_c) +\n 'compatible with {}x{} from next level'.format(S_r, S_c))\n\n level = level - 1\n\n if level == 0:\n if self.complex:\n lh = c2q(tf.real(Yh[0][:,:,0:6:5]),\n tf.imag(Yh[0][:,:,0:6:5]))\n hl = c2q(tf.real(Yh[0][:,:,2:4:1]),\n tf.imag(Yh[0][:,:,2:4:1]))\n hh = c2q(tf.real(Yh[0][:,:,1:5:3]),\n tf.imag(Yh[0][:,:,1:5:3]))\n else:\n lh = c2q(Yh[0].real[:,:,0:6:5],\n Yh[0].imag[:,:,0:6:5])\n hl = c2q(Yh[0].real[:,:,2:4:1],\n Yh[0].imag[:,:,2:4:1])\n hh = c2q(Yh[0].real[:,:,1:5:3],\n Yh[0].imag[:,:,1:5:3])\n\n # Do odd top-level filters on columns.\n y1 = colfilter(Z, 
g0o, device=device, name='l0_ll_col_low') + \\\n colfilter(lh, g1o, device=device, name='l0_lh_col_high')\n\n if len(self.biort) >= 6:\n y2 = colfilter(hl, g0o, device=device, name='l0_hl_col_low')\n y2bp = colfilter(hh, g2o, device=device, name='l0_hh_col_bp')\n\n # Do odd top-level filters on rows.\n Z = rowfilter(y1, g0o, device=device, name='l0_ll_row_low') + \\\n rowfilter(y2, g1o, device=device, name='l0_hl_row_high') + \\\n rowfilter(y2bp, g2o, device=device, name='l0_hh_row_bp')\n else:\n y2 = colfilter(hl, g0o, device=device, name='l0_hl_col_low') + \\\n colfilter(hh, g1o, device=device, name='l0_hh_col_high')\n\n # Do odd top-level filters on rows.\n Z = rowfilter(y1, g0o, device=device, name='l0_ll_row_low') + \\\n rowfilter(y2, g1o, device=device, name='l0_hl_row_high')\n\n return Z",
"def test_lu_forward_sub():\t\n\t# test 1\n\tL = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\n\tb = np.array([4, -8, 9, 6])\n\n\ty = lu_forward_sub(L, b) \t\t\n\ty_soln = np.array([4,0,5,8])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y - y_soln) < 1.e-10\n\n\t# test 2\n\tL2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\n\tb2 = np.array ([[1.01], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [0.]])\n\n\ty2 = lu_forward_sub(L2, b2) \t\t\n\ty_soln2 = np.array([1.01, -101.99, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 99])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y2 - y_soln2) < 1.e-10",
"def inverse_diff(y,x0):\n\n return np.concatenate((np.expand_dims(np.zeros(y.shape[0:-1]),axis=-1),np.cumsum(y,axis=-1)),axis=-1)+np.expand_dims(x0,axis=-1)",
"def inverse_diff(y,x0):\n \n return np.concatenate((np.expand_dims(np.zeros(y.shape[0:-1]),axis=-1),np.cumsum(y,axis=-1)),axis=-1)+np.expand_dims(x0,axis=-1)",
"def func2(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2)\n return k",
"def selu(x):\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n return scale * elu(x, alpha)",
"def dual_inf_ODE(t, y, params):\n # Unpack states ------------------------------------------------------------------------------------\n # The states are as follows. All quantities are absolute numbers, not concentrations, so no volume scaling is made\n # during transport between the upper respiratory tract (URT) and the lower respiratory tract (LRT)\n # 0. T1 - target cells in URT\n # 1. E1WT - eclipse-stage cells in URT w/ wildtype virion (infected but not producing virions)\n # 2. I1WT - wildtype infected cells producing virions (URT)\n # 3. VTWT - wildtype virions SAMPLED by a pharyngeal throat swab in URT\n # 4. T1TIP - target cells in URT infected w/ TIP\n # 5. E1D - eclipse-stage cells in URT w/ coinfection by wildtype virus and TIP (infected but not producing virion)\n # 6. I1D - coinfected cells producing both wildtype virion and TIP (URT)\n # 7. VTTIP - TIP virions SAMPLED by a pharyngeal throat swab in URT {assume a hypothetical equivalent test to WT}\n # 8. T2 - target cells in LRT\n # 9. E2WT - eclipse-stage cells in LRT w/ wildtype virion (infected but not producing virions)\n # 10. I2WT - wildtype infected cells producing virions (LRT)\n # 11. VSWT - wildtype virions SAMPLED by a sputum sample from LRT\n # 12. T2TIP - target cells in LRT infected w/ TIP\n # 13. E2D - eclipse-stage cells in LRT w/ coinfection by wildtype virus and TIP (infected but not producing virion)\n # 14. I2D - coinfected cells producing both wildtype virion and TIP (LRT)\n # 15. VSTIP - TIP virions SAMPLEd by a sputum sample from LRT\n T1, E1WT, I1WT, VTWT, T1TIP, E1D, I1D, VTTIP, T2, E2WT, I2WT, VSWT, T2TIP, E2D, I2D, VSTIP = y\n\n # Unpack parameters ------------------------------------------------------------------------------------\n # beta_T = target cell infection rate constant in URT, divided by sampling fraction f1 (beta_T = beta1/f1)\n # k1 = eclipse phase rate constant, for transition from nonproductive infected cell to productive.\n # pi_T = virion production rate by infected cells in URT, multiplied by sampling fraction f1 (pi_T = f1*p1)\n # delta1 = clearance rate of virion-producing infected cells\n # beta_S, analogous to beta_T but for LRT (beta_S = beta2/f2)\n # k2, analogous to k1 but for LRT\n # pi_S, analogous to pi_T but for LRT (pi_S = f2*p2)\n # delta2, analogous to delta1 but for LRT\n # c = clearance rate constant for virions in both URT and LRT\n # w = exponential growth rate for the clearance of virion-producing infected cells (i.e. adaptive immunity)\n # gamma = scaled rate of virion transfer from URT to LRT, calculates production of VS from VT (gamma = f2/f1*g12)\n # rho = relative TIP virion production rate from coinfected cells (vs. wildtype virion production from coinf. cells)\n # psi = relative wildtype virion production rate from coinfected cells (vs. wildtype virion production from singly infected cells)\n # xi = relative target cell infection rate for TIP versus virion.\n # See Ruian Ke et al. 
medRxiv 10.1101/2020.09.25.20201772v1 for further definitions, parameter names match.\n beta_T, k1, pi_T, delta1, beta_S, k2, pi_S, delta2, c, w, gamma, rho, psi, xi, delta_TIP = params\n\n n_entries = 16\n dydt = np.zeros(n_entries)\n\n # Calculate effect of adaptive immune response\n if t >= 14:\n delta1 = delta1*np.exp(w*(t-14))\n delta2 = delta2*np.exp(w*(t-14))\n else:\n delta1 = delta1\n delta2 = delta2\n\n # dT1/dt\n dydt[0] = -beta_T*VTWT*T1 - xi*beta_T*VTTIP*T1\n\n # dE1WT/dt\n dydt[1] = +beta_T*VTWT*T1 - k1*E1WT\n\n # dI1WT/dt\n dydt[2] = k1*E1WT - delta1*I1WT\n\n # dVTWT/dt\n dydt[3] = pi_T*I1WT - c*VTWT + psi*pi_T*I1D\n\n # dT1TIP/dt\n dydt[4] = xi*beta_T*VTTIP*T1 - beta_T*VTWT*T1TIP\n\n # dE1D/dt\n dydt[5] = beta_T*VTWT*T1TIP - k1*E1D\n\n # dI1D/dt\n #dydt[6] = k1*E1D - delta1*I1D\n # Note that delta_TIP=1.\n # Non-unity values explore differential effect on immune clearance of TIP-carrying infected cells.\n dydt[6] = k1*E1D - delta1*delta_TIP*I1D\n\n # dVTTIP/dt\n dydt[7] = rho*pi_T*I1D - c*VTTIP\n\n # dT2/dt\n dydt[8] = -beta_S*VSWT*T2 - xi*beta_S*VSTIP*T2\n\n # dE2WT/dt\n dydt[9] = beta_S*VSWT*T2 - k2*E2WT\n\n # dI2WT/dt\n dydt[10] = k2*E2WT - delta2*I2WT\n\n # dVSWT/dt\n dydt[11] = pi_S*I2WT - c*VSWT + psi*pi_S*I2D + gamma*VTWT\n\n # dT2TIP/dt\n dydt[12] = xi*beta_S*VSTIP*T2 - beta_S*VSWT*T2TIP\n\n # dE2D/dt\n dydt[13] = beta_S*VSWT*T2TIP - k2*E2D\n\n # dI2D/dt\n dydt[14] = k2*E2D - delta2*delta_TIP*I2D\n\n # dVSTIP/dt\n dydt[15] = rho*pi_S*I2D - c*VSTIP + gamma*VTTIP\n\n return dydt",
"def __step__(self,f,t,u,dt,x=None,estimate_error=False,use_butcher=False):\n if self.alphahat is None:\n use_butcher = True\n\n m=len(self)\n u_old = u # Initial value\n y = [np.zeros_like(np.atleast_1d(u)) for i in range(m+1)]\n fy = [np.zeros_like(np.atleast_1d(u)) for i in range(m)]\n\n # First stage\n y[0][:]=u_old\n if x is not None: fy[0][:]=f(t,y[0],x)\n else: fy[0][:]=f(t,y[0])\n\n if use_butcher: # Use Butcher coefficients\n for i in range(1,m): # Compute stage i\n y[i][:] = u_old\n for j in range(i):\n y[i] += self.A[i,j]*dt*fy[j]\n if x is not None: fy[i][:] = f(t[-1]+self.c[i]*dt,y[i],x)\n else: fy[i][:] = f(t+self.c[i]*dt,y[i])\n u_new=u_old+dt*sum([self.b[j]*fy[j] for j in range(m)])\n if estimate_error:\n u_hat=u+dt*sum([self.bhat[j]*fy[j] for j in range(m)])\n\n else: # Use Shu-Osher coefficients\n v = 1 - self.alpha.sum(1)\n for i in range(1,m+1):\n y[i] = v[i]*u_old\n for j in range(i):\n y[i] += self.alpha[i,j]*y[j] + dt*self.beta[i,j]*fy[j]\n if i<m:\n if x is not None: fy[i][:] = f(t+self.c[i]*dt,y[i],x)\n else: fy[i][:] = f(t+self.c[i]*dt,y[i])\n u_new = y[m]\n\n if estimate_error:\n u_hat = np.zeros_like(np.atleast_1d(u))\n #if dt<1e-10:\n #print(\"Warning: very small step size: {} {}\".format(dt, t[-1]))\n u_hat = (1-np.sum(self.alphahat[-1,:]))*u_old\n for j in range(m):\n u_hat += self.alphahat[-1,j]*y[j] + dt*self.betahat[-1,j]*fy[j]\n\n if estimate_error:\n return u_new, np.max(np.abs(u_new-u_hat))\n else:\n return u_new",
"def _inverse(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n beta_h = beta * h\n return x + beta_h * diff",
"def helmholtz(X, u):\r\n ud = _np.zeros_like(u)\r\n copy = _np.ascontiguousarray\r\n\r\n theta = div(X, u)\r\n gth = scl_grad(X, theta)\r\n\r\n for i in range(3):\r\n poly1 = _interp(X[0], gth[i,:,:,:], axis=0)\r\n poly2 = poly1.antiderivative(nu=2)\r\n ud[i,...] += poly2(X[0])\r\n\r\n poly1 = _interp(X[1], gth[i,:,:,:], axis=1)\r\n poly2 = poly1.antiderivative(nu=2)\r\n ud[i,...] += poly2(X[1])\r\n\r\n poly1 = _interp(X[2], gth[i,:,:,:], axis=2)\r\n poly2 = poly1.antiderivative(nu=2)\r\n ud[i,...] += poly2(X[2])\r\n\r\n return ud",
"def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y",
"def ttd_u_func(self):\n T_i1 = T_mix_ph(self.inl[0].to_flow(), T0=self.inl[0].T.val_SI)\n T_o2 = T_mix_ph(self.outl[1].to_flow(), T0=self.outl[1].T.val_SI)\n return self.ttd_u.val - T_i1 + T_o2",
"def h_imu(self, u):\n\t\tdelta_R_prev = torch.eye(3)\n\t\tdelta_v_prev = torch.zeros(3)\n\t\tdelta_p_prev = torch.zeros(3)\n\t\tself.J = torch.zeros(u.shape[0], 9, 8)\n\t\tfor k in range(u.shape[0]):\n\t\t\tself.J[k, :3, :3] = delta_R_prev*self.delta_t\n\t\t\tself.J[k, 3:6, :3] = -delta_R_prev.mm(self.skew(u[k, 3:]))*self.delta_t\n\t\t\tself.J[k, 3:6, 3:6] = delta_R_prev*self.delta_t\n\t\t\tself.J[k, 3:6, :3] = -1/2*delta_R_prev.mm(self.skew(u[k, 3:]))*(self.delta_t**2)\n\t\t\tself.J[k, 6:9, 3:6] = 1/2*delta_R_prev*(self.delta_t**2)\n\t\t\tdelta_R = delta_R_prev.mm(SO3.exp(u[k, :3]*self.delta_t).as_matrix())\n\t\t\tdelta_v = delta_v_prev + delta_R.mv(u[k, 3:]*self.delta_t)\n\t\t\tdelta_p = delta_p_prev + delta_v*self.delta_t + delta_R.mv(u[k, 3:]*self.delta_t)*(self.delta_t**2)/2\n\t\t\tdelta_R_prev = SO3.from_matrix(delta_R, normalize=True).as_matrix()\n\t\t\tdelta_v_prev = delta_v\n\t\t\tdelta_p_prev = delta_p\n\n\t\treturn torch.cat((SO3.from_matrix(delta_R).log(),\n\t\t\t\t\t delta_v,\n\t\t\t\t\t delta_p), 0)"
] | [
"0.61993027",
"0.6093398",
"0.60126185",
"0.59149575",
"0.5895828",
"0.5891714",
"0.5883189",
"0.58782166",
"0.5834867",
"0.58016485",
"0.57893735",
"0.57810235",
"0.5741158",
"0.5739637",
"0.5727934",
"0.5716083",
"0.5700649",
"0.56995004",
"0.56938654",
"0.5658236",
"0.5645146",
"0.5606541",
"0.5602876",
"0.55836296",
"0.55779946",
"0.5573852",
"0.5573675",
"0.5566042",
"0.55509216",
"0.554989"
] | 0.6189042 | 1 |
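
A hedged usage sketch for the `stbinv` record above. The function depends on an `invredc` reduction step that is not included in this record, so the import below is hypothetical (module name assumed); the toy plant is a first-order discrete-time system with unit feedthrough, chosen so that the direct inverse dynamics A - B·E·C = -0.5 would stay inside the unit circle and the stability test can pass.

```python
import numpy as np
from scipy import signal

# Hypothetical import: stbinv and the invredc helper it calls are assumed to
# live in a module of the same codebase that is not shown in this record.
from inversion import stbinv

# Toy SISO discrete-time plant with invertible direct feedthrough.
A = np.array([[0.5]])
B = np.array([[1.0]])
C = np.array([[1.0]])
D = np.array([[1.0]])

# Simulate a known input through the plant to obtain the output y.
N = 50
t = np.arange(N).reshape(1, N)          # stbinv expects a 2-D time vector
u_true = np.sin(0.2 * np.arange(N)).reshape(N, 1)
_, y, _ = signal.dlsim((A, B, C, D, 1), u_true)
y = y.T                                 # outputs as rows: shape (p, N)

# Invert the plant: flag_vr == 0 signals that the inversion succeeded,
# and uhat is the input estimate defined over the reduced time vector tt.
uhat, tt, flag_vr = stbinv(A, B, C, D, y, t)
print(flag_vr, uhat.shape)
```
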
Function that transforms a MIMO transfer function into a state-space model. | def mtf2ss(G):
# Description to help the user
# calculating the number of outputs
p = len(G)
# calculating the number of inputs
m = len(G[0])
# creating a list for each matrix
A = []
B = []
C = []
D = []
nss = 0
# loop that get the SISO state-space transformations
for i in range(0, p):
# outputs - first index of the MIMO process list
A.append([])
B.append([])
C.append([])
D.append([])
for j in range(0, m):
# inputs - second index of the MIMO process list
if G[i][j] != 0:
# transform the individual SISO systems to a state-space model
Aij, Bij, Cij, Dij = signal.tf2ss(G[i][j].num, G[i][j].den)
# calculate the size of the A matrix
nss = nss + Aij.shape[0]
# organizing the matrices on a list
A[i].append(Aij)
B[i].append(Bij)
C[i].append(Cij)
D[i].append(Dij)
else:
A[i].append([])
B[i].append([])
C[i].append([])
D[i].append([])
# preallocation of the system's matrices
Ass = np.zeros((nss, nss))
Bss = np.zeros((nss, m))
Css = np.zeros((p, nss))
Dss = np.zeros((p, m))
# counters
ct = 0
# loop that organize the MIMO list obtained above on the state-space model
for i in range(0, p):
# loop on the outputs
for j in range(0, m):
# loop on the inputs
# test if the matrix isn't zero
if len(A[i][j]) > 0:
# calculate the size of the dynamic matrix
nij = A[i][j].shape[0]
# organizing the dynamic matrix
Ass[ct : ct + nij, ct : ct + nij] = A[i][j]
# organizing the input matrix
Bss[ct : ct + nij, j] = B[i][j][:, 0]
# organizing the output matrix
Css[i, ct : ct + nij] = C[i][j][0, :]
# organizing the feedforward matrix
Dss[i, j] = D[i][j]
# increase the counter
ct = ct + nij
return Ass, Bss, Css, Dss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, x):\n return torch.mm(x, self.matrixS)",
"def forward(self, state):\n x = self._format(state)\n x = self.activation_fc(self.input_layer(x))\n for hidden_layer in self.hidden_layers:\n x = self.activation_fc(hidden_layer(x))\n x = self.output_layer(x)\n x = self.out_activation_fc(x)\n return self.rescale_fn(x)",
"def _define_model_functions(self):\n # Input of neurons (Batch size x Number of states)\n states = Input(shape=(self.num_states,), dtype=tf.float32, name=\"states\")\n\n # Hidden layers\n layer_1 = layers.Dense(self.hidden_arch[0], activation=self.activation)(states)\n layers_n = [None for _ in range(len(self.hidden_arch))]\n layers_n[0] = layer_1\n for idx, n_neurons in enumerate(self.hidden_arch[1:]):\n layers_n[idx + 1] = layers.Dense(\n n_neurons,\n activation=self.activation,\n )(layers_n[idx])\n\n # Output of neurons is q(s, a) function\n q_s_a = layers.Dense(self.num_actions, name=\"q_s_a\")(layers_n[-1])\n\n # Get the model\n self.model = Model(inputs=states, outputs=q_s_a)\n\n # Loss function and optimizer\n self.loss = losses.MeanSquaredError(reduction=\"auto\", name=\"mean_squared_error\")\n\n self.optimizer = optimizers.Adam(\n learning_rate=self.learning_rate,\n beta_1=self.beta1,\n beta_2=self.beta2,\n name=\"Adam\",\n )",
"def galactic_to_MS():\n return MS_MATRIX",
"def transition_func(\n self,\n state: np.ndarray,\n state_noise: Optional[np.ndarray] = None,\n control_vect: Optional[np.ndarray] = None\n ) -> np.ndarray:\n pass",
"def _new_target_from_state(self):\n return self.global_transform.apply(self.model.instance(self.weights))",
"def transf(self,f):\r\n raise NotImplementedError",
"def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r",
"def convert_to_onnx(self, func):\n\n self.visit(func)\n self._add_output(self._node_dict[self.last_node])\n model = self._mc.make_model()\n return run_onnx_optimizer(model)",
"def _get_MG_transfer_function(self, cosmo, z):\n # Sampling scale factor from a very small (at CMB for example)\n # all the way to 1 here and today for the transfer function.\n # For a < a_single it is GR (no early MG)\n if isinstance(z, (int, float)):\n a_single = 1/(1+z)\n a = np.linspace(a_single, 1, 100)\n # a_single is for example like for the CMB surface\n else:\n if z[0] != 0.0:\n stepsize = z[1]-z[0]\n samplesize = int(z[0]/stepsize)\n z_0_to_zmin = np.linspace(0.0, z[0] - stepsize, samplesize)\n z = np.concatenate((z_0_to_zmin, z))\n a = 1./(1.+z)\n a.sort()\n # Scale-dependant MG case with an array of k\n nk = lib.get_pk_spline_nk(cosmo.cosmo)\n status = 0\n lk, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status)\n check(status, cosmo=cosmo)\n k = np.exp(lk)\n # computing MG factor array\n mgfac_1d = 1\n mgfac_1d += _Sig_MG(cosmo, a, k)\n # converting 1D MG factor to a 2D array, so it is compatible\n # with the transfer_ka input structure in MG_add.tracer and\n # add.tracer\n mgfac_2d = mgfac_1d.reshape(len(a), -1, order='F')\n # setting transfer_ka for this case\n mg_transfer = (a, lk, mgfac_2d)\n\n return mg_transfer",
"def forward(self, state):\n x = self.nonlin(self.fc1(self.in_fn(state)))\n x = self.drop_layer(x)\n x = self.nonlin(self.fc2(x))\n x = self.drop_layer(x)\n return self.fc3(x)",
"def convert_mv(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Vec\")[0])\n y = _op.expand_dims(y, axis=-1)\n y = _op.transpose(y)\n out = _op.nn.dense(x, y)\n out = _op.squeeze(out, axis=[-1])\n g.add_node(op.output(\"Out\")[0], out)",
"def mobius_transform(f):\n return dirichlet_conv(f, mobius)",
"def solve_motion_equations(M, B, state_vars=[], input_vars=[], parameters_values=dict()):\n\n M_shape = M.shape\n B_shape = B.shape\n assert(M_shape[0] == B_shape[0])\n\n # at first we create a buffer for the string that we complete and execute \n # to dynamically define a function and return it\n fnc_str_buffer = '''\ndef f(x, u, uuref, t, pp):\n # System variables\n %s # x_str\n %s # u_str\n \n # Parameters\n %s # par_str\n \n # Sympy Common Expressions\n %s # cse_str\n\n # Vectorfield\n %s # ff_str\n \n return ff\n'''\n\n #################################\n # handle system state variables #\n #################################\n # --> leads to x_str which shows how to unpack the state variables\n x_str = ''\n for var in state_vars:\n x_str += '%s, '%str(var)\n\n # as a last we remove the trailing '; ' to avoid syntax erros\n x_str = x_str + '= x'\n\n ##########################\n # handle input variables #\n ##########################\n # --> leads to u_str which will show how to unpack the inputs of the control system\n u_str = ''\n for var in input_vars:\n u_str += '%s, '%str(var)\n\n # after we remove the trailing '; ' to avoid syntax errors x_str will look like:\n # 'u1, u2, ... , um = u'\n u_str = u_str + '= u'\n\n ############################\n # handle system parameters #\n ############################\n # --> leads to par_str\n par_str = ''\n for k, v in list(parameters_values.items()):\n # 'k' is the name of a system parameter such as mass or gravitational acceleration\n # 'v' is its value in SI units\n par_str += '%s = %s; '%(str(k), str(v))\n\n # as a last we remove the trailing '; ' from par_str to avoid syntax errors\n par_str = par_str[:-2]\n\n # now solve the motion equations w.r.t. the accelerations\n sol = M.solve(B)\n\n # use SymPy's Common Subexpression Elimination\n cse_list, cse_res = sp.cse(sol, symbols=sp.numbered_symbols('q'))\n\n ################################\n # handle common subexpressions #\n ################################\n # --> leads to cse_str\n cse_str = ''\n #cse_list = [(str(l), str(r)) for l, r in cse_list]\n for cse_pair in cse_list:\n cse_str += '%s = %s; '%(str(cse_pair[0]), str(cse_pair[1]))\n\n # add result of cse\n for i in range(M_shape[0]):\n cse_str += 'q%d_dd = %s; '%(i, str(cse_res[0][i]))\n\n cse_str = cse_str[:-2]\n\n ######################\n # create vectorfield #\n ######################\n # --> leads to ff_str\n ff_str = 'ff = ['\n\n for i in range(M_shape[0]):\n ff_str += '%s, '%str(state_vars[2*i+1])\n ff_str += 'q%s_dd, '%(i)\n\n # remove trailing ',' and add closing brackets\n ff_str = ff_str[:-2] + ']'\n\n ############################\n # Create callable function #\n ############################\n # now we can replace all placeholders in the function string buffer\n fnc_str = fnc_str_buffer%(x_str, u_str, par_str, cse_str, ff_str)\n # and finally execute it which will create a python function 'f'\n # pass the current global scope to exec(). this is necessary so that sympy functions like cos/sin can be used\n globals_locals = globals()\n exec(fnc_str, globals_locals)\n\n # now we have defined a callable function that can be used within PyTrajectory\n return globals_locals['f']",
"def forward(self, x):\r\n # 1. step 0\r\n x_0 = self.mg0(x)\r\n a_0 = self.mhsa0(x_0)\r\n a_0 = a_0.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_0 = t.matmul(a_0, self.W_p0).squeeze().permute(0, -1, 1, 2) + x # transformation # [m, c, h, w]\r\n a_0_ = a_0\r\n a_0 = a_0.permute(0, 2, 3, 1)\r\n a_0 = self.mlp0(a_0)\r\n a_0 = a_0.permute(0, -1, 1, 2) + a_0_\r\n x_0 = self.max_pool0(a_0) + self.avg_pool0(a_0)\r\n\r\n # 2. step 1\r\n x_1 = self.mg1(x_0)\r\n a_1 = self.mhsa1(x_1)\r\n a_1 = a_1.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_1 = t.matmul(a_1, self.W_p1).squeeze().permute(0, -1, 1, 2) + x_0 # transformation # [m, c, h, w]\r\n a_1_ = a_1\r\n a_1 = a_1.permute(0, 2, 3, 1)\r\n a_1 = self.mlp1(a_1)\r\n a_1 = a_1.permute(0, -1, 1, 2) + a_1_\r\n x_1 = self.max_pool1(a_1) + self.avg_pool1(a_1)\r\n\r\n # 3. step 2\r\n x_2 = self.mg2(x_1)\r\n a_2 = self.mhsa2(x_2)\r\n a_2 = a_2.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_2 = t.matmul(a_2, self.W_p2).squeeze().permute(0, -1, 1, 2) + x_1 # transformation # [m, c, h, w]\r\n a_2_ = a_2\r\n a_2 = a_2.permute(0, 2, 3, 1)\r\n a_2 = self.mlp0(a_2)\r\n a_2 = a_2.permute(0, -1, 1, 2) + a_2_\r\n\r\n # 4. Upsample\r\n a_1 = self.upsample1(a_1)\r\n a_2 = self.upsample2(a_2)\r\n output = a_0 + a_1 + a_2\r\n\r\n return output",
"def forward_model(green_func_array, M):\n # And get forward model synthetic waveform result:\n synth_forward_model_result_array = np.zeros(np.shape(green_func_array[:,0,:]), dtype=float)\n # Loop over signals:\n for i in range(len(green_func_array[:,0,0])):\n # Loop over components of greens function and MT solution (summing):\n for j in range(len(M)):\n synth_forward_model_result_array[i,:] += green_func_array[i,j,:]*M[j] # greens function for specific component over all time * moment tensor component\n return synth_forward_model_result_array",
"def MS_to_galactic():\n return matrix_transpose(MS_MATRIX)",
"def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output",
"def forward(self, system):\n x_ligand = system[0] # retrieve the featurized component, here the ligand\n x_ligand = x_ligand.float()\n x_ligand = self._activation(self.fully_connected_1(x_ligand))\n return self.fully_connected_out(x_ligand)",
"def forward(self, state):\n x = F.relu(self.input(state))\n for layer in self.layers:\n x = F.relu(layer(x))\n if self.duel:\n # Value function estimator\n val = F.relu(self.val_fc_input(x))\n val = self.val_fc_output(val)\n # Advantage function estimator\n adv = F.relu(self.adv_fc_input(x))\n adv = self.adv_fc_output(adv)\n # Subtract mean so that V and A are uniquely identifiable for a given Q\n return val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.action_size)\n else:\n return self.output(x)",
"def __call__(self, inputs, states):\n \"\"\"Now we have multiple states, state->states\"\"\"\n sigmoid = tf.sigmoid\n # Parameters of gates are concatenated into one multiply for efficiency.\n # states: size = time_lag\n if self._state_is_tuple:\n hs = ()\n for state in states:\n c, h = state # c and h: tensor_size = (batch_size, hidden_size)\n hs += (h,) # hs : size = time_lag, i.e. time_lag * (batch_size, hidden_size)\n else:\n hs = ()\n for state in states:\n c, h = array_ops.split(value=state,\n num_or_size_splits=2,\n axis=1)\n hs += (h,)\n \n meta_variable_size = 4 * self.output_size\n concat = Symmetric_MPS_wavefn(inputs,\n hs,\n meta_variable_size,\n self._num_orders,\n self._virtual_dim,\n True)\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(value=concat,\n num_or_size_splits=4,\n axis=1)\n\n new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))\n new_h = self._activation(new_c) * sigmoid(o)\n\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return new_h, new_state",
"def fractalTransformation(F,G,M=256,N=50):\n assert isinstance(F,DynamicalSystem) and isinstance(G,DynamicalSystem)\n assert F.check_validity(True,False) and G.check_validity(True,False)\n x = np.linspace(0.0,1.0,M+1)\n transformation = np.vectorize(lambda x:G.pi(F.tau(x,N)))\n return x,transformation(x)",
"def propagate_state(self, msg):\r\n # Previous values\r\n x = self.state_vector[0, 0]\r\n y = self.state_vector[1, 0]\r\n theta = self.state_vector[2, 0]\r\n if theta < -pi:\r\n theta += 2 * pi\r\n elif theta > pi:\r\n theta -= 2 * pi\r\n\r\n # Current values\r\n vel = msg.twist.twist.linear.x\r\n ang = msg.twist.twist.angular.z\r\n nvel = 0\r\n nang = 0\r\n dt = msg.header.stamp.secs + msg.header.stamp.nsecs * 10 ** -9 - self.time_stamp\r\n self.time_stamp = msg.header.stamp.secs + msg.header.stamp.nsecs * 10 ** -9\r\n\r\n # Calculate Jacobians F and G\r\n self.motion_jacobian_state_vector(vel, ang, theta, 0, 0, dt)\r\n self.motion_jacobian_noise_components(vel, ang, theta, 0, 0, dt)\r\n\r\n # Choose motion model\r\n if ang == 0:\r\n # Propagate\r\n self.state_vector[0, 0] = x + (vel + nvel) * dt * cos(theta)\r\n self.state_vector[1, 0] = y + (vel + nvel) * dt * sin(theta)\r\n self.state_vector[2, 0] = theta\r\n else:\r\n # Propagate\r\n self.state_vector[0, 0] = x - ((vel + nvel) / (ang + nang)) * sin(theta) + (\r\n (vel + nvel) / (ang + nang)) * sin(theta + (ang + nang) * dt)\r\n self.state_vector[1, 0] = y + ((vel + nvel) / (ang + nang)) * cos(theta) - (\r\n (vel + nvel) / (ang + nang)) * cos(theta + (ang + nang) * dt)\r\n self.state_vector[2, 0] = theta + (ang + nang) * dt",
"def forward(self, x):\n return self.activation_function(self.backbone_model(x))",
"def forward(self, x_in):\r\n\r\n if x_in.dim() >1:\r\n x_out = torch.mm(x_in, self.w.t()) + self.b # To handle input of vector form\r\n else:\r\n x_out = torch.mv(self.w,x_in) + self.b # To handle input of tensor form (batch of samples)\r\n self.temp = x_in\r\n return x_out",
"def forward(self, state):\n\n _, _, theta, dtheta = (\n state[:, 0], state[:, 1], state[:, 2], state[:, 3])\n\n # predict a change in force\n # we only use relevant information to simplify the problem\n controller_input = torch.stack([\n torch.cos(theta),\n torch.sin(theta),\n dtheta\n ]).T.to(device)\n force = self.f(controller_input)[:, 0]\n\n # observe change in system\n du = self.cartpole(state, force)\n\n return du",
"def forward(self, input):\n return mish(input)",
"def state_transition(self):\n self.x = np.dot(self.F, self.x)\n self.x_noisy = noise_fun(np.dot(self.F, self.x_noisy), self.Q)\n return self.x",
"def calculate_transformation(self, p: np.ndarray, o: np.ndarray):\n self.set_inputs(p)\n self.set_outputs(o)\n self.reset_transformation_to_rest()\n self.reset_output_transformation_to_rest()\n # activation resets the hidden layer to rest (unless primed)\n self.activation(clamps = ['input', 'output'])\n return np.copy(self.t)[0]",
"def SM2m(sm):\n return sm * 1609.344"
] | [
"0.5455332",
"0.5412556",
"0.53700584",
"0.5354151",
"0.5343223",
"0.5263439",
"0.52409655",
"0.5220827",
"0.51881456",
"0.5185633",
"0.5157912",
"0.5157735",
"0.51349413",
"0.5131477",
"0.5121507",
"0.5115568",
"0.5115541",
"0.51097274",
"0.5103481",
"0.50699323",
"0.5063509",
"0.50523865",
"0.5047143",
"0.5045261",
"0.50434965",
"0.5039618",
"0.50110334",
"0.5004982",
"0.50003344",
"0.49963963"
] | 0.58319175 | 0 |
Evaluate version and git revision and save them to a version file. Evaluation is based on the VERSION variable and git describe if a .git directory is present in the tree. In case .git is not available, version and git_revision are taken from the get_distribution call. | def provide_git_revision(cls):
version = str(VERSION)
git_revision = str(GIT_REVISION)
git_date = str(GIT_DATE)
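        # the module-level VERSION, GIT_REVISION and GIT_DATE act as fallbacks when git metadata is unavailable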
if os.path.exists(".git"):
from subprocess import check_output
command = 'git describe --tags --long --dirty'
version_string = check_output(command.split()).decode('utf-8').strip()
if version_string != 'fatal: No names found, cannot describe anything.':
# git describe -> tag-commits-sha-dirty
version_string = version_string.replace('-dirty', '')
version_string = version_string.lstrip('v')
parts = version_string.split('-')
parts_len = len(parts)
# only tag or git sha
if parts_len == 1:
if cls.is_git_sha(parts[0]):
git_revision = parts[0]
git_revision = git_revision.lstrip('g')
else:
version = parts[0]
if parts_len == 2:
version = parts[0]
git_revision = cls.get_git_revision(parts[1])
if parts_len > 2:
# git sha
git_revision = cls.get_git_revision(parts[-1])
# commits after given tag
commits = cls.get_commits_count(parts[-2])
# version based on tag
version = ''.join(parts[:-1])
if commits is not None:
version = ''.join(parts[:-2])
# normalize rc to rcN for PEP 440 compatibility
version = version.lower()
if version.endswith('rc'):
version += '0'
else:
cls.logger.warning("Git describe command failed for current git repository")
git_date = cls.get_git_date(git_revision)
else:
from pkg_resources import get_distribution
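            # installed package metadata is expected in the form '<version>+<git_revision>'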
try:
version, git_revision = get_distribution("hivemind").version.split("+")
except:
cls.logger.warning("Unable to get version and git revision from package data")
cls._save_version_file(version, git_revision, git_date)
return version, git_revision | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_target_git_version():\n if os.path.exists(os.path.join(ROOT_DIR, '.git_bleeding_edge')):\n git_version_file = 'git_version_bleeding_edge.txt'\n else:\n git_version_file = 'git_version.txt'\n with open(os.path.join(THIS_DIR, git_version_file)) as f:\n return f.read().strip()",
"def write_version_file(version):\n try:\n git_log = subprocess.check_output(\n ['git', 'log', '-1', '--pretty=%h %ai']).decode('utf-8')\n git_diff = (subprocess.check_output(['git', 'diff', '.']) +\n subprocess.check_output(\n ['git', 'diff', '--cached', '.'])).decode('utf-8')\n if git_diff == '':\n git_status = '(CLEAN) ' + git_log\n else:\n git_status = '(UNCLEAN) ' + git_log\n except Exception as e:\n print(\"Unable to obtain git version information, exception: {}\"\n .format(e))\n git_status = ''\n\n version_file = '.version'\n if os.path.isfile(version_file) is False:\n with open('bilby/' + version_file, 'w+') as f:\n f.write('{}: {}'.format(version, git_status))\n\n return version_file",
"def git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n return GIT_REVISION",
"def git_hash():\n if not exists('qmk_firmware'):\n checkout_qmk()\n\n return open('qmk_firmware/version.txt').read().strip()",
"def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date",
"def get_version():\n git_root = find_git_root(dirname(__file__))\n\n if git_root is not None:\n # Get the version using \"git describe\".\n cmd = \"git describe --tags --match [0-9]*\".split()\n try:\n version = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get version number from git tags\")\n exit(1)\n\n # PEP 386 compatibility\n if \"-\" in version:\n version = \".post\".join(version.split(\"-\")[:2])\n\n # Don't declare a version \"dirty\" merely because a time stamp has\n # changed. If it is dirty, append a \".dev1\" suffix to indicate a\n # development revision after the release.\n with open(os.devnull, \"w\") as fd_devnull:\n subprocess.call([\"git\", \"status\"], stdout=fd_devnull, stderr=fd_devnull)\n\n cmd = \"git diff-index --name-only HEAD\".split()\n try:\n dirty = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get git index status\")\n exit(1)\n\n if dirty != \"\":\n version += \".dev1\"\n\n return version\n\n else:\n try:\n return pkg_resources.working_set.by_key[\"graphql-validate\"].version\n except KeyError:\n return \"0.0.0-unreleased\"",
"def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)",
"def test_get_file_with_git_and_revision(self):\n self._test_get_file(\n tool_name='Git',\n revision='123',\n base_commit_id=None,\n expected_revision='123')",
"def get_git_version(abbrev=4):\n # Read in the version that's currently in RELEASE-VERSION.\n release_version = read_release_version()\n\n # First try to get the current version using \"git describe\".\n tag, count, _ = call_git_describe(abbrev)\n\n if count == '0':\n if tag:\n # Normal tagged release\n version = tag\n else:\n # This is an odd case where the git repo/branch can't find a tag\n version = \"0.dev0\"\n elif count:\n # Non-zero count means a development release after the last tag\n version = \"{}.dev{}\".format(tag, count)\n else:\n # Build count wasn't returned at all. Fall back on the value that's in\n # the packaged RELEASE-VERSION file\n version = release_version\n\n # If the current version is different from what's in the\n # RELEASE-VERSION file, update the file to be current.\n if version != release_version:\n write_release_version(version)\n\n # Finally, return the current version.\n return version",
"def version():\n\n version_full = semver()\n CWD = os.path.dirname(__file__)\n got_git = os.path.exists(os.path.join(os.path.dirname(__file__), \"..\", \".git\"))\n if not got_git:\n return version_full\n try:\n # determine git binary\n git = \"git\"\n try:\n subprocess.check_output([git, \"--version\"])\n except Exception:\n git = \"/usr/bin/git\"\n try:\n subprocess.check_output([git, \"--version\"])\n except Exception as e:\n return version_full\n\n version_full = subprocess.check_output([git, \"describe\", \"--always\", \"--tags\"], cwd=CWD).strip().decode(\"ascii\")\n version_full = version_full.replace(\"-\", \"+\", 1).replace(\"-\", \".\") # Make this consistent with PEP440\n\n except Exception as e:\n print(\"Could not determine PODPAC version from git repo.\\n\" + str(e))\n\n return version_full",
"def gitversion():\n import os\n from subprocess import Popen, PIPE, STDOUT\n origdir = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n try:\n p = Popen(['git', \"describe\", \"--tags\", \"--dirty\", \"--always\"], stdout=PIPE, stderr=STDOUT)\n except EnvironmentError:\n return 'unknown'\n\n os.chdir(origdir)\n out = p.communicate()[0]\n if p.returncode == 0:\n #- avoid py3 bytes and py3 unicode; get native str in both cases\n return str(out.rstrip().decode('ascii'))\n else:\n return 'unknown'",
"def _save_version_file(cls, hivemind_version, git_revision, git_date):\n with open(\"hive/version.py\", 'w') as version_file:\n version_file.write(\"# generated by setup.py\\n\")\n version_file.write(\"# contents will be overwritten\\n\")\n version_file.write(\"VERSION = '{}'\\n\".format(hivemind_version))\n version_file.write(\"GIT_REVISION = '{}'\\n\".format(git_revision))\n version_file.write(\"GIT_DATE = '{}'\\n\".format(git_date))",
"def version():\n import inspect\n import shlex\n import subprocess\n\n def output(command):\n path = os.path.realpath(os.path.dirname(inspect.stack(0)[0][1]))\n return subprocess.check_output(shlex.split(command), cwd=path).strip()\n\n return (\n output(\"git rev-parse --show-toplevel\"),\n output(\"git remote get-url origin\"),\n output(\"git describe --always\"),\n )",
"def _get_code_version():\n git_dir = os.path.dirname(os.path.realpath(__file__))\n cwd = os.getcwd()\n file = os.path.join(cwd, VERSION_FILENAME)\n bash_command = f'cd {git_dir}; ' + \\\n f'git rev-parse HEAD > {file}; ' + \\\n f'cd {cwd}; '\n success = False\n try:\n subprocess.check_call(\n bash_command, stderr=subprocess.DEVNULL, shell=True)\n sucess = True\n except subprocess.CalledProcessError:\n # not a git directory\n bash_command = f'rm {file}; cd {cwd}; '\n subprocess.check_call(bash_command, shell=True)\n except OSError:\n # git command not found\n pass\n return success",
"def get_git_revision_hash() -> str:\n try:\n # We are not interested in gits complaints\n git_hash = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, encoding=\"utf8\"\n )\n # ie. \"git\" was not found\n # should we return a more generic meta hash here?\n # like \"undefined\"?\n except FileNotFoundError:\n git_hash = \"git_not_available\"\n except subprocess.CalledProcessError:\n # Ditto\n git_hash = \"no_repository\"\n return git_hash.rstrip()",
"def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')",
"def getversion_git(path=None):\n _program_dir = path or _get_program_dir()\n cmd = 'git'\n try:\n subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()\n except OSError:\n # some Windows git versions provide git.cmd instead of git.exe\n cmd = 'git.cmd'\n\n with open(os.path.join(_program_dir, '.git/config')) as f:\n tag = f.read()\n # Try 'origin' and then 'gerrit' as remote name; bail if can't find either.\n remote_pos = tag.find('[remote \"origin\"]')\n if remote_pos == -1:\n remote_pos = tag.find('[remote \"gerrit\"]')\n if remote_pos == -1:\n tag = '?'\n else:\n s = tag.find('url = ', remote_pos)\n e = tag.find('\\n', s)\n tag = tag[(s + 6):e]\n t = tag.strip().split('/')\n tag = f\"[{t[0][:-1]}] {'-'.join(t[3:])}\"\n dp = subprocess.Popen([cmd, '--no-pager',\n 'log', '-1',\n '--pretty=format:\"%ad|%an|%h|%H|%d\"',\n '--abbrev-commit',\n '--date=iso'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n info, _ = dp.communicate()\n info = info.decode(config.console_encoding).split('|')\n date = info[0][:-6]\n date = time.strptime(date.strip('\"'), '%Y-%m-%d %H:%M:%S')\n dp = subprocess.Popen([cmd, 'rev-list', 'HEAD'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n rev, stderr = dp.communicate()\n rev = f'g{len(rev.splitlines())}'\n hsh = info[3] # also stored in '.git/refs/heads/master'\n if (not date or not tag or not rev) and not path:\n raise VersionParseError\n return (tag, rev, date, hsh)",
"def get_pipe_version(pipe_instance):\n source_file = inspect.getsourcefile(pipe_instance.__class__)\n git_dir = os.path.dirname(source_file)\n\n # ls-files will verify both that a source file is located in a local\n # git repository and that it is under version control.\n # TODO I bet this STILL doesn't work with git add.\n git_ls_files_cmd = 'ls-files --error-unmatch {}'.format(source_file)\n if _run_git_cmd(git_dir, git_ls_files_cmd) == 0:\n # Get the hash and date of the last commit for the pipe.\n git_tight_hash_cmd = 'log -n 1 --pretty=format:\"%h;%aI\" -- {}'.format(source_file)\n git_tight_hash_result = _run_git_cmd(git_dir, git_tight_hash_cmd, get_output=True).split(';'.encode('utf8'))\n if len(git_tight_hash_result) == 2:\n tight_hash, tight_date = git_tight_hash_result\n\n git_tight_dirty_cmd = 'diff-index --name-only HEAD -- {}'.format(source_file)\n tight_dirty = len(_run_git_cmd(git_dir, git_tight_dirty_cmd, get_output=True)) > 0\n\n git_curr_branch_cmd = 'rev-parse --abbrev-ref HEAD'\n curr_branch = _run_git_cmd(git_dir, git_curr_branch_cmd, get_output=True).rstrip()\n\n git_fetch_url_cmd = 'config --get remote.origin.url'\n fetch_url = _run_git_cmd(git_dir, git_fetch_url_cmd, get_output=True).rstrip()\n\n obj_version = CodeVersion(semver=\"0.1.0\", hash=tight_hash, tstamp=tight_date, branch=curr_branch, url=fetch_url, dirty=tight_dirty)\n elif len(git_tight_hash_result) == 1 and git_tight_hash_result[0] == '':\n # git has the file but does not have a hash for the file,\n # which means that the file is a newly git-added file.\n # TODO: fake a hash, use date == now()\n _logger.warning('{}.{}: Source file {} added but not committed to git repository'.format(pipe_instance.__module__, pipe_instance.__class__.__name__, source_file))\n obj_version = CodeVersion(semver=\"0.1.0\", hash='', tstamp='', branch='', url='', dirty=True)\n else:\n raise ValueError(\"Got invalid git hash: expected either a hash;date or a blank, got {}\".format(git_tight_hash_result))\n else:\n _logger.warning('{}.{}: Source file {} not under git version control'.format(pipe_instance.__module__, pipe_instance.__class__.__name__, source_file))\n # TODO: fake a hash, use date == now()\n obj_version = CodeVersion(semver=\"0.1.0\", hash='', tstamp='', branch='', url='', dirty=True)\n\n return obj_version",
"def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'",
"def run(self):\n default = 'UNKNOWN'\n\n try:\n from git import Repo\n\n repo = Repo(path.realpath(self.base_dir))\n scm_branch = repo.active_branch.name\n except Exception:\n scm_branch = default\n\n try:\n commit_id = repo.head.reference.commit.hexsha\n except Exception:\n commit_id = default\n\n version_json = {\n 'version': self.version,\n 'buildDate': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + \"Z\",\n 'deployDate': 'NOT-YET-DEPLOYED',\n 'scmBranch': getenv('GIT_BRANCH', scm_branch),\n 'scmLastCommit': getenv('GIT_COMMIT', commit_id)\n }\n\n version_path = path.abspath(self.output_dir)\n file = path.join(version_path, 'version.json')\n\n if not path.exists(version_path):\n makedirs(version_path)\n\n with open(file, 'w') as output:\n dump(version_json, output, indent=2)",
"def cache_git_info(version, cfg):\n branch_dir = os.path.join(cfg['stage_dir'], 'php-%s' % version)\n\n if not os.path.isdir(branch_dir):\n raise IOError(errno.ENOENT, 'Invalid branch directory', branch_dir)\n\n # Create cache directory if needed\n cache_dir = os.path.join(branch_dir, 'cache', 'gitinfo')\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n # Create cache for branch\n info = git.info(branch_dir)\n cache_file = git.info_filename(branch_dir, branch_dir, cache_dir)\n with open(cache_file, 'w') as f:\n json.dump(info, f)\n\n # Create cache for each extension and skin\n for dirname in ['extensions', 'skins']:\n dir = os.path.join(branch_dir, dirname)\n for subdir in utils.iterate_subdirectories(dir):\n try:\n info = git.info(subdir)\n except IOError:\n pass\n else:\n cache_file = git.info_filename(subdir, branch_dir, cache_dir)\n with open(cache_file, 'w') as f:\n json.dump(info, f)",
"def get_git_revision_short_hash() -> str:\n try:\n #ghash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n\n # independent of pyNastran location as long as there is a git folder\n # what about if you use setup_user.py install?\n # what about if you don't have git?\n # can raise a subprocess.CalledProcessError, which means the return code != 0\n ghash = subprocess.check_output(['git', 'describe', '--always'],\n cwd=os.path.dirname(__file__))\n\n ghash = ghash.decode('utf-8').rstrip()\n except Exception:\n # git isn't installed\n ghash = 'no.checksum.error'\n return 'dev.%s' % ghash",
"def get_version_from_git(opts):\n\tstdout = opts.tag or Popen(gitargs, stdout=PIPE).communicate()[0].rstrip('\\n')\n\n\tversion, gitmeta = process_git_tag(opts.regex, stdout)\n\n\treturn version, gitmeta",
"def find_git_revnum():\n git_dir = \"{0}/.git\".format(tc.APPDIR) \n return subprocess.check_output([\"git\", \"--git-dir=%s\" % git_dir, \"describe\", \"--always\"]).strip()",
"def version():\n\n version = None\n output = gitopen(['--version'])\n m = re.search(br\" version ([\\d\\.A-Za-z]+)\", output)\n if m is not None:\n version = m.group(1).decode('utf-8')\n return version",
"def get_version():\n try:\n return check_output(\n \"git describe --tags\".split(\" \")\n ).decode('utf-8').strip()\n except CalledProcessError:\n return check_output(\n \"git rev-parse --short HEAD\".split(\" \")\n ).decode('utf-8').strip()",
"def create_version_file(version='unknown', gitmeta=''):\n\tfname = join(dirname(abspath(__file__)), 'MHLogin', '_version.py')\n\tf = open(fname, 'wb')\n\tf.write(VERSION_PY % {'version': version, 'gitmeta': gitmeta, })\n\tf.close()",
"def build_version_file(provided_version):\n version_json = Path(VERSION_FILE_NAME)\n\n if provided_version == 'auto':\n # Read version.json\n with version_json.open('r') as version_file:\n version = json.load(version_file)\n current_version = version.get('version')\n\n version_parts = [int(part) for part in current_version.split('.')]\n version_parts[-1] += 1 # auto increment last version part. Major + Minor versions must be set manually\n provided_version = '.'.join(str(part) for part in version_parts)\n\n with version_json.open('w') as version_file:\n json.dump({'version': provided_version}, version_file)",
"def extract_version_info():\n version = None\n if os.path.exists('.version'):\n with open('.version') as f:\n line = f.read().rstrip()\n log.info('.version contains \"%s\"', line)\n if line.startswith('openafs-'):\n # Extract version from the git tag name.\n version = re.sub('openafs-[^-]*-', '', line).replace('_', '.')\n elif line.startswith('BP-'):\n # Branch point tags do not contain the version number.\n log.info('.version file has old branch point tag name.')\n else:\n # Use the given version string.\n version = line\n if not version:\n # Unable to lookup version from the .version file, try to extract the\n # version from the source directory name.\n root = os.path.basename(os.path.abspath('.'))\n m = re.match(r'openafs-(.*)', root)\n if m:\n version = m.group(1)\n if not version:\n module.fail_json(msg='Unable to determine version.')\n\n # Determine package version and release from the OpenAFS version.\n m1 = re.match(r'(.*)(pre[0-9]+)', version) # prerelease\n m2 = re.match(r'(.*)dev', version) # development\n m3 = re.match(r'(.*)-([0-9]+)-(g[a-f0-9]+)$', version) # development\n m4 = re.match(r'(.*)-([a-z]+)([0-9]+)', version) # custom\n if m1:\n v = m1.group(1)\n r = \"0.{0}\".format(m1.group(2))\n elif m2:\n v = m2.group(1)\n r = \"0.dev\"\n elif m3:\n v = m3.group(1)\n r = \"{0}.{1}\".format(m3.group(2), m3.group(3))\n elif m4:\n v = m4.group(1).replace('-', '')\n r = \"1.2.{0}.{1}\".format(m4.group(3), m4.group(2))\n else:\n v = version # standard release\n r = \"1\" # increment when repackaging this version\n # '-' are used as delimiters by rpm.\n v = v.replace('-', '_')\n r = r.replace('-', '_')\n return dict(openafs_version=version, package_version=v, package_release=r)",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")"
] | [
"0.70678526",
"0.68655723",
"0.6796388",
"0.6708447",
"0.65252197",
"0.64766747",
"0.6476001",
"0.64697593",
"0.6447515",
"0.6442917",
"0.6426894",
"0.6379362",
"0.6362795",
"0.6267706",
"0.62258583",
"0.62182444",
"0.6217623",
"0.61762476",
"0.61670643",
"0.6155937",
"0.61094433",
"0.6068927",
"0.60265625",
"0.6004977",
"0.59847707",
"0.59847385",
"0.5983509",
"0.5976251",
"0.5975736",
"0.5966979"
] | 0.73866886 | 0 |
Helper method to save version.py with current version and git_revision | def _save_version_file(cls, hivemind_version, git_revision, git_date):
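        # overwrite hive/version.py so the package can report the resolved version, revision and date at runtime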
with open("hive/version.py", 'w') as version_file:
version_file.write("# generated by setup.py\n")
version_file.write("# contents will be overwritten\n")
version_file.write("VERSION = '{}'\n".format(hivemind_version))
version_file.write("GIT_REVISION = '{}'\n".format(git_revision))
version_file.write("GIT_DATE = '{}'\n".format(git_date)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GenerateRevisionFile(self):\n\n print 'Saving revision to %s' % self.revisions_path\n Write(\n self.revisions_path,\n ('{\"chromium_revision\":%d, \"webkit_revision\":%d, '\n '\"v8_revision\":%d}') % (self._chromium_revision,\n self._webkit_revision,\n self._v8_revision))",
"def create_version_file(version='unknown', gitmeta=''):\n\tfname = join(dirname(abspath(__file__)), 'MHLogin', '_version.py')\n\tf = open(fname, 'wb')\n\tf.write(VERSION_PY % {'version': version, 'gitmeta': gitmeta, })\n\tf.close()",
"def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)",
"def save_version(version_name, yml):\n\n output_1 = version(version_name)\n output_2 = path(yml)\n return ' - Save version ' + output_1 + '\\n' + output_2",
"def saveNewVersion(self, *args, **kwargs):\n self.owner = self.song.owner\n self.date_added = self.song.date_added\n self.comment_node = self.song.comment_node\n\n self.project.title = self.song.title\n if not self.id:\n self._save(*args, **kwargs)\n\n self.project.latest_version = self\n self.project.save()\n self._save(*args, **kwargs)\n self.makeLogEntry()",
"def set_version(self, bundle, ctx, filename, version):",
"def persist_version():\r\n #it's not necessary to do this every time we persist, but\r\n #this way we don't have to worry about race conditions with resume.py\r\n #reading this\r\n f = open(os.path.join(get_persist_root_dir(), \"sparkVersion\"), 'w')\r\n from spark.internal.version import VERSION \r\n f.write(VERSION)\r\n f.close()",
"def run(self):\n\n version_str = (\n get_git_version(here))\n\n version_uniparser_dict = (\n get_uniparser_version())\n\n if (version_str is not None or\n version_uniparser_dict is not None):\n\n with open(\n os.path.join(here, 'lingvodoc', 'version.py'), 'w',\n encoding = 'utf-8') as version_py_file:\n\n version_py_file.write(\n self.version_py_template.format(\n repr(version_str),\n repr(version_uniparser_dict)))\n\n # Continuing with setup.\n\n super().run()",
"def revision():\n pass",
"def version():\n with cd(settings.SRC_PATH()):\n new_version = prompt('New version number?')\n run('echo \"window.version=\\'{0}\\';\" > app/static/js/version.js'\n .format(new_version))",
"def build_version_file(provided_version):\n version_json = Path(VERSION_FILE_NAME)\n\n if provided_version == 'auto':\n # Read version.json\n with version_json.open('r') as version_file:\n version = json.load(version_file)\n current_version = version.get('version')\n\n version_parts = [int(part) for part in current_version.split('.')]\n version_parts[-1] += 1 # auto increment last version part. Major + Minor versions must be set manually\n provided_version = '.'.join(str(part) for part in version_parts)\n\n with version_json.open('w') as version_file:\n json.dump({'version': provided_version}, version_file)",
"def updateLastCommitFile(self):\n f = open(self.last_released, 'w')\n f.write(self.new_rev)\n f.close()",
"def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")",
"def set_version(self, version):\n\n def update_version(version, filepath):\n with open(filepath, \"r\") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents_version(contents, version)\n assert contents != new_contents\n with open(filepath, \"w\") as stream:\n stream.write(new_contents)\n\n update_version(version, os.path.join(\".\", \"package.json\"))\n update_version(version, os.path.join(\".\", \"src\", \"setup.py\"))\n update_version(\n version, os.path.join(\".\", \"src\", \"robocorp_code\", \"__init__.py\")\n )",
"def provide_git_revision(cls):\n version = str(VERSION)\n git_revision = str(GIT_REVISION)\n git_date = str(GIT_DATE)\n if os.path.exists(\".git\"):\n from subprocess import check_output\n command = 'git describe --tags --long --dirty'\n version_string = check_output(command.split()).decode('utf-8').strip()\n if version_string != 'fatal: No names found, cannot describe anything.':\n # git describe -> tag-commits-sha-dirty\n version_string = version_string.replace('-dirty', '')\n version_string = version_string.lstrip('v')\n parts = version_string.split('-')\n parts_len = len(parts)\n # only tag or git sha\n if parts_len == 1:\n if cls.is_git_sha(parts[0]):\n git_revision = parts[0]\n git_revision = git_revision.lstrip('g')\n else:\n version = parts[0]\n if parts_len == 2:\n version = parts[0]\n git_revision = cls.get_git_revision(parts[1])\n if parts_len > 2:\n # git sha\n git_revision = cls.get_git_revision(parts[-1])\n # commits after given tag\n commits = cls.get_commits_count(parts[-2])\n # version based on tag\n version = ''.join(parts[:-1])\n if commits is not None:\n version = ''.join(parts[:-2])\n # normalize rc to rcN for PEP 440 compatibility\n version = version.lower()\n if version.endswith('rc'):\n version += '0'\n else:\n cls.logger.warning(\"Git describe command failed for current git repository\")\n git_date = cls.get_git_date(git_revision)\n else:\n from pkg_resources import get_distribution\n try:\n version, git_revision = get_distribution(\"hivemind\").version.split(\"+\")\n except:\n cls.logger.warning(\"Unable to get version and git revision from package data\")\n cls._save_version_file(version, git_revision, git_date)\n return version, git_revision",
"def write_version(settings, version, force=False):\n semver_path = settings['semver_path']\n filename = settings['semver_branch']\n path = os.path.join(semver_path, filename)\n logger.debug(f'write version:{version} to path:{path} with force:{force}')\n\n path_exists = os.path.exists(path)\n if path_exists:\n current_version = read_version(settings)\n if current_version == version:\n logger.debug(f'version is same as current version {current_version}')\n return\n\n if not path_exists or force:\n write_file(path, version)\n semver_repo = Repo(semver_path)\n index = semver_repo.index\n index.add([filename])\n semver_user_name = settings['semver_user_name']\n semver_user_email = settings['semver_user_email']\n author = Actor(semver_user_name, semver_user_email)\n index.commit(f'semver({filename}): {version}', author=author, committer=author, parent_commits=None)",
"def genVersion(*args, **kwargs):\n return generateVersionFileData(Version(*args, **kwargs))",
"def save_as(self, version, run_pre_publishers=True):\n # set the extension to '.comp'\n # refresh the current comp\n self.comp = self.fusion.GetCurrentComp()\n from stalker import Version\n assert isinstance(version, Version)\n # its a new version please update the paths\n version.update_paths()\n version.extension = self.extensions[0]\n version.created_with = self.name\n\n # set project_directory\n self.project_directory = os.path.dirname(version.absolute_path)\n\n # create the main write node\n self.create_main_saver_node(version)\n\n # check if this is a shot related task\n is_shot_related_task = False\n shot = None\n from stalker import Shot\n for task in version.task.parents:\n if isinstance(task, Shot):\n is_shot_related_task = True\n shot = task\n break\n\n fps = None\n imf = None\n if not is_shot_related_task:\n # use the Project image_format\n fps = version.task.project.fps\n imf = version.task.project.image_format\n else:\n # use the shot image_format\n if shot:\n fps = shot.fps\n imf = shot.image_format\n\n # set frame ranges\n self.set_frame_range(\n start_frame=shot.cut_in,\n end_frame=shot.cut_out,\n )\n\n # set comp resolution and fps\n if imf:\n self.comp.SetPrefs({\n # Image Format\n \"Comp.FrameFormat.Width\": imf.width,\n \"Comp.FrameFormat.Height\": imf.height,\n \"Comp.FrameFormat.AspectY\": imf.pixel_aspect,\n \"Comp.FrameFormat.AspectX\": imf.pixel_aspect,\n\n # FPS\n \"Comp.FrameFormat.Rate\": fps,\n\n # set project frame format to 16bit\n \"Comp.FrameFormat.DepthFull\": 2.0,\n \"Comp.FrameFormat.DepthLock\": True,\n })\n\n # replace read and write node paths\n # self.replace_external_paths()\n\n # create the path before saving\n try:\n os.makedirs(version.absolute_path)\n except OSError:\n # path already exists OSError\n pass\n\n version_full_path = os.path.normpath(version.absolute_full_path)\n\n self.comp.Lock()\n self.comp.Save(version_full_path.encode())\n self.comp.Unlock()\n\n # create a local copy\n self.create_local_copy(version)\n\n rfm = RecentFileManager()\n rfm.add(self.name, version.absolute_full_path)\n\n return True",
"def SaveBuildRevisionToSpecifiedFile(self, file_path):\n\n print 'Saving revision to %s' % file_path\n Write(file_path, '%d' % self._build_revision)",
"def get_reversion():\n return to_str(backend.get().af_get_revision())",
"def run_save(self, expanded, unexpanded) :\n\t\tif not self.__version :\n\t\t\treturn self.errormessage(\"Not in a version\")\n\t\telse :\n\t\t\tobjver = self.toObject(self.__context, self.__version)\n\t\t\tif objver is None :\n\t\t\t\treturn self.errormessage(\"Error while accessing version %s\" % self.__version)\n\t\t\telse :\n\t\t\t\tif not self.HasPerms(objver, 'Save/discard Version changes') :\n\t\t\t\t\treturn -1\n\t\t\t\t# for save, remark doesn't have a default value (according to Zope 2.3.0 sources)\n\t\t\t\tobjver.save(remark = (string.join(expanded, ' ') or 'No comment'))\n\t\t\t\tself.htmlmessage(\"Version %s saved\" % self.ObjectPath(objver))",
"def run(self):\n default = 'UNKNOWN'\n\n try:\n from git import Repo\n\n repo = Repo(path.realpath(self.base_dir))\n scm_branch = repo.active_branch.name\n except Exception:\n scm_branch = default\n\n try:\n commit_id = repo.head.reference.commit.hexsha\n except Exception:\n commit_id = default\n\n version_json = {\n 'version': self.version,\n 'buildDate': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + \"Z\",\n 'deployDate': 'NOT-YET-DEPLOYED',\n 'scmBranch': getenv('GIT_BRANCH', scm_branch),\n 'scmLastCommit': getenv('GIT_COMMIT', commit_id)\n }\n\n version_path = path.abspath(self.output_dir)\n file = path.join(version_path, 'version.json')\n\n if not path.exists(version_path):\n makedirs(version_path)\n\n with open(file, 'w') as output:\n dump(version_json, output, indent=2)",
"def do_version(self):\n return \"1.0.0\", True",
"def commitVersion(self, tempFile, stamp):\n os.rename(tempFile, self.getFile(stamp))",
"def write_version_file(version):\n try:\n git_log = subprocess.check_output(\n ['git', 'log', '-1', '--pretty=%h %ai']).decode('utf-8')\n git_diff = (subprocess.check_output(['git', 'diff', '.']) +\n subprocess.check_output(\n ['git', 'diff', '--cached', '.'])).decode('utf-8')\n if git_diff == '':\n git_status = '(CLEAN) ' + git_log\n else:\n git_status = '(UNCLEAN) ' + git_log\n except Exception as e:\n print(\"Unable to obtain git version information, exception: {}\"\n .format(e))\n git_status = ''\n\n version_file = '.version'\n if os.path.isfile(version_file) is False:\n with open('bilby/' + version_file, 'w+') as f:\n f.write('{}: {}'.format(version, git_status))\n\n return version_file",
"def __write_build_version(file_path, identifier, version):\n\n with open(file_path) as fp:\n lines = fp.readlines()\n\n new_lines = []\n for line in lines:\n if line.find(identifier) > -1:\n parts = line.split(identifier)\n parts[-1] = version + '\\n'\n new_line = identifier.join(parts)\n new_lines.append(new_line)\n else:\n new_lines.append(line)\n\n fp2 = open(file_path, 'w')\n fp2.write(''.join(new_lines))\n fp2.close()",
"def version_hash():\n git_hash = current_git_hash()\n return \"%s-%s\" % (__VERSION__, git_hash)",
"def _get_version(self):",
"def command_new_version(self):\n repoinit.new_version(*self.args())"
] | [
"0.6732105",
"0.6625021",
"0.65620464",
"0.6561149",
"0.64542365",
"0.63980365",
"0.6322233",
"0.6319752",
"0.62847656",
"0.6251861",
"0.6206942",
"0.6136045",
"0.6086098",
"0.6048948",
"0.60261697",
"0.601439",
"0.59903044",
"0.59877247",
"0.59568715",
"0.5952817",
"0.59305626",
"0.59299177",
"0.5927936",
"0.5917154",
"0.5913531",
"0.5896734",
"0.5893593",
"0.58606905",
"0.5833559",
"0.5821905"
] | 0.7704217 | 0 |
Read structured entity from metadata store for a given project directory. context: Context object containing projectDir, the path of the project whose metadata store is to be read from. fqId: String representing the fully qualified ID of the entity. Implementations should return an instance of themselves containing data read from the metadata store. Raises KeyError if the entity is not in the metadata. | def readFromMetadata(cls, context, fqId):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def read_file(self, entity):\n\n return self.cache.read_file(\n entity.objects['project'],\n entity.objects['ref'],\n entity.objects['file']['path']\n )",
"def get(cls, project_id, resource_type, resource_id, key):\n\n try:\n return DBMetadata.find_by(\n deleted=False,\n project_id=project_id,\n resource_id=resource_id,\n resource_type=resource_type,\n key=key)\n\n except exception.NotFound:\n raise exception.MetadataKeyForResourceNotFound(\n key=key,\n resource_type=resource_type,\n resource_id=resource_id)",
"def readFromMetadata(cls, context, fqId):\n newInstance = AssetProvenance()\n (newInstance.section, newInstance.name) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n \n provenance = GenericMetadata.readProvenanceEntries(context)\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n newInstance.dcIdentifier = provenance[dcIdentifier]\n dcSource = keyProto + 'dc.source'\n newInstance.dcSource = provenance[dcSource]\n dcTitle = keyProto + 'dc.title'\n newInstance.dcTitle = provenance[dcTitle]\n dcDate = keyProto + 'dc.date'\n newInstance.dcDate = datetime.strptime(provenance[dcDate], AssetProvenance.FMT_DATE)\n dcPublisher = keyProto + 'dc.publisher'\n newInstance.dcPublisher = provenance[dcPublisher]\n dcDescription = keyProto + 'dc.description'\n newInstance.dcDescription = provenance[dcDescription]\n processingNotes = keyProto + 'processing_notes'\n newInstance.processingNotes = provenance[processingNotes]\n \n return newInstance",
"def resolve_repository_entry(self, path):\n\n project, ref, remainingPath = self.resolve_ref_prefix(path)\n if not ref or remainingPath.as_posix() == '.':\n return None\n\n # List parent directory to retrieve entry attributes\n entry = self.get_entry_properties(project, ref, remainingPath.as_posix())\n\n # Approximate entry age by last commit to containing ref\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n if entry != None:\n if entry['type'] == 'blob':\n fileSize = self.cache.get_file_size(project, ref, remainingPath.as_posix())\n\n # Approximate file age more accurately by its last commit timestamp\n if self.commitTimes:\n entryTime = self.cache.get_file_commit_timestamp(project, ref, remainingPath.as_posix())\n else:\n entryTime = refTime\n\n # Convert mode and strip write bits\n permissions = int(entry['mode'][-3:], 8) & 0o555\n\n return Entity(\n EntityType.REPOSITORY_FILE,\n path,\n create_file_attributes(permissions, entryTime, fileSize),\n {'project': project, 'ref': ref, 'file': entry}\n )\n elif entry['type'] == 'tree':\n return Entity(\n EntityType.REPOSITORY_DIR,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref, 'directory': entry}\n )\n\n return None",
"def _load_entity(client, entity_type, entity_id, parent_key=None):\n\n key = _load_key(client, entity_type, entity_id, parent_key)\n entity = client.get(key)\n log('retrieved entity: ' + entity_type + ' for ID: ' + str(entity_id))\n return entity",
"def _load_metadata(self, datapath):\n try:\n metadata = Metadata(datapath)\n return metadata\n except RuntimeError:\n print('Metadata does not exist. Please double check your datapath.')\n return None",
"def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data",
"def readFromMetadata(cls, context, fqId): \n newInstance = ModelRun()\n (newInstance.modelType, newInstance.runNumber) = fqId.split(GenericMetadata.KEY_SEP)\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n keyProto = fqId + GenericMetadata.KEY_SEP\n \n runDate = keyProto + 'date_utc'\n newInstance.date = datetime.strptime(modelRunEntries[runDate], ModelRun.FMT_DATE)\n runDesc = keyProto + 'description'\n newInstance.description = modelRunEntries[runDesc]\n runCmd = keyProto + 'command'\n newInstance.command = modelRunEntries[runCmd]\n runOutput = keyProto + 'output'\n newInstance.output = modelRunEntries[runOutput]\n \n return newInstance",
"def get_meta(_id):\n dataset = ESDataset.get(id=_id, ignore=404, _source=\"_meta\")\n\n if dataset:\n return RegistryDocument.wraps(dataset).meta\n\n raise NoEntityError(f\"dataset {_id} does not exist.\")",
"def read_local_metadata(self, fld: str) -> Optional[str]:\n return self.read_metadata(self.get_obj_label(), fld)",
"def _metadata_get(self, path):\n fd = self.fs.open(path, \"r\")\n # TODO iterate instead of assuming file < 4MB\n read_bytes = self.fs.read(fd, 0, 4096 * 1024)\n self.fs.close(fd)\n if read_bytes:\n return json.loads(read_bytes.decode())\n else:\n return None",
"def get(self, entity, default=None, check_tags=True):\n\n if not isinstance(entity, Entity):\n raise TypeError('path cache keys are entities; got %r %r' % (type(entity), entity))\n\n with self.conn:\n c = self.conn.cursor()\n c.execute('SELECT path FROM entity_paths WHERE entity_type = ? AND entity_id = ?', (entity['type'], entity['id']))\n row = c.fetchone()\n if row is None:\n return default\n path = os.path.abspath(os.path.join(self.project_root, row[0]))\n\n # Make sure that the entity is actually tagged in the given directory.\n # This guards against moving tagged directories. This does NOT\n # effectively guard against copied directories.\n if check_tags:\n if not any(tag['entity'] is entity for tag in self.sgfs.get_directory_entity_tags(path)):\n log.warning('%s %d is not tagged at %s' % (\n entity['type'], entity['id'], path,\n ))\n return default\n\n return path",
"def readFromMetadata(cls, context, fqId):\n newInstance = ClimatePointStation()\n (newInstance.type, newInstance.id) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n\n climate = GenericMetadata.readClimatePointEntries(context)\n \n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP\n longitude = keyProto + 'longitude'\n newInstance.longitude = float(climate[longitude])\n latitude = keyProto + 'latitude'\n newInstance.latitude = float(climate[latitude])\n elevation = keyProto + 'elevation'\n newInstance.elevation = float(climate[elevation])\n name = keyProto + 'name'\n newInstance.name = climate[name] \n startDate = keyProto + 'startdate'\n try:\n newInstance.startDate = datetime.strptime(climate[startDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass \n endDate = keyProto + 'enddate'\n try:\n newInstance.endDate = datetime.strptime(climate[endDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass\n variablesKey = keyProto + 'variables'\n try:\n newInstance.variables = climate[variablesKey].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n pass\n data = keyProto + 'data'\n try:\n newInstance.data = climate[data]\n except KeyError:\n pass\n try:\n for var in newInstance.variables:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n newInstance.variablesData[var] = climate[varKey]\n except KeyError:\n pass\n \n return newInstance",
"def read(self, entity, content_type):\n return None",
"def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname",
"def get_metadata(\n self,\n digest: Optional[Digest] = None,\n ignore_errors: bool = True,\n ) -> CommonModel:\n ...",
"def entity(self, rawbase_name, entity_id):\n url = \"%s/record/%s?id=%s\" % (self.api, rawbase_name, entity_id)\n return self.__get_request(url, 5)",
"def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))",
"def load_entities():\n # TODO dynamic look into entities folder\n return ['location']",
"def get(_id):\n dataset = ESDataset.get(id=_id, ignore=404)\n\n if dataset:\n return RegistryDocument.wraps(dataset)\n\n raise NoEntityError(f\"dataset {_id} does not exist.\")",
"def get_metadata_from_path(path):\n try:\n import yaml\n # assumes index card is in the top-level of path\n index_card = os.path.join(path, \"M_index.yaml\")\n with open(index_card, \"r\") as stream:\n file_info = yaml.safe_load(stream)\n\n metadata_dict = {}\n metadata_dict[\"book_id\"] = file_info[\"book_id\"]\n metadata_dict[\"timestamp_start\"] = file_info[\"start_time\"]\n metadata_dict[\"type\"] = file_info[\"type\"]\n metadata_dict[\"obsid\"] = _convert_book_id_to_obsid(file_info[\"book_id\"])\n # get optional bits\n if \"stop_time\" in file_info:\n metadata_dict[\"timestamp_end\"] = file_info[\"stop_time\"]\n if \"observatory\" in file_info:\n metadata_dict[\"observatory\"] = file_info[\"observatory\"]\n if \"telescope\" in file_info:\n metadata_dict[\"telescope\"] = file_info[\"telescope\"]\n if \"stream_ids\" in file_info:\n metadata_dict[\"stream_ids\"] = file_info[\"stream_ids\"]\n if \"subtype\" in file_info:\n metadata_dict[\"subtype\"] = file_info[\"subtype\"]\n if \"tags\" in file_info:\n metadata_dict[\"tags\"] = file_info[\"tags\"]\n if \"scanification\" in file_info:\n metadata_dict[\"scanification\"] = file_info[\"scanification\"]\n if \"hwp_rate_hz\" in file_info:\n metadata_dict[\"hwp_rate_hz\"] = file_info[\"hwp_rate_hz\"]\n if \"sequencer_ref\" in file_info:\n metadata_dict[\"sequencer_ref\"] = file_info[\"sequencer_ref\"]\n return metadata_dict\n except (ImportError, FileNotFoundError, KeyError):\n pass\n\n return None",
"def get(entity, name=None, version=None, lineage=None):",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def get_metadata (self, name):\n return self.metadata.get(name)",
"def read_facet(self, filename=None, *args, **kwargs): \n # Check if filename exists in metadata.\n if not filename:\n try:\n filename = self.filename\n except:\n print(\"filename must be specified.\")\n result = read_facet(filename, *args, **kwargs)\n for name in self._metadata:\n attr_value = getattr(self, name, None)\n if attr_value and getattr(result, name, None) == None:\n setattr(result, name, attr_value)\n return result",
"def load(self, cloud_identifier):\n with self.container.create_cloud_storage() as storage:\n meta = None\n\n try:\n raw_meta = storage.get_artifact_as_string(cloud_identifier)\n meta = self.codec.deserialize(raw_meta)\n except ArtifactNotFoundError:\n pass\n\n # I don't do this inside the except because\n # if an empty string is returned as the metadata\n # then it will deserialize to None instead of\n # an empty dict.\n if not meta:\n meta = {}\n\n meta = self.mapper.to_response(meta)\n\n return meta",
"def parse_metadata(self, cfg: CFG_DICT) -> None:\n # can_inherit_from() can be over-ridden by subclasses\n # pylint: disable=assignment-from-none\n inherit_from = self.can_inherit_from()\n if self.METADATA_TITLE:\n if self.default_title:\n self.register_metadata(self.get_obj_label(), FLD_TITLE, cast(str, cfg.get(\"title\", self.default_title)))\n else:\n try:\n self.register_metadata(self.get_obj_label(), FLD_TITLE, cast(str, cfg[\"title\"]))\n except KeyError:\n raise ConfigException(f\"Entity {self.get_obj_label()} has no title.\")\n if self.METADATA_ABSTRACT:\n local_abstract = cfg.get(\"abstract\")\n if local_abstract is None and inherit_from is not None:\n self.register_metadata(self.get_obj_label(), FLD_ABSTRACT, inherit_from.abstract, inherited=True)\n elif local_abstract is None and self.default_abstract is not None:\n self.register_metadata(self.get_obj_label(), FLD_ABSTRACT, cast(str, self.default_abstract))\n elif local_abstract is None:\n raise ConfigException(f\"Entity {self.get_obj_label()} has no abstract\")\n else:\n self.register_metadata(self.get_obj_label(), \"abstract\", cast(str, local_abstract))\n if self.METADATA_KEYWORDS:\n local_keyword_set = set(cast(List[str], cfg.get(\"keywords\", [])))\n self.register_metadata(self.get_obj_label(), FLD_KEYWORDS, \",\".join(local_keyword_set))\n if inherit_from:\n keyword_set = inherit_from.keywords\n else:\n keyword_set = set()\n self._keywords = keyword_set.union(local_keyword_set)\n if self.METADATA_ATTRIBUTION:\n inheriting = False\n attrib = cast(MutableMapping[str, str], cfg.get(\"attribution\"))\n if attrib is None and inherit_from is not None:\n attrib = inherit_from.attribution\n inheriting = True\n if attrib:\n attrib_title = attrib.get(\"title\")\n else:\n attrib_title = None\n if attrib_title:\n self.register_metadata(self.get_obj_label(), FLD_ATTRIBUTION, attrib_title, inheriting)\n if self.METADATA_FEES:\n fees = cast(str, cfg.get(\"fees\"))\n if not fees:\n fees = \"none\"\n self.register_metadata(self.get_obj_label(), FLD_FEES, fees)\n if self.METADATA_ACCESS_CONSTRAINTS:\n acc = cast(str, cfg.get(\"access_contraints\"))\n if not acc:\n acc = \"none\"\n self.register_metadata(self.get_obj_label(), FLD_ACCESS_CONSTRAINTS, acc)\n if self.METADATA_CONTACT_INFO:\n cfg_contact_info: MutableMapping[str, str] = cast(MutableMapping[str, str], cfg.get(\"contact_info\", {}))\n org = cfg_contact_info.get(\"organisation\")\n position = cfg_contact_info.get(\"position\")\n if org:\n self.register_metadata(self.get_obj_label(), FLD_CONTACT_ORGANISATION, org)\n if position:\n self.register_metadata(self.get_obj_label(), FLD_CONTACT_POSITION, position)\n if self.METADATA_DEFAULT_BANDS:\n band_map = cast(MutableMapping[str, List[str]], cfg)\n for k, v in band_map.items():\n if len(v):\n self.register_metadata(self.get_obj_label(), k, v[0])\n else:\n self.register_metadata(self.get_obj_label(), k, k)",
"def parse_metadata(self):\n micro_metadata = {}\n ext_meta = None\n self.content_type = 'text/html'\n if self.source_metadata:\n #print(self.source_metadata)\n if len(self.source_metadata)>1:\n try:\n for sm in self.source_metadata:\n if str(sm.get('type').split('/')[-1]).lower() in self.SCHEMA_ORG_CREATIVEWORKS:\n ext_meta = sm\n except:\n pass\n self.source_name = self.getEnumSourceNames().MICRODATA_EMBEDDED\n if not ext_meta:\n ext_meta = self.source_metadata[0]\n\n if ext_meta is not None:\n self.logger.info('FsF-F2-01M : Trying to extract Microdata metadata from -: {}'.format(self.source_name))\n # TODO check syntax - not ending with /, type and @type\n # TODO (important) extend mapping to detect other pids (link to related entities)?\n # TODO replace check_context_type list context comparison by regex\n check_context_type = ['Dataset', 'Collection']\n try:\n #if ext_meta['@context'] in check_context_type['@context'] and ext_meta['@type'] in check_context_type[\"@type\"]:\n if str(ext_meta.get('type')).find('schema.org') > -1:\n micro_metadata = jmespath.search(self.metadata_mapping.value, ext_meta)\n self.namespaces.append('http://schema.org/')\n else:\n self.logger.info('FsF-F2-01M : Failed to parse non schema.org type Microdata')\n except Exception as err:\n #print(err.with_traceback())\n self.logger.info('FsF-F2-01M : Failed to parse Microdata -: {}'.format(err))\n else:\n self.logger.info('FsF-F2-01M : Could not identify Microdata metadata')\n\n return self.source_name, micro_metadata",
"def get_metadata(self, **documentid):\n uid = ''\n if len(documentid) != 1:\n raise TypeError(\"either uid or path\")\n if 'path' in documentid:\n uid = self.get_uid(documentid['path'])\n elif 'uid' in documentid:\n uid = documentid['uid']\n url = u'/'.join([self.conf['api'], \"id\", uid])\n res = self.http.get(\n url, headers=self.document_property_headers, auth=self.auth)\n res.raise_for_status()\n return json.loads(res.content.decode('utf-8'))"
] | [
"0.5949139",
"0.5893366",
"0.5757676",
"0.5671569",
"0.56597054",
"0.55556333",
"0.5489248",
"0.5458346",
"0.5440958",
"0.53803843",
"0.5339604",
"0.5311802",
"0.5290315",
"0.5249724",
"0.5208021",
"0.5197425",
"0.516491",
"0.51528895",
"0.51199585",
"0.50983113",
"0.5068694",
"0.50396556",
"0.5037194",
"0.4996094",
"0.49925256",
"0.49557215",
"0.49532697",
"0.49141216",
"0.4909337",
"0.49036697"
] | 0.60476655 | 0 |
Write ClimatePointStation data to climate point section of metadata for a given project directory. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. | def writeToMetadata(self, context):
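# Build the fully-qualified station ID (type + id, lowercased) used to register this station and key its attributes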
fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id
fqId = fqId.lower()
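# Read existing climate point entries so the current station list can be updated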
climatePoints = GenericMetadata.readClimatePointEntries(context)
try:
stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)
except KeyError:
stations = []
# Write station metadata (overwrite if already present)
keys = []
values = []
if fqId not in stations:
stations.append(fqId)
stationsStr = GenericMetadata.VALUE_DELIM.join(stations)
keys.append('stations'); values.append(stationsStr)
# Write attributes for station
keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP
longitude = keyProto + 'longitude'
keys.append(longitude); values.append(self.longitude)
latitude = keyProto + 'latitude'
keys.append(latitude); values.append(self.latitude)
elevation = keyProto + 'elevation'
keys.append(elevation); values.append(self.elevation)
name = keyProto + 'name'
keys.append(name); values.append(self.name)
if self.startDate:
startDate = keyProto + 'startdate'
keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))
if self.endDate:
endDate = keyProto + 'enddate'
keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))
if self.variables:
variablesKey = keyProto + 'variables'
variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)
keys.append(variablesKey); values.append(variablesValue)
if self.data is not None:
data = keyProto + 'data'
keys.append(data); values.append(self.data)
elif self.variablesData:
# Try to write data entries for each variable separately
vars = self.variablesData.keys()
for var in vars:
varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'
keys.append(varKey); values.append(self.variablesData[var])
GenericMetadata.writeClimatePointEntries(context, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def writeClimatePointEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def write_central(ds):\n ### make central directory:\n try:\n os.mkdir(GOAL_DIR_CENTRAL)\n except OSError:\n print (\"Creation of the directory failed or already exists\") \n target = GOAL_DIR_CENTRAL + \"data.csv\"\n \n ### write central:\n r_list = list(range(len(ds.index)))\n random.Random(RANDOM_SEED).shuffle(r_list)\n total_df = ds.loc[r_list]\n total_df.to_csv(target, index=False)",
"def write_to_dataset(self, dset) -> \"Dataset\":\n # Spring constellation definition\n system_def = {\n \"0\": \"\", # Unknown\n \"1\": \"G\", # GPS\n \"2\": \"R\", # GLONASS\n \"3\": \"S\", # SBAS\n \"4\": \"E\", # Galileo\n \"5\": \"C\", # BeiDou\n \"6\": \"J\", # QZSS\n }\n\n field_spring_to_where = {\n \"Clock\": \"gnss_satellite_clock\",\n \"GroupDelay\": \"gnss_total_group_delay\",\n \"PseudoRange\": \"gnss_range\",\n \"SatInView\": \"num_satellite_available\",\n \"TropoDelay\": \"troposphere_dT\",\n \"UISD\": \"gnss_ionosphere\",\n \"UsedSat\": \"num_satellite_used\",\n \"EastvsRef\": \"site_pos_vs_ref_east\",\n \"NorthvsRef\": \"site_pos_vs_ref_north\",\n \"VerticalvsRef\": \"site_pos_vs_ref_up\",\n }\n\n # Initialize dataset\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"GPSEpoch\"])\n\n # Add time\n dset.add_time(\n \"time\",\n val=[dateutil.parser.parse(v.replace(\"UTC\", \"\")) for v in self.data[\"UTCDateTime\"]],\n scale=\"utc\",\n fmt=\"datetime\",\n write_level=\"operational\",\n )\n\n # Add system field based on Constellation column\n if \"Constellation\" in self.data.keys():\n dset.add_text(\"system\", val=[system_def[str(value)] for value in self.data[\"Constellation\"]])\n\n # Add satellite field based on PRN column\n if \"PRN\" in self.data.keys():\n prn_data = []\n for prn in self.data[\"PRN\"]:\n if prn >= 71 and prn <= 140: # Handling of Galileo satellites\n prn_data.append(\"E\" + str(prn - 70).zfill(2))\n else:\n log.fatal(f\"Spring PRN number '{prn}' is unknown.\")\n\n dset.add_text(\"satellite\", val=prn_data)\n\n # Add position field based on Latitude, Longitude and Height column\n if \"Latitude\" in self.data.keys():\n pos = Position(\n val=np.vstack(\n (self.data[\"Latitude\"] * Unit.deg2rad, self.data[\"Longitude\"] * Unit.deg2rad, self.data[\"Height\"])\n ).T,\n system=\"llh\",\n )\n if \"XPos\" in self.data.keys():\n dset.add_position(\"sat_pos\", itrs=pos.trs, time=\"time\")\n else:\n dset.add_position(\"site_pos\", itrs=pos.trs, time=\"time\")\n\n # Define fields to save in dataset\n remove_time_fields = {\"Constellation\", \"GPSEpoch\", \"GPSWeek\", \"GPSSecond\", \"PRN\", \"\", \"UTCDateTime\"}\n fields = set(self.data.keys()) - remove_time_fields\n\n # Add text and float fields\n for field in fields:\n\n where_fieldname = field_spring_to_where[field] if field in field_spring_to_where.keys() else field.lower()\n\n if self.data[field].dtype.kind in {\"U\", \"S\"}: # Check if numpy type is string\n dset.add_text(where_fieldname, val=self.data[field])\n continue\n\n dset.add_float(where_fieldname, val=self.data[field])\n\n return dset",
"def writeToMetadata(self, context):\n pass",
"def store_climate_observations(\n station_data: pd.DataFrame,\n station_id: int,\n parameter: Parameter,\n time_resolution: TimeResolution,\n period_type: PeriodType,\n folder: Union[str, Path],\n) -> None:\n # Make sure that there is data that can be stored\n if station_data.empty:\n return\n\n request_string = _build_local_store_key(\n station_id, parameter, time_resolution, period_type\n )\n\n local_filepath = build_local_filepath_for_station_data(folder)\n\n local_filepath.parent.mkdir(parents=True, exist_ok=True)\n\n station_data.to_hdf(path_or_buf=local_filepath, key=request_string)",
"def readFromMetadata(cls, context, fqId):\n newInstance = ClimatePointStation()\n (newInstance.type, newInstance.id) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n\n climate = GenericMetadata.readClimatePointEntries(context)\n \n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP\n longitude = keyProto + 'longitude'\n newInstance.longitude = float(climate[longitude])\n latitude = keyProto + 'latitude'\n newInstance.latitude = float(climate[latitude])\n elevation = keyProto + 'elevation'\n newInstance.elevation = float(climate[elevation])\n name = keyProto + 'name'\n newInstance.name = climate[name] \n startDate = keyProto + 'startdate'\n try:\n newInstance.startDate = datetime.strptime(climate[startDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass \n endDate = keyProto + 'enddate'\n try:\n newInstance.endDate = datetime.strptime(climate[endDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass\n variablesKey = keyProto + 'variables'\n try:\n newInstance.variables = climate[variablesKey].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n pass\n data = keyProto + 'data'\n try:\n newInstance.data = climate[data]\n except KeyError:\n pass\n try:\n for var in newInstance.variables:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n newInstance.variablesData[var] = climate[varKey]\n except KeyError:\n pass\n \n return newInstance",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def write_metadata_to_file(self, path):\n return write_metadata_to_ma_file(path, self)",
"def save_metadata(self, directory: pathlib.Path, **kwargs) -> None:\n path_to_metadata = directory / (self.name + \".json\")\n metadata = {\"latent_dim\": self.latent_dim, \"name\": self.name}\n with open(path_to_metadata, \"w\") as f:\n json.dump(metadata, f, indent=4, sort_keys=True, **kwargs)",
"def save_metadata(self, directory: pathlib.Path, **kwargs) -> None:\n path_to_metadata = directory / (self.name + \".json\")\n metadata = {\"latent_dim\": self.latent_dim, \"name\": self.name}\n with open(path_to_metadata, \"w\") as f:\n json.dump(metadata, f, indent=4, sort_keys=True, **kwargs)",
"def writeClimateGridEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value)",
"def _write_attributes_(self):\n #Open the Netcdf GRID file output from PreMOD\n try: dataset = Dataset(self.netcdf_file,'r+',format='NETCDF4')\n except Exception, e:\n print \"ERROR: %s\" % e\n sys.exit()\n\n dataset.title = self.title \n dataset.description = self.description\n dataset.ngh_file = self.ngh_file\n dataset.rtr_file = self.rtr_file\n dataset.netcdf_file = self.netcdf_file\n dataset.epsg = 4326\n dataset.close()",
"def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))",
"def write_prepared_data(prepped: PreparedData, directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n prepped.measurements.to_csv(os.path.join(directory, MEASUREMENTS_FILE))\n with open(os.path.join(directory, COORDS_FILE), \"w\") as f:\n json.dump(prepped.coords, f)\n with open(os.path.join(directory, NAME_FILE), \"w\") as f:\n f.write(prepped.name)",
"def write_metadata(dir_path, fs, *metas, global_metadata=True):\n assert metas\n md = metas[0]\n with fs.open(\"/\".join([dir_path, \"_common_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)\n if global_metadata:\n for meta in metas[1:]:\n md.append_row_groups(meta)\n with fs.open(\"/\".join([dir_path, \"_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)",
"def write_data(city, parsed_data, category_id, f):\n f.write(\"#{}\\n\".format(category_id))\n for event in parsed_data:\n parsed_name = name_parser(event[\"name\"])\n f.write(\"{};{};{};{};{}\\n\".format(event[\"coordinates\"][0],\n event[\"coordinates\"][1],\n event[\"date\"], parsed_name,\n event[\"id\"]))\n f.write(\"!#\\n\")",
"def write_atm_files(data_df, metadata_df, common_header_info, profnum_var, lon_var, lat_var, additional_var,\n altitude_var, pressure_var, temperature_var=None, h2o_var=None, additional_var_atm_name=None,\n out_dir='.', out_prefix='aircraft', atm_name_fmt='{prefix}_{lat}_{lon}_{date}_{profnum}_{var}.atm',\n skip_nans=True, min_num_pts=0):\n unit_in_meta = 'unit' in metadata_df.index\n units_in_meta = 'units' in metadata_df.index\n if unit_in_meta and units_in_meta:\n raise ValueError('metadata_df contains both \"unit\" and \"units\" rows, please consolidate to one or the other')\n elif units_in_meta:\n unit_key = 'units'\n elif unit_in_meta:\n unit_key = 'unit'\n else:\n raise ValueError('metadata_df contains neither \"unit\" not \"units\" rows, one must be present')\n\n def atm_colname(atm_name, aircraft_varname, default='?'):\n if aircraft_varname is None:\n return '{}_{}'.format(atm_name, default)\n else:\n return '{}_{}'.format(atm_name, metadata_df.loc[unit_key, aircraft_varname])\n\n prof_inds = data_df[profnum_var]\n unique_prof_inds = prof_inds.unique()\n\n # Construct the mapping between data frame column names and .atm column names. The latter are typically formatted\n # {name}_{unit}\n var_mapping = OrderedDict()\n var_mapping['altitude'] = (atm_colname('Altitude', altitude_var), altitude_var)\n var_mapping['pressure'] = (atm_colname('Pressure', pressure_var), pressure_var)\n var_mapping['temperature'] = (atm_colname('Temperature', temperature_var, 'C'), temperature_var)\n var_mapping['h2o'] = (atm_colname('H2O_profile', h2o_var, 'ppm'), h2o_var)\n var_mapping['additional'] = (atm_colname(additional_var_atm_name + '_profile', additional_var), additional_var)\n\n for uind in unique_prof_inds:\n this_header_info = deepcopy(common_header_info)\n if uind <= 0:\n # Assume any profiles have a number > 0 and that <= 0 indicates a non-profile segment\n continue\n\n logger.info('Generating .atm file for profile {}'.format(uind))\n\n pp = prof_inds == uind\n all_data = OrderedDict()\n utc_dates = data_df.index[pp]\n for atm_name, old_name in var_mapping.values():\n if old_name is not None:\n all_data[atm_name] = data_df.loc[pp, old_name]\n else:\n all_data[atm_name] = pd.Series(np.full([pp.sum()], np.nan), index=utc_dates)\n\n if skip_nans:\n n_points = np.sum(~np.isnan(all_data[var_mapping['additional'][0]]))\n else:\n n_points = np.size(all_data[var_mapping['additional'][0]])\n\n if n_points < min_num_pts:\n logger.important('Profile {prof_num} has < {min_num} ({num_valid}/{num_total}) valid points, skipping'.format(\n prof_num=uind, min_num=min_num_pts, num_valid=n_points, num_total=np.size(all_data[var_mapping['additional'][0]]))\n )\n continue\n\n # Match to tccon site\n prof_datetime = utc_dates.min()\n prof_date = pd.Timestamp(prof_datetime.year, prof_datetime.month, prof_datetime.day)\n lat = np.nanmean(data_df.loc[pp, lat_var].to_numpy())\n lon = data_df.loc[pp, lon_var].to_numpy()\n lon[lon > 180] -= 360\n\n dateline_lon_signs = np.sign(lon[np.abs(lon) > 175])\n if dateline_lon_signs.size > 0 and not np.all(dateline_lon_signs == dateline_lon_signs[0]):\n logger.debug('Longitude straddles date line')\n lon[lon < 0] += 360\n lon = np.nanmean(lon)\n if lon > 180:\n lon -= 360\n else:\n lon = np.nanmean(lon)\n\n tccon_site = _match_to_tccon_site(lon, lat, prof_date)\n this_header_info.update(tccon_site)\n\n this_atm_file = atm_name_fmt.format(prefix=out_prefix,\n lon=mod_utils.format_lon(lon, prec=1),\n lat=mod_utils.format_lat(lat, prec=1),\n 
date=prof_date.strftime(_atm_filedate_fmt),\n profnum=uind,\n var=additional_var_atm_name)\n this_atm_file = os.path.join(out_dir, this_atm_file)\n _write_single_atm_file(this_atm_file, data=all_data, utc_dates=utc_dates, var_mapping=var_mapping,\n header_info=this_header_info, skip_nans=skip_nans)",
"def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)",
"def _write(self):\n\n output_path = os.path.join(config.S3_OUTPUT, config.DATAFRAME_ARTISTS)\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n\n print('Writing dataframe to {}'.format(output_path))\n\n dataframe.write.parquet(\n output_path,\n mode='overwrite'\n )",
"def writeCheckpoint(self, metadata_key, times):\r\n # Make sure that the directory exists\r\n try:\r\n os.makedirs(self.tmpDir)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n\r\n # Open a temporary file\r\n (file_handle, tmp_filename) = tempfile.mkstemp(dir=self.tmpDir)\r\n wrapped_file = os.fdopen(file_handle, 'w')\r\n wrapped_file.write(json.dumps(times))\r\n wrapped_file.close()\r\n os.rename(tmp_filename, self.tmpDir + metadata_key)",
"def write_cmd_metadata(path):\n\n metadata = get_cmd_metadata()\n message = metadata.all()\n df = message.df\n df.to_csv(path)",
"def _StageMetadata(json_metadata, storage_service, staged_file: str):\n # Write computed metadata to object storage.\n temp_run_dir = temp_dir.GetRunDirPath()\n local_file = os.path.join(temp_run_dir, os.path.basename(staged_file))\n with open(local_file, 'w') as f:\n json.dump(json_metadata, f)\n storage_service.Copy(local_file, staged_file)",
"def _write_outputs(self):\n\n #########################\n # Create necessary variables for generic metadata file, as well as\n # generate and fill metadata file, if user wants it\n record_start = pd.to_datetime(self.dt_array[0]).date()\n record_end = pd.to_datetime(self.dt_array[-1]).date()\n\n if self.metadata_mode == 1 and self.script_mode == 1:\n # user wants to fill metadata and it is the correct mode\n\n # First check to see if metadata file already exists\n if not os.path.isfile('correction_metadata.xlsx'):\n # file does not exist, create new one\n metadata_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n metadata_info.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n else:\n # file is already created, so we need to read it in, append our new information to the bottom of it\n # and then save the info\n metadata_info = pd.read_excel('correction_metadata.xlsx', sheet_name=0, index_col=None, engine='xlrd',\n keep_default_na=False, verbose=True, skip_blank_lines=True)\n\n new_meta_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n output_metadata = pd.concat([metadata_info, new_meta_info], ignore_index=True)\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n output_metadata.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n\n else:\n # do nothing\n pass\n\n # if we are using a network-specific metadata file, we need to update the run count to pass it on\n if self.metadata_path is not None:\n current_row = self.metadata_df.run_count.ne(2).idxmax() - 1\n current_run = self.metadata_df.run_count.iloc[current_row] + 1\n\n self.metadata_df.run_count.iloc[current_row] = current_run\n self.metadata_df.record_start.iloc[current_row] = record_start\n self.metadata_df.record_end.iloc[current_row] = record_end\n self.metadata_df.output_path.iloc[current_row] = self.output_file_path\n\n with pd.ExcelWriter(self.metadata_path, date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD', engine='openpyxl', mode='w') as writer:\n self.metadata_df.to_excel(writer, header=True, index=True, sheet_name='Sheet1')\n\n #########################\n # Generate output file\n # Create any final variables, then create panda dataframes to save all the data\n # Includes the following sheets:\n # Corrected Data : Actual corrected values\n # Delta : Magnitude of difference between original data and corrected data\n # Filled Data : Tracks which data points have been filled by script generated values instead of provided\n # Data that is provided and subsequently corrected by the script do not count as filled values.\n print(\"\\nSystem: Saving corrected data to .xslx file.\")\n\n # Create any individually-requested output data\n ws_2m = 
_wind_height_adjust(uz=self.data_ws, zw=self.ws_anemometer_height)\n\n # Create corrected-original delta numpy arrays\n diff_tavg = np.array(self.data_tavg - self.original_df.tavg)\n diff_tmax = np.array(self.data_tmax - self.original_df.tmax)\n diff_tmin = np.array(self.data_tmin - self.original_df.tmin)\n diff_tdew = np.array(self.data_tdew - self.original_df.tdew)\n diff_ea = np.array(self.data_ea - self.original_df.ea)\n diff_rhavg = np.array(self.data_rhavg - self.original_df.rhavg)\n diff_rhmax = np.array(self.data_rhmax - self.original_df.rhmax)\n diff_rhmin = np.array(self.data_rhmin - self.original_df.rhmin)\n diff_rs = np.array(self.data_rs - self.original_df.rs)\n diff_rs_tr = np.array(self.opt_rs_tr - self.orig_rs_tr)\n diff_rso = np.array(self.rso - self.original_df.rso)\n diff_ws = np.array(self.data_ws - self.original_df.ws)\n diff_precip = np.array(self.data_precip - self.original_df.precip)\n diff_etr = np.array(self.etr - self.original_df.etr)\n diff_eto = np.array(self.eto - self.original_df.eto)\n\n # Create datetime for output dataframe\n datetime_df = pd.DataFrame({'year': self.data_year, 'month': self.data_month, 'day': self.data_day})\n datetime_df = pd.to_datetime(datetime_df[['month', 'day', 'year']])\n\n # Create output dataframe\n output_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': self.data_tavg, 'TMax (C)': self.data_tmax,\n 'TMin (C)': self.data_tmin, 'TDew (C)': self.data_tdew,\n 'Vapor Pres (kPa)': self.data_ea, 'RHAvg (%)': self.data_rhavg,\n 'RHMax (%)': self.data_rhmax, 'RHMin (%)': self.data_rhmin, 'Rs (w/m2)': self.data_rs,\n 'Opt_Rs_TR (w/m2)': self.opt_rs_tr, 'Rso (w/m2)': self.rso,\n 'Windspeed (m/s)': self.data_ws, 'Precip (mm)': self.data_precip,\n 'ETr (mm)': self.etr, 'ETo (mm)': self.eto, 'ws_2m (m/s)': ws_2m},\n index=datetime_df)\n\n # Creating difference dataframe to track amount of correction\n delta_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': diff_tavg, 'TMax (C)': diff_tmax,\n 'TMin (C)': diff_tmin, 'TDew (C)': diff_tdew,\n 'Vapor Pres (kPa)': diff_ea, 'RHAvg (%)': diff_rhavg, 'RHMax (%)': diff_rhmax,\n 'RHMin (%)': diff_rhmin, 'Rs (w/m2)': diff_rs, 'Opt - Orig Rs_TR (w/m2)': diff_rs_tr,\n 'Rso (w/m2)': diff_rso, 'Windspeed (m/s)': diff_ws, 'Precip (mm)': diff_precip,\n 'ETr (mm)': diff_etr, 'ETo (mm)': diff_eto}, index=datetime_df)\n\n # Creating a fill dataframe that tracks where missing data was filled in\n fill_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TMax (C)': self.fill_tmax, 'TMin (C)': self.fill_tmin,\n 'TDew (C)': self.fill_tdew, 'Vapor Pres (kPa)': self.fill_ea, 'Rs (w/m2)': self.fill_rs,\n 'Complete Record Rso (w/m2)': self.fill_rso},\n index=datetime_df)\n\n # Open up pandas excel writer\n output_writer = pd.ExcelWriter(self.output_file_path, engine='xlsxwriter')\n # Convert data frames to xlsxwriter excel objects\n output_df.to_excel(output_writer, sheet_name='Corrected Data', na_rep=self.missing_fill_value)\n delta_df.to_excel(output_writer, sheet_name='Delta (Corr - Orig)', na_rep=self.missing_fill_value)\n fill_df.to_excel(output_writer, sheet_name='Filled Data', na_rep=self.missing_fill_value)\n # Save output file\n output_writer.save()\n\n logger = open(self.log_file, 'a')\n if self.script_mode == 1 and self.fill_mode == 1:\n if np.isnan(self.eto).any() or np.isnan(self.etr).any():\n 
print(\"\\nSystem: After finishing corrections and filling data, \"\n \"ETr and ETo still had missing observations.\")\n logger.write('After finishing corrections and filling data, '\n 'ETr and ETo still had missing observations. \\n')\n else:\n logger.write('The output file for this station has a complete record of ETo and ETr observations. \\n')\n else:\n pass\n logger.write('\\nThe file has been successfully processed and output files saved at %s.' %\n dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n logger.close()",
"def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))",
"def write_point(datum):\n measurement = {\n \"measurement\": \"weather\",\n \"tags\": {\n \"location\": LOCATION\n },\n \"time\": datetime.now().isoformat(),\n \"fields\": datum\n }\n CHANNEL.basic_publish(exchange='',\n routing_key='scribe',\n body=json.dumps(measurement))",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def make_points_file(in_path, grid_id_name='GRIDMET_ID'):\n if not os.path.isfile(in_path):\n raise FileNotFoundError('Input summary CSV file: '+\\\n '{}\\nwas not found.'.format(in_path))\n print(\n '\\nMapping point data for climate stations in: \\n',\n in_path, '\\n'\n )\n in_df = pd.read_csv(in_path, index_col='STATION_ID', na_values=[-999])\n # add in potentially missing columns to avoid errors when no ratios exist\n # in input that are expected by schema/attribute table\n missing_vars = list(set(PT_ATTRS).difference(in_df.columns))\n in_df = in_df.reindex(columns=list(in_df.columns) + missing_vars)\n # save shapefile to \"spatial\" subdirectory of in_path\n path_root = os.path.split(in_path)[0]\n file_name = os.path.split(in_path)[1]\n # get variable name from input file prefix\n var_name = file_name.split('_summ')[0]\n out_dir = OPJ(path_root, 'spatial')\n out_file = OPJ(out_dir, '{v}_summary_pts.shp'.format(v=var_name))\n print( \n 'Creating point shapefile of station bias ratios, saving to: \\n',\n os.path.abspath(out_file),\n '\\n'\n )\n # create output directory if does not exist\n if not os.path.isdir(out_dir):\n print(\n out_dir, \n ' does not exist, creating directory.\\n'\n )\n os.mkdir(out_dir)\n\n crs = from_epsg(4326) # WGS 84 projection\n # attributes of shapefile\n schema = { \n 'geometry': 'Point', \n 'properties': { \n 'Jan': 'float',\n 'Feb': 'float',\n 'Mar': 'float',\n 'Apr': 'float',\n 'May': 'float',\n 'Jun': 'float',\n 'Jul': 'float',\n 'Aug': 'float',\n 'Sep': 'float',\n 'Oct': 'float',\n 'Nov': 'float',\n 'Dec': 'float',\n 'summer': 'float',\n 'growseason': 'float',\n 'annual': 'float',\n 'Jan_cnt': 'float',\n 'Feb_cnt': 'float',\n 'Mar_cnt': 'float',\n 'Apr_cnt': 'float',\n 'May_cnt': 'float',\n 'Jun_cnt': 'float',\n 'Jul_cnt': 'float',\n 'Aug_cnt': 'float',\n 'Sep_cnt': 'float',\n 'Oct_cnt': 'float',\n 'Nov_cnt': 'float',\n 'Dec_cnt': 'float',\n 'summer_cnt': 'float',\n 'grow_cnt': 'float',\n 'annual_cnt': 'float',\n 'Jan_std': 'float',\n 'Feb_std': 'float',\n 'Mar_std': 'float',\n 'Apr_std': 'float',\n 'May_std': 'float',\n 'Jun_std': 'float',\n 'Jul_std': 'float',\n 'Aug_std': 'float',\n 'Sep_std': 'float',\n 'Oct_std': 'float',\n 'Nov_std': 'float',\n 'Dec_std': 'float',\n 'summer_std': 'float',\n 'grow_std': 'float',\n 'annual_std': 'float',\n 'Jan_cv': 'float',\n 'Feb_cv': 'float',\n 'Mar_cv': 'float',\n 'Apr_cv': 'float',\n 'May_cv': 'float',\n 'Jun_cv': 'float',\n 'Jul_cv': 'float',\n 'Aug_cv': 'float',\n 'Sep_cv': 'float',\n 'Oct_cv': 'float',\n 'Nov_cv': 'float',\n 'Dec_cv': 'float',\n 'summer_cv': 'float',\n 'grow_cv': 'float',\n 'annual_cv': 'float',\n 'STATION_ID': 'str',\n grid_id_name: 'int'\n }}\n\n # remove nans- gdal will not recognize \n in_df = in_df.where(pd.notnull(in_df), None)\n\n # create shapefile from points, overwrite if exists\n with collection(\n out_file, 'w', \n driver='ESRI Shapefile', \n crs=crs, \n schema=schema) as output:\n # loop through stations and add point data to shapefile\n for index, row in in_df.iterrows():\n print(\n 'Saving point data for station: ',\n index, \n )\n point = Point(float(row.STATION_LON), float(row.STATION_LAT))\n output.write({\n 'properties': {\n 'Jan': row['Jan_mean'],\n 'Feb': row['Feb_mean'],\n 'Mar': row['Mar_mean'],\n 'Apr': row['Apr_mean'],\n 'May': row['May_mean'],\n 'Jun': row['Jun_mean'],\n 'Jul': row['Jul_mean'],\n 'Aug': row['Aug_mean'],\n 'Sep': row['Sep_mean'],\n 'Oct': row['Oct_mean'],\n 'Nov': row['Nov_mean'],\n 'Dec': row['Dec_mean'],\n 'summer': row['summer_mean'],\n 
'growseason': row['growseason_mean'],\n 'annual': row['annual_mean'],\n 'Jan_cnt': row['Jan_count'],\n 'Feb_cnt': row['Feb_count'],\n 'Mar_cnt': row['Mar_count'],\n 'Apr_cnt': row['Apr_count'],\n 'May_cnt': row['May_count'],\n 'Jun_cnt': row['Jun_count'],\n 'Jul_cnt': row['Jul_count'],\n 'Aug_cnt': row['Aug_count'],\n 'Sep_cnt': row['Sep_count'],\n 'Oct_cnt': row['Oct_count'],\n 'Nov_cnt': row['Nov_count'],\n 'Dec_cnt': row['Dec_count'],\n 'summer_cnt': row['summer_count'],\n 'grow_cnt': row['growseason_count'],\n 'annual_cnt': row['annual_count'],\n 'Jan_std': row['Jan_stdev'],\n 'Feb_std': row['Feb_stdev'],\n 'Mar_std': row['Mar_stdev'],\n 'Apr_std': row['Apr_stdev'],\n 'May_std': row['May_stdev'],\n 'Jun_std': row['Jun_stdev'],\n 'Jul_std': row['Jul_stdev'],\n 'Aug_std': row['Aug_stdev'],\n 'Sep_std': row['Sep_stdev'],\n 'Oct_std': row['Oct_stdev'],\n 'Nov_std': row['Nov_stdev'],\n 'Dec_std': row['Dec_stdev'],\n 'summer_std': row['summer_stdev'],\n 'grow_std': row['growseason_stdev'],\n 'annual_std': row['annual_stdev'],\n 'Jan_cv': row['Jan_cv'],\n 'Feb_cv': row['Feb_cv'],\n 'Mar_cv': row['Mar_cv'],\n 'Apr_cv': row['Apr_cv'],\n 'May_cv': row['May_cv'],\n 'Jun_cv': row['Jun_cv'],\n 'Jul_cv': row['Jul_cv'],\n 'Aug_cv': row['Aug_cv'],\n 'Sep_cv': row['Sep_cv'],\n 'Oct_cv': row['Oct_cv'],\n 'Nov_cv': row['Nov_cv'],\n 'Dec_cv': row['Dec_cv'],\n 'summer_cv': row['summer_cv'],\n 'grow_cv': row['growseason_cv'],\n 'annual_cv': row['annual_cv'],\n 'STATION_ID': index,\n grid_id_name: row[grid_id_name]\n },\n 'geometry': mapping(point)\n }\n )"
] | [
"0.6422649",
"0.63623416",
"0.5904371",
"0.5384812",
"0.53518283",
"0.53402686",
"0.5301591",
"0.5273172",
"0.5256313",
"0.52478206",
"0.52091837",
"0.52091837",
"0.5190675",
"0.5180507",
"0.5165975",
"0.5148435",
"0.5120567",
"0.510558",
"0.50966644",
"0.5089502",
"0.5077285",
"0.505639",
"0.50251484",
"0.50212973",
"0.50119066",
"0.5011453",
"0.4999434",
"0.49926096",
"0.49827006",
"0.498222"
] | 0.73874813 | 0 |
Write ModelRun data to model run section of metadata for a given project directory. Will set the run number to the value to be stored in metadata. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. Raises an exception if the model type is not known, or if the section is not a valid GenericMetadata section. | def writeToMetadata(self, context):
if self.modelType not in GenericMetadata.MODEL_TYPES:
raise Exception("Model type %s is not among known model types: %s" % (self.modelType, str(GenericMetadata.MODEL_TYPES) ) )
modelRunEntries = GenericMetadata.readModelRunEntries(context)
try:
runs = modelRunEntries['runs'].split(GenericMetadata.VALUE_DELIM)
except KeyError:
runs = []
# Collect model run entry keys and values into lists so we can write to the metadata store in batch
keys = []
values = []
# Generate unique identifier for this model run. Unique ID is a combination of model type and a number
entryNumber = 1
fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)
while fqId in runs:
entryNumber += 1
fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)
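# Record the first unused run number on this instance so callers can reference it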
self.runNumber = entryNumber
# Add new run to list of runs
runs.append(fqId)
runsStr = GenericMetadata.VALUE_DELIM.join(runs)
keys.append('runs'); values.append(runsStr)
# Write attributes for run
keyProto = fqId + GenericMetadata.KEY_SEP
runDate = keyProto + 'date_utc'
keys.append(runDate); values.append( self.date.strftime(ModelRun.FMT_DATE) )
runDesc = keyProto + 'description'
keys.append(runDesc); values.append(self.description)
runCmd = keyProto + 'command'
keys.append(runCmd); values.append(self.command)
runOutput = keyProto + 'output'
keys.append(runOutput); values.append(self.output)
# Write to metadata
GenericMetadata.writeModelRunEntries(context, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def run_metadata(self, run_metadata):\n\n if run_metadata is not None:\n run_metadata = self._validate_run_metadata(run_metadata)\n runs = ListDict()\n runs.append(run_metadata)\n runs.extend(\n self.station_metadata.runs, skip_keys=[run_metadata.id, \"0\"]\n )\n self._survey_metadata.stations[0].runs = runs",
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def _setup_run(cfg: Dict) -> Dict:\n now = datetime.now()\n day = f\"{now.day}\".zfill(2)\n month = f\"{now.month}\".zfill(2)\n hour = f\"{now.hour}\".zfill(2)\n minute = f\"{now.minute}\".zfill(2)\n run_name = f'run_{day}{month}_{hour}{minute}_seed{cfg[\"seed\"]}'\n # cfg[\"run_dir\"] = Path(__file__).absolute().parent / \"runs\" / run_name\n cfg[\"run_dir\"] = cfg[\"run_dir\"] / run_name\n if not cfg[\"run_dir\"].is_dir():\n cfg[\"train_dir\"] = cfg[\"run_dir\"] / \"data\" / \"train\"\n cfg[\"train_dir\"].mkdir(parents=True)\n cfg[\"val_dir\"] = cfg[\"run_dir\"] / \"data\" / \"val\"\n cfg[\"val_dir\"].mkdir(parents=True)\n else:\n raise RuntimeError(f\"There is already a folder at {cfg['run_dir']}\")\n\n # dump a copy of cfg to run directory\n with (cfg[\"run_dir\"] / \"cfg.json\").open(\"w\") as fp:\n temp_cfg = {}\n for key, val in cfg.items():\n if isinstance(val, PosixPath):\n temp_cfg[key] = str(val)\n elif isinstance(val, Dict):\n for k in val:\n if isinstance(val[k], PosixPath):\n val[k] = str(val[k])\n elif isinstance(val, pd.Timestamp):\n temp_cfg[key] = val.strftime(format=\"%d%m%Y\")\n else:\n temp_cfg[key] = val\n json.dump(temp_cfg, fp, sort_keys=True, indent=4)\n\n return cfg",
"def _save_run(index, session, artifact_id, conf, sandbox, run_result):\n run = index.Run()\n run.artifact_id = artifact_id\n run.pps_id = conf['identifiers']['pps_id']\n run.environment_id = run_result.environment_id\n run.challenge_problem_id = conf['identifiers']['challenge_problem_id']\n run.team_id = conf['identifiers']['team_id']\n run.dataset_label = conf['identifiers']['dataset_label']\n\n run.started = long(run_result.start_time)\n run.duration = run_result.runtime\n run.load_average = run_result.load_average\n run.load_max = run_result.load_max\n run.ram_average = long(run_result.ram_average) * 1024\n run.ram_max = long(run_result.ram_max) * 1024\n\n # Save the artifact configuration.\n try:\n artifact_config_path = os.path.abspath(run_result.config_file_path)\n run.artifact_configuration = db.Index.migrate(\n artifact_config_path\n )\n except AttributeError:\n assert run_result.config_file_path is None\n\n \"\"\"\n XXX : we were catching 'NOENT' OS_EXCEPTION\n \"\"\"\n\n # output should always have content\n run.output = db.Index.migrate(utility.path_walk(run_result.output_dir))\n\n # it is not required that logs file/directory is populated\n try:\n run.log = db.Index.migrate(run_result.log_path)\n except db.Empty_Migrate:\n run.log = None\n\n # it is not required that trace file/directory is populated\n try:\n run.trace = db.Index.migrate(utility.path_walk(run_result.trace_dir))\n except db.Empty_Migrate:\n run.trace = None\n\n\n try:\n session.add(run)\n session.commit()\n return run.run_id\n except Exception as e:\n # Clear the saved data.\n for blob_id in (run.artifact_configuration, run.output, run.log,\n run.trace):\n # We'd like to use EAFP here, but Python 2 doesn't support\n # exception chaining (see PEP 3134), so EAFP would require\n # making this its own function. LBYL is more elegant,\n # anyway.\n if blob_id:\n db.Index.remove_blob(blob_id)\n raise e",
"def extract_run(self, filepath, run_idx,\n continue_run=None,\n run_slice=None,\n **kwargs):\n\n # close ourselves if not already done, so we can write using\n # the lower level API\n was_open = False\n if not self.closed:\n self.close()\n was_open = True\n\n # do the copying\n\n # open the other file and get the runs in it and the\n # continuations it has\n wepy_h5 = WepyHDF5(filepath, mode='r')\n\n\n\n with self:\n # normalize our HDF5s path\n self_path = osp.realpath(self.filename)\n # the run index in this file, as determined by the counter\n here_run_idx = self.next_run_idx()\n\n # get the group name for the new run in this HDF5\n target_grp_path = \"/runs/{}\".format(here_run_idx)\n\n with wepy_h5:\n # link the next run, and get its new run index\n new_h5 = wepy_h5.copy_run_slice(run_idx, self_path,\n target_grp_path,\n run_slice=run_slice,\n mode='r+')\n\n # close it since we are done\n new_h5.close()\n\n\n with self:\n\n # run the initialization routines for adding a run, just\n # sets some metadata\n self._add_run_init(here_run_idx, continue_run=continue_run)\n\n run_grp = self._h5['{}/{}'.format(RUNS, here_run_idx)]\n\n # add metadata if given\n for key, val in kwargs.items():\n if key != RUN_IDX:\n run_grp.attrs[key] = val\n else:\n warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)\n\n if was_open:\n self.open()\n\n return here_run_idx",
"def write_run(run):\n r=Run(run)\n r.write_all()",
"def add_run_metadata(self, run_metadata, tag, global_step=None):\n if tag in self._session_run_tags:\n raise ValueError(\"The provided tag was already used for this event type\")\n self._session_run_tags[tag] = True\n\n tagged_metadata = event_pb2.TaggedRunMetadata()\n tagged_metadata.tag = tag\n # Store the `RunMetadata` object as bytes in order to have postponed\n # (lazy) deserialization when used later.\n tagged_metadata.run_metadata = run_metadata.SerializeToString()\n event = event_pb2.Event(tagged_run_metadata=tagged_metadata)\n self._add_event(event, global_step)",
"def write(self, model, **kwargs):\n self.section_line_list = []\n self.node_string_list = []\n self.node_connector_string_list = []\n self.node_connector_string_mapping = (\n {}\n ) # A mapping of the node and index to the section\n self.bus_string_list = (\n []\n ) # Only used for nodes - not nodes derived from PV, Loads or Capacitors\n self.nodeID_list = []\n self.sectionID_list = []\n self.section_feeder_mapping = {}\n self.section_line_feeder_mapping = {}\n self.section_headnode_mapping = {}\n\n # Verbose print the progress\n if \"verbose\" in kwargs and isinstance(kwargs[\"verbose\"], bool):\n self.verbose = kwargs[\"verbose\"]\n else:\n self.verbose = False\n\n # Writing the load file\n if self.verbose:\n logger.info(\"Writing the load file...\")\n self.write_load_file(model, **kwargs)\n\n # Writing the network file\n if self.verbose:\n logger.info(\"Writing the network file...\")\n self.write_network_file(model, **kwargs)\n\n # Writing the equipment file\n if self.verbose:\n logger.info(\"Writing the equipment file...\")\n self.write_equipment_file(model, **kwargs)",
"def model_on_directory(args):\n # Types of valid models to run\n type_list = [\"lda\", \"multicore\", \"dtm\", \"ldaseq\"]\n if args.model_type not in type_list:\n print(\"\\\"\" + args.model_type + \"\\\" is not a valid model type. Please specify a valid model type (\" + \", \".join(type_list) + \").\", sys.stderr)\n sys.exit(1)\n\n # Prefix for running lda (modify if files should go to a different directory)\n # FORMAT: \"/work/clambert/models/model_type/YYYYMMDD/HH-MM-SS\"\n suffix = \"-\" + args.suffix if args.suffix else \"\"\n pre = args.save_model_dir + args.model_type + \"/\" + time.strftime(\"%Y%m%d\") + \"/\" + time.strftime(\"%H-%M-%S\") + suffix +\"/\"\n\n if not os.path.exists(pre):\n os.makedirs(pre)\n\n print(timestamp() + \" Model(s) will be saved to\", pre, file=sys.stderr)\n print_params(pre, args)\n\n print(timestamp() + \" Processing corpus.\", file=sys.stderr)\n files_dict, time_slices = order_files(args)\n print(timestamp() + \" Time slices:\", time_slices)\n # Loop for some model types\n if args.model_type in [\"lda\", \"multicore\"]:\n for year, docs in files_dict.items():\n if len(files_dict.items()) == 1:\n year = \"\"\n temp_pre = pre\n else:\n temp_pre = os.path.join(pre, str(year) + \"/\")\n if not os.path.exists(temp_pre):\n os.makedirs(temp_pre)\n print(model_for_year(args, year, docs, temp_pre, time_slices))\n\n # Dynamic models only need to be run once\n elif args.model_type in [\"dtm\", \"ldaseq\"]:\n docs = []\n for year, file_list in files_dict.items():\n docs += file_list\n\n model_for_year(args, None, docs, pre, time_slices)\n print(timestamp() + \" Done.\", file=sys.stderr)",
"def write(nmrCalcRun, targetDir):\n \n intIo.writeDataFiles(nmrCalcRun, targetDir)\n \n jsonDict = intIo.makeJsonDict(nmrCalcRun)\n \n \n # write properties file (must be done at the end\n propFile = uniIo.joinPath(targetDir, intIo.propFileName)\n print 'About to write', propFile\n open(propFile,'w').write(json.dumps(jsonDict, sort_keys=True, \n indent=intIo.propIndent))",
"def oldwrite(nmrCalcRun, topDir=None):\n targetDir = intIo.createTargetDir(nmrCalcRun, topDir=topDir)\n \n # write Properties file\n propDict, dummy = intIo.makeParameterDict(nmrCalcRun)\n propFile = uniIo.joinPath(targetDir, intIo.propFileName)\n open(propFile,'w').write(json.dumps(propDict, sort_keys=True, \n indent=intIo.propIndent))\n \n # Write data file\n \n # get file\n inpFile = propDict.get('INPUT_FILE')\n if inpFile:\n talosInputFile = uniIo.joinPath(targetDir, inpFile)\n else:\n raise Exception(\"No code=INPUT_FILE RunParameter in nmrCalcRun\")\n \n # Get shiftList\n shiftList = intUtil.getMeasurementList(nmrCalcRun)\n if shiftList is None:\n raise Exception(\"Run must have exactly one shift list, %s found\" % len(ll))\n \n # Get residues\n ll = list(nmrCalcRun.findAllData(className='MolResidueData'))\n if len(ll) == 1:\n obj = ll.pop()\n residues = list(obj.residues)\n residues.sort(key=operator.attrgetter('seqId'))\n if len(set(obj.chainCodes)) != 1:\n raise Exception(\"Run MolResidueData did not have a (single) chain\" )\n else:\n raise Exception(\"Run must have excactly one MolResidueData, %s found\" % len(ll))\n \n # \n talosIo.writeShiftFile(open(talosInputFile,'w'), residues, shiftList,\n propDict.get('minShiftQuality'))",
"def runModel(quickLogger,\n\t base,\n modelFile=\"\",\n\t irfs=\"P7SOURCE_V6\",\n run=True):\n \n if(modelFile):\n model = modelFile\n else:\n model = base+\"_likeMinuit.xml\"\n\n\n try:\n checkForFiles(quickLogger,\n [base+\"_srcMaps.fits\",\n model,\n base+\"_ltcube.fits\",\n base+\"_BinnedExpMap.fits\"])\n except(FileNotFound):\n quickLogger.critical(\"One or more needed files do not exist.\")\n return\n\n model_map['srcmaps'] = base+\"_srcMaps.fits\"\n model_map['srcmdl'] = model\n model_map['outfile'] = base+\"_modelMap.fits\"\n model_map['expcube'] = base+\"_ltcube.fits\"\n model_map['irfs'] = irfs\n model_map['bexpmap'] = base+\"_BinnedExpMap.fits\"\n \n runCommand(model_map,quickLogger,run)",
"def _generate_model_metadata(out_file, model):\n # Define which FirstLevelModel attributes are BIDS compliant and which\n # should be bundled in a new \"ModelParameters\" field.\n DATA_ATTRIBUTES = [\n \"t_r\",\n ]\n PARAMETER_ATTRIBUTES = [\n \"drift_model\",\n \"hrf_model\",\n \"standardize\",\n \"high_pass\",\n \"target_shape\",\n \"signal_scaling\",\n \"drift_order\",\n \"scaling_axis\",\n \"smoothing_fwhm\",\n \"target_affine\",\n \"slice_time_ref\",\n \"fir_delays\",\n ]\n ATTRIBUTE_RENAMING = {\n \"t_r\": \"RepetitionTime\",\n }\n\n # Fields for the top level of the dictionary\n DATA_ATTRIBUTES.sort()\n data_attributes = {\n attr_name: getattr(model, attr_name)\n for attr_name in DATA_ATTRIBUTES\n if hasattr(model, attr_name)\n }\n data_attributes = {\n ATTRIBUTE_RENAMING.get(k, k): v for k, v in data_attributes.items()\n }\n\n # Fields for a nested section of the dictionary\n # The ModelParameters field is an ad-hoc way to retain useful info.\n PARAMETER_ATTRIBUTES.sort()\n model_attributes = {\n attr_name: getattr(model, attr_name)\n for attr_name in PARAMETER_ATTRIBUTES\n if hasattr(model, attr_name)\n }\n model_attributes = {\n ATTRIBUTE_RENAMING.get(k, k): v for k, v in model_attributes.items()\n }\n\n model_metadata = {\n \"Description\": \"A statistical map generated by Nilearn.\",\n **data_attributes,\n \"ModelParameters\": model_attributes,\n }\n\n with open(out_file, \"w\") as f_obj:\n json.dump(model_metadata, f_obj, indent=4, sort_keys=True)",
"def set_model_output(self, path):\n\n file = f'model_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.model_output_file = path_inc(path, file)",
"def _write_params_file(model_config: base_model_params.BaseModelParams,\n job_log_dir: str) -> None:\n if jax.process_index() == 0:\n params_fpath = os.path.join(job_log_dir, 'model_params.txt')\n if not tf.io.gfile.exists(job_log_dir):\n tf.io.gfile.makedirs(job_log_dir)\n with tf.io.gfile.GFile(params_fpath, 'w') as params_file:\n datasets = model_config.datasets()\n for dataset in datasets:\n params_file.write(dataset.ToText())\n params_file.write('\\n\\n')\n params_file.write(model_config.task().ToText())",
"def fmt_run_path(model_data_dir, case, ivc, dom):\n filename = \"wrfout_d\"+dom+\"_*\"\n\n prefix_casestudy = \"CaseStudy_\"\n dtuple = datetime.strptime(case, \"%Y-%m-%d_%H:%M\")\n stime = dtuple.strftime('%-m-%-d-%Y')\n sims = \"/\"+prefix_casestudy+stime+\"/\"+ivc+\"_\"+stime.replace(\"-\", \"_\")+\"/\"\n\n model_path = model_data_dir+sims+filename\n\n return model_path, stime",
"def set_log_dir(self, model_path=None):\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n# now = datetime.datetime.now()\n# \n# # If we have a model path with date and epochs use them\n# if model_path:\n# # Continue from we left of. Get epoch and date from the file name\n# # A sample model path might look like:\n# # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n# regex = r\".*/[\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n# m = re.match(regex, model_path)\n# if m:\n# now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n# int(m.group(4)), int(m.group(5)))\n# # Epoch number in file is 1-based, and in Keras code it's 0-based.\n# # So, adjust for that then increment by one to start from the next epoch\n# self.epoch = int(m.group(6)) - 1 + 1\n# print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \n \"siamese_{}_{}_{}\".format(self.config.MODEL.lower(), \n self.config.NAME.lower(), \n self.config.EXPERIMENT.lower()))\n\n # Create log_dir if not exists\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"siamese_mrcnn_*epoch*.h5\")\n self.checkpoint_path = self.checkpoint_path.replace(\"*epoch*\", \"{epoch:04d}\")",
"def crw_model_info(filename, metadata_url, output):\n r2dt.write_crw(filename, metadata_url, output)",
"def readFromMetadata(cls, context, fqId): \n newInstance = ModelRun()\n (newInstance.modelType, newInstance.runNumber) = fqId.split(GenericMetadata.KEY_SEP)\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n keyProto = fqId + GenericMetadata.KEY_SEP\n \n runDate = keyProto + 'date_utc'\n newInstance.date = datetime.strptime(modelRunEntries[runDate], ModelRun.FMT_DATE)\n runDesc = keyProto + 'description'\n newInstance.description = modelRunEntries[runDesc]\n runCmd = keyProto + 'command'\n newInstance.command = modelRunEntries[runCmd]\n runOutput = keyProto + 'output'\n newInstance.output = modelRunEntries[runOutput]\n \n return newInstance",
"def setup(self, run, run_id):\n\n raise NotImplementedError",
"def write_metadata(config, train_image_metadata, val_image_metadata):\n with open(config.ImageDataConfig.preprocessed_image_metadata_filename, 'wb') as f:\n pickle.dump({'train': train_image_metadata, 'val': val_image_metadata}, f, pickle.HIGHEST_PROTOCOL)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def build_instance(self, model=None):\n if not issubclass(model, AbstractModel):\n raise TypeError(\"argument model expect subclass of AbstractModel\")\n\n # gen instance id\n model_name = \"%s_%s_%.1f\" % (model.AUTHOR, model.__name__, model.VERSION)\n instance_id = model_name + '_' + strftime(\"%Y-%m-%d_%H-%M-%S\", localtime())\n self.log('build instance: %s' % instance_id)\n\n # init new instance directory\n self.log('init instance directory')\n instance_path = os.path.join(self.root_path, INSTANCE_FOLDER, instance_id)\n if not os.path.exists(instance_path):\n os.mkdir(instance_path)\n\n instance_visual_result_folder_path = os.path.join(instance_path, VISUAL_RESULT_FOLDER)\n if not instance_visual_result_folder_path:\n os.mkdir(instance_visual_result_folder_path)\n\n instance_source_folder_path = os.path.join(instance_path, 'src_code')\n if not os.path.exists(instance_source_folder_path):\n os.mkdir(instance_source_folder_path)\n\n instance_summary_folder_path = os.path.join(instance_path, 'summary')\n if not os.path.exists(instance_summary_folder_path):\n os.mkdir(instance_summary_folder_path)\n\n self.log('dump instance source code')\n instance_source_path = os.path.join(instance_source_folder_path, instance_id + '.py')\n try:\n copy(inspect.getsourcefile(model), instance_source_path)\n except IOError as e:\n print(e)\n\n self.log(\"build_metadata\")\n metadata = model.build_metadata()\n\n self.log('dump metadata')\n metadata[MODEL_METADATA_KEY_INSTANCE_ID] = instance_id\n metadata[MODEL_METADATA_KEY_INSTANCE_PATH] = instance_path\n metadata[MODEL_METADATA_KEY_INSTANCE_VISUAL_RESULT_FOLDER_PATH] = instance_visual_result_folder_path\n metadata[MODEL_METADATA_KEY_INSTANCE_SOURCE_FOLDER_PATH] = instance_source_folder_path\n metadata[MODEL_METADATA_KEY_INSTANCE_SOURCE_PATH] = instance_source_path\n metadata[MODEL_METADATA_KEY_INSTANCE_SUMMARY_FOLDER_PATH] = instance_summary_folder_path\n metadata[MODEL_METADATA_KEY_INSTANCE_CLASS_NAME] = model.__name__\n metadata[MODEL_METADATA_KEY_README] = self.gen_readme()\n metadata[MODEL_METADATA_KEY_METADATA_PATH] = os.path.join(instance_path, 'instance.meta')\n dump_json(metadata, metadata[MODEL_METADATA_KEY_METADATA_PATH])\n\n self.log('build complete')\n return instance_path",
"def run(self) -> int:\n if self.model_name:\n _ = self.is_exluded_model(self.model_name)\n logger.info(f\"Running audit of model [bold magenta]{self.model_name}.[/bold magenta]\\n\")\n path_file, schema_exists, _ = self.find_model_schema_file(self.model_name)\n if not path_file:\n logger.info(f\"Could not find {self.model_name} in the project at {self.dbt_path}\")\n return 1\n if not schema_exists:\n logger.info(\"The model is not documented.\")\n return 1\n self.model_content = open_yaml(path_file)\n self.derive_model_coverage()\n else:\n logger.info(f\"Running audit of dbt project in {self.dbt_path}.\\n\")\n self.derive_project_coverage()\n return 0",
"def insert_run_metadata(pool, sim_tag, source_id, variable_id, fgt, metadata, template_path=None):\n\n connection = pool.connection()\n\n try:\n\n sql_statement = \"INSERT INTO `run_info` (`sim_tag`, `source`, `variable`, `fgt`, `metadata`) \" \\\n \"VALUES ( %s, %s, %s, %s, %s)\"\n data = (sim_tag, source_id, variable_id, fgt, json.dumps(metadata))\n\n if template_path is not None:\n template = convertToBinaryData(template_path)\n sql_statement = \"INSERT INTO `run_info` (`sim_tag`, `source`, `variable`, `fgt`, `metadata`, `template`) \" \\\n \"VALUES ( %s, %s, %s, %s, %s, %s)\"\n data = (sim_tag, source_id, variable_id, fgt, json.dumps(metadata), template)\n\n with connection.cursor() as cursor:\n cursor.execute(sql_statement, data)\n\n connection.commit()\n\n return True\n except Exception as exception:\n connection.rollback()\n error_message = \"Insertion failed for run info entry with source={}, variable={}, sim_tag={}, fgt={}, metadata={}\" \\\n .format(source_id, variable_id, sim_tag, fgt, json.dumps(metadata))\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()",
"def register_run_configuration_metadata(\n self,\n provider: RunConfigurationProvider,\n metadata: RunConfigurationMetadata\n ):\n self.get_container().register_run_configuration_metadata(\n provider, metadata)",
"def _write_cache(step, event_file_suffix=None, **kwargs):\n file_suffix = _TT_EVENT_FILE_SUFFIX\n if event_file_suffix is not None:\n file_suffix = string_ops.string_join([file_suffix, event_file_suffix],\n separator='.')\n # TODO(deveci): Parametrize max_queue, so that flushing op can be called\n # less frequently.\n # Setting max_queue to 100 appears to be safe even when the number of\n # iterations are much lower, as the destructor of the writer flushes it.\n summary_write_ops = []\n summary_writer = summary.create_file_writer_v2(\n self._parameters.trace_dir,\n filename_suffix=file_suffix,\n max_queue=_TT_SUMMARY_MAX_QUEUE)\n graph.add_to_collection(\n TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n\n step_value = step[0]\n dt = step_value.dtype\n\n # The step parameter to a summary write call must be 64-bit.\n if dt.__ne__(dtypes.int64) and dt.__ne__(\n dtypes.uint64) and dt.__ne__(dtypes.float64):\n step_value = math_ops.cast(step_value, dtypes.int64)\n\n with summary_writer.as_default():\n summary_metadata = summary_pb2.SummaryMetadata(\n plugin_data=summary_pb2.SummaryMetadata.PluginData(\n plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n for key, value in kwargs.items():\n # Check whether we need to compute aggregated statistics that merge\n # all cores statistics.\n if not self._parameters.collect_summary_per_core:\n # Merge only statistics tensor, if it is any other tensor we simply,\n # concatenate them.\n # Also, if there is only a single core (first dim. is 0), then skip\n # aggregation.\n if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n value = self.aggregate_global_cache(value)\n with ops.control_dependencies([summary_writer.init()]):\n summary_write_ops.append(summary.write(\n _TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag,\n value, metadata=summary_metadata,\n step=step_value))\n return control_flow_ops.group(summary_write_ops)",
"def runSegmentMD(self, segment):\n\n amber_start_coords_path = \"{jn}-run/{seg}.rst7\".format(jn=self.jobname, seg=segment.getParentNameString()) \n amber_end_coords_path = \"{jn}-run/{seg}.rst7\".format(jn=self.jobname, seg=segment.getNameString()) \n \n if self.debug:\n amber_info_path = \"{jn}-run/{seg}.inf\".format(jn=self.jobname, seg=segment.getNameString())\n amber_outfile_path = \"{jn}-run/{seg}.out\".format(jn=self.jobname, seg=segment.getNameString())\n amber_trajectory_path = \"{jn}-run/{seg}.nc\".format( jn=self.jobname, seg=segment.getNameString())\n else:\n #amber_info_path = '/dev/null'\n #amber_trajectory_path = '/dev/null'\n #amber_outfile_path = '/dev/null'\n #TODO: Strange bug on REX allows only info and traj to be /dev/null\n amber_info_path = self.tmp_dir + '/{seg}_{id}.inf'.format(seg=segment.getNameString(), id=uuid.uuid1())\n amber_outfile_path = self.tmp_dir + '/{seg}_{id}.out'.format(seg=segment.getNameString(), id=uuid.uuid1()) \n amber_trajectory_path = self.tmp_dir + '/{seg}_{id}.nc'.format( seg=segment.getNameString(), id=uuid.uuid1()) \n\n #TODO dou ble gpu usage on one node, can maybe by done more elegantly?\n\n if self.keep_trajectory_files:\n amber_trajectory_path = \"{jn}-run/{seg}.nc\".format(jn=self.jobname, seg=segment.getNameString())\n \n if self.has_cuda:\n # Assign one cuda visible device per job\n os.environ['CUDA_VISIBLE_DEVICES'] = str(self.cuda_visible_devices[segment.getId() % len(self.cuda_visible_devices)])\n \n # Overwrite the previous settings \n command_line = self.amber_binary + ' -O' + \\\n ' -p ' + self.amber_topology_file + \\\n ' -i ' + self.amber_infile + \\\n ' -c ' + amber_start_coords_path + \\\n ' -o ' + amber_outfile_path + \\\n ' -x ' + amber_trajectory_path + \\\n ' -inf ' + amber_info_path + \\\n ' -r ' + amber_end_coords_path\n \n \n\n #Command line for debugging\n if self.debug:\n os.system('echo {line} >> {jn}-debug/amber_command_lines.log'.format(line=command_line, jn=self.jobname))\n \n #Log and Run MD\n if self.debug:\n logfile = open(\"{jn}-log/{it}.MD_log\".format(jn=self.jobname, it=self.iteration.getNameString()),'a')\n logfile.write(self.MdLogString(segment, status = 0))\n \n # Run MD\n ret_code = os.system(command_line)\n \n # If anything went wrong, sync, wait a second, then retry\n if ret_code != 0:\n os.system('sync; sleep 1')\n ret_code = os.system(command_line)\n \n if self.debug:\n logfile.write(self.MdLogString(segment, status = 1 ))\n logfile.close()\n \n # Remove out files\n if not self.debug:\n try: os.remove(amber_outfile_path)\n except OSError: pass\n try: os.remove(amber_info_path)\n except OSError: pass\n if not self.keep_trajectory_files:\n try: os.remove(amber_trajectory_path)\n except OSError: pass",
"def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up 
summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()"
] | [
"0.67631894",
"0.55094665",
"0.54853433",
"0.53479904",
"0.50316596",
"0.4959625",
"0.49440154",
"0.4917791",
"0.48888582",
"0.48095495",
"0.4801151",
"0.4781151",
"0.47765702",
"0.47687408",
"0.47333437",
"0.47103244",
"0.4694231",
"0.46878308",
"0.46798983",
"0.46664116",
"0.46655694",
"0.46343413",
"0.46313304",
"0.4609082",
"0.45933974",
"0.45876712",
"0.458306",
"0.45793656",
"0.45594022",
"0.45448944"
] | 0.74479276 | 0 |
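Several of the retrieved snippets above (the Nilearn, pickle, and run_info examples) follow the same basic pattern: gather selected run or model attributes into a dictionary and serialize them to a JSON sidecar file. Below is a minimal, self-contained sketch of that pattern using only the standard library; the function and field names are illustrative and not taken from any of the quoted libraries.

import json
import os

def write_run_metadata(out_dir, run_params, filename="run_metadata.json"):
    # Keep only JSON-serializable values; stringify anything else (e.g. paths, arrays).
    serializable = {}
    for key, value in run_params.items():
        try:
            json.dumps(value)
            serializable[key] = value
        except TypeError:
            serializable[key] = str(value)
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, filename)
    with open(out_path, "w") as f:
        json.dump(serializable, f, indent=4, sort_keys=True)
    return out_path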
Write AssetProvenance data to the provenance section of metadata for a given project directory. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. Raises Exception if section is not a valid GenericMetadata section. | def writeToMetadata(self, context):
fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name
fqId = fqId.lower()
# Write self to the appropriate section
GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)
# Write to provenance section
provenanceEntries = GenericMetadata.readProvenanceEntries(context)
try:
entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)
except KeyError:
entities = []
# Write entity metadata (overwrite if already present)
keys = []
values = []
if fqId not in entities:
entities.append(fqId)
entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)
keys.append('entities'); values.append(entitiesStr)
# Write attributes for entity
keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP
dcIdentifier = keyProto + 'dc.identifier'
keys.append(dcIdentifier); values.append(self.dcIdentifier)
dcSource = keyProto + 'dc.source'
keys.append(dcSource); values.append(self.dcSource)
dcTitle = keyProto + 'dc.title'
keys.append(dcTitle); values.append(self.dcTitle)
if self.dcDate:
dcDate = keyProto + 'dc.date'
keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))
dcPublisher = keyProto + 'dc.publisher'
keys.append(dcPublisher); values.append(self.dcPublisher)
dcDescription = keyProto + 'dc.description'
keys.append(dcDescription); values.append(self.dcDescription)
processingNotes = keyProto + 'processing_notes'
keys.append(processingNotes); values.append(self.processingNotes)
GenericMetadata.writeProvenanceEntries(context, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeToMetadata(self, context):\n pass",
"def _prep_metadata(md_sect, path):\n if not set(md_sect).issuperset(metadata_required_fields):\n missing = metadata_required_fields - set(md_sect)\n raise ConfigError(\"Required fields missing: \" + '\\n'.join(missing))\n\n module = md_sect.get('module')\n if not module.isidentifier():\n raise ConfigError(\"Module name %r is not a valid identifier\" % module)\n\n md_dict = {}\n\n # Description file\n if 'description-file' in md_sect:\n description_file = path.parent / md_sect.get('description-file')\n try:\n with description_file.open(encoding='utf-8') as f:\n raw_desc = f.read()\n except FileNotFoundError:\n raise ConfigError(\n \"Description file {} does not exist\".format(description_file)\n )\n ext = description_file.suffix\n try:\n mimetype = readme_ext_to_content_type[ext]\n except KeyError:\n log.warning(\"Unknown extension %r for description file.\", ext)\n log.warning(\" Recognised extensions: %s\",\n \" \".join(readme_ext_to_content_type))\n mimetype = None\n\n if mimetype == 'text/x-rst':\n # rst check\n stream = io.StringIO()\n res = render(raw_desc, stream)\n if not res:\n log.warning(\"The file description seems not to be valid rst for PyPI;\"\n \" it will be interpreted as plain text\")\n log.warning(stream.getvalue())\n\n md_dict['description'] = raw_desc\n md_dict['description_content_type'] = mimetype\n\n if 'urls' in md_sect:\n project_urls = md_dict['project_urls'] = []\n for label, url in sorted(md_sect.pop('urls').items()):\n project_urls.append(\"{}, {}\".format(label, url))\n\n for key, value in md_sect.items():\n if key in {'description-file', 'module'}:\n continue\n if key not in metadata_allowed_fields:\n closest = difflib.get_close_matches(key, metadata_allowed_fields,\n n=1, cutoff=0.7)\n msg = \"Unrecognised metadata key: {!r}\".format(key)\n if closest:\n msg += \" (did you mean {!r}?)\".format(closest[0])\n raise ConfigError(msg)\n\n k2 = key.replace('-', '_')\n md_dict[k2] = value\n if key in metadata_list_fields:\n if not isinstance(value, list):\n raise ConfigError('Expected a list for {} field, found {!r}'\n .format(key, value))\n if not all(isinstance(a, str) for a in value):\n raise ConfigError('Expected a list of strings for {} field'\n .format(key))\n elif key == 'requires-extra':\n if not isinstance(value, dict):\n raise ConfigError('Expected a dict for requires-extra field, found {!r}'\n .format(value))\n if not all(isinstance(e, list) for e in value.values()):\n raise ConfigError('Expected a dict of lists for requires-extra field')\n for e, reqs in value.items():\n if not all(isinstance(a, str) for a in reqs):\n raise ConfigError('Expected a string list for requires-extra. (extra {})'\n .format(e))\n else:\n if not isinstance(value, str):\n raise ConfigError('Expected a string for {} field, found {!r}'\n .format(key, value))\n\n # What we call requires in the ini file is technically requires_dist in\n # the metadata.\n if 'requires' in md_dict:\n md_dict['requires_dist'] = md_dict.pop('requires')\n\n # And what we call dist-name is name in the metadata\n if 'dist_name' in md_dict:\n md_dict['name'] = md_dict.pop('dist_name')\n\n # Move dev-requires into requires-extra\n reqs_noextra = md_dict.pop('requires_dist', [])\n reqs_by_extra = md_dict.pop('requires_extra', {})\n dev_requires = md_dict.pop('dev_requires', None)\n if dev_requires is not None:\n if 'dev' in reqs_by_extra:\n raise ConfigError(\n 'dev-requires occurs together with its replacement requires-extra.dev.')\n else:\n log.warning(\n '“dev-requires = ...” is obsolete. 
Use “requires-extra = {\"dev\" = ...}” instead.')\n reqs_by_extra['dev'] = dev_requires\n\n # Add requires-extra requirements into requires_dist\n md_dict['requires_dist'] = \\\n reqs_noextra + list(_expand_requires_extra(reqs_by_extra))\n\n md_dict['provides_extra'] = sorted(reqs_by_extra.keys())\n\n # For internal use, record the main requirements as a '.none' extra.\n reqs_by_extra['.none'] = reqs_noextra\n\n return md_dict, module, reqs_by_extra",
"def test_construct_and_write_metadata(tmp_path):\n\n prov = Provenance()\n prov.start_activity(\"test\")\n prov.finish_activity()\n prov_activity = prov.finished_activities[0]\n\n reference = meta.Reference(\n contact=meta.Contact(\n name=\"Somebody\", email=\"[email protected]\", organization=\"CTA Consortium\"\n ),\n product=meta.Product(\n description=\"An Amazing Product\",\n creation_time=\"2020-10-11 15:23:31\",\n data_category=\"S\",\n data_level=\"DL1\",\n data_association=\"Subarray\",\n data_model_name=\"Unofficial DL1\",\n data_model_version=\"1.0\",\n data_model_url=\"http://google.com\",\n format=\"hdf5\",\n ),\n process=meta.Process(_type=\"Simulation\", subtype=\"Prod3b\", _id=423442,),\n activity=meta.Activity.from_provenance(prov_activity.provenance),\n instrument=meta.Instrument(\n site=\"CTA-North\",\n class_=\"Array\",\n type_=\"Layout H1B\",\n version=\"1.0\",\n id_=\"threshold\",\n ),\n )\n\n ref_dict = reference.to_dict()\n assert ref_dict[\"CTA PRODUCT FORMAT\"] == \"hdf5\"\n\n import uuid # pylint: disable=import-outside-toplevel\n\n assert str(uuid.UUID(ref_dict[\"CTA PRODUCT ID\"])) == ref_dict[\"CTA PRODUCT ID\"]\n\n # check that we can write this to the header of a typical table file in multiple\n # formats:\n from astropy.table import Table # pylint: disable=import-outside-toplevel\n\n table = Table(dict(x=[1, 2, 3], y=[15.2, 15.2, 14.5]))\n table.meta = ref_dict\n for file_name in [tmp_path / \"test.fits\", tmp_path / \"test.ecsv\"]:\n table.write(file_name)\n\n # write to pytables file\n\n import tables # pylint: disable=import-outside-toplevel\n\n with tables.open_file(tmp_path / \"test.h5\", mode=\"w\") as h5file:\n meta.write_to_hdf5(ref_dict, h5file)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def _StageMetadata(json_metadata, storage_service, staged_file: str):\n # Write computed metadata to object storage.\n temp_run_dir = temp_dir.GetRunDirPath()\n local_file = os.path.join(temp_run_dir, os.path.basename(staged_file))\n with open(local_file, 'w') as f:\n json.dump(json_metadata, f)\n storage_service.Copy(local_file, staged_file)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def write_metadata(file_name, metadata_dict, category='',\n datatype=\"inventory\", parameters=None):\n if (datatype == \"inventory\") or (datatype == \"source\"):\n meta = set_stewi_meta(file_name, stewiformat=category)\n if datatype == 'inventory':\n meta.tool_meta = {\"parameters\": parameters,\n \"sources\": metadata_dict}\n else:\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)\n elif datatype == \"validation\":\n file = (paths.local_path / 'validation' /\n f'{file_name}_validationset_metadata.json')\n with file.open('w') as fi:\n fi.write(json.dumps(metadata_dict, indent=4))",
"def _post_src_install_write_metadata(settings):\n\n\teapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])\n\n\tbuild_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')\n\n\tmetadata_keys = ['IUSE']\n\tif eapi_attrs.iuse_effective:\n\t\tmetadata_keys.append('IUSE_EFFECTIVE')\n\n\tfor k in metadata_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tif v is not None:\n\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\t# The following variables are irrelevant for virtual packages.\n\tif settings.get('CATEGORY') != 'virtual':\n\n\t\tfor k in ('CHOST',):\n\t\t\tv = settings.get(k)\n\t\t\tif v is not None:\n\t\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),\n\t\tmode='w', encoding=_encodings['repo.content'],\n\t\terrors='strict') as f:\n\t\tf.write(_unicode_decode(\"%.0f\\n\" % (time.time(),)))\n\n\tuse = frozenset(settings['PORTAGE_USE'].split())\n\tfor k in _vdb_use_conditional_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tfilename = os.path.join(build_info_dir, k)\n\t\tif v is None:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\n\t\tif k.endswith('DEPEND'):\n\t\t\tif eapi_attrs.slot_operator:\n\t\t\t\tcontinue\n\t\t\ttoken_class = Atom\n\t\telse:\n\t\t\ttoken_class = None\n\n\t\tv = use_reduce(v, uselist=use, token_class=token_class)\n\t\tv = paren_enclose(v)\n\t\tif not v:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\terrors='strict') as f:\n\t\t\tf.write(_unicode_decode(v + '\\n'))\n\n\tif eapi_attrs.slot_operator:\n\t\tdeps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())\n\t\tfor k, v in deps.items():\n\t\t\tfilename = os.path.join(build_info_dir, k)\n\t\t\tif not v:\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\t\terrors='strict') as f:\n\t\t\t\tf.write(_unicode_decode(v + '\\n'))",
"def _WriteStorageMetadata(self):\n stream_name = 'metadata.txt'\n if self._HasStream(stream_name):\n return\n\n stream_data = (\n '[plaso_storage_file]\\n'\n 'format_version: {0:d}\\n'\n 'serialization_format: {1:s}\\n'\n 'storage_type: {2:s}\\n'\n '\\n').format(\n self._FORMAT_VERSION, self.serialization_format, self.storage_type)\n\n stream_data = stream_data.encode('utf-8')\n self._WriteStream(stream_name, stream_data)",
"def write_metadata_to_file(self, path):\n return write_metadata_to_ma_file(path, self)",
"def _writeVersionToMetadata(config):\n if not config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n config.add_section(GenericMetadata.ECOHYDROLIB_SECION)\n \n if not config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n config.set(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY, GenericMetadata._ecohydrolibVersion)\n return\n \n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def write_metadata(dir_path, fs, *metas, global_metadata=True):\n assert metas\n md = metas[0]\n with fs.open(\"/\".join([dir_path, \"_common_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)\n if global_metadata:\n for meta in metas[1:]:\n md.append_row_groups(meta)\n with fs.open(\"/\".join([dir_path, \"_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)",
"def gen_metadata(args):\n with open(args.bibfile) as bibfile:\n bib_db = BibTexParser(common_strings=True).parse_file(bibfile)\n entries = sorted(list(bib_db.entries),\n key=lambda x: x['year'], reverse=True)\n list([update_file(entry) for entry in entries])\n annotations = [entry_to_annotation(entry, args.PI) for entry in entries]\n stream = open(args.metadata, 'w')\n yaml.dump(annotations, stream, width=192, default_flow_style=False)\n stream.close()",
"def make_report_metadata(metadata, out_dir):\n metadata_qa = {}\n # Edit the product type and description\n metadata_qa[\"ProductType\"] = {\"ProductTypeName\": \"MeerKATReductionProduct\",\n \"ReductionName\": \"Continuum Image Quality Report\"}\n\n desc_prefix = metadata[\"Description\"].split(':')[0]\n metadata_qa[\"Description\"] = desc_prefix + \": Continuum image quality report\"\n\n # Copy remaining keys from original metadata\n report_keys = [\"StartTime\", \"CaptureBlockId\",\n \"ProposalId\", \"Observer\", \"ScheduleBlockIdCode\",\n \"Run\"]\n for key in report_keys:\n metadata_qa[key] = metadata[key]\n\n write_metadata(metadata_qa, out_dir)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def readFromMetadata(cls, context, fqId):\n newInstance = AssetProvenance()\n (newInstance.section, newInstance.name) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n \n provenance = GenericMetadata.readProvenanceEntries(context)\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n newInstance.dcIdentifier = provenance[dcIdentifier]\n dcSource = keyProto + 'dc.source'\n newInstance.dcSource = provenance[dcSource]\n dcTitle = keyProto + 'dc.title'\n newInstance.dcTitle = provenance[dcTitle]\n dcDate = keyProto + 'dc.date'\n newInstance.dcDate = datetime.strptime(provenance[dcDate], AssetProvenance.FMT_DATE)\n dcPublisher = keyProto + 'dc.publisher'\n newInstance.dcPublisher = provenance[dcPublisher]\n dcDescription = keyProto + 'dc.description'\n newInstance.dcDescription = provenance[dcDescription]\n processingNotes = keyProto + 'processing_notes'\n newInstance.processingNotes = provenance[processingNotes]\n \n return newInstance",
"def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }",
"def put_metadata(self, metadata, tombstone=False):\n if tombstone:\n # We don't write tombstone files. So do nothing.\n return\n assert self.data_file is not None, \\\n \"put_metadata: no file to put metadata into\"\n metadata = _adjust_metadata(metadata)\n self.threadpool.run_in_thread(write_metadata, self.data_file, metadata)\n self.metadata = metadata\n self._filter_metadata()",
"def bundle_metadata(self, metadata):\n\n metadata_file = None\n try:\n metadata_file = tempfile.NamedTemporaryFile(delete=False)\n except IOError:\n task_error('Cannot create metadata file in working directory')\n\n metadata_file.write(metadata)\n fname = metadata_file.name\n metadata_file.close()\n\n metadata_file = open(fname, mode='rb')\n\n # metadata_file.seek(0)\n\n if self.empty_tar:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='w')\n self.empty_tar = False\n else:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='a')\n\n try:\n tar_info = tarfile.TarInfo('metadata.txt')\n tar_info.size = len(metadata)\n tar_info.mtime = time.time()\n tarball.addfile(tar_info, metadata_file)\n metadata_file.close()\n tarball.close()\n os.remove(fname)\n except Exception, ex:\n print ex\n traceback.print_exc(file=sys.stdout)\n raise ex",
"def setProvenance(args, syn):\n \n activity = Activity(name=args.name, description=args.description)\n if args.used:\n for item in args.used:\n activity.used(item)\n if args.executed:\n for item in args.executed:\n activity.used(item, wasExecuted=True)\n activity = syn.setProvenance(args.id, activity)\n\n # Display the activity record, if -o or -output specified\n if args.output:\n if args.output=='STDOUT':\n sys.stdout.write(json.dumps(activity))\n sys.stdout.write('\\n')\n else:\n with open(args.output, 'w') as f:\n f.write(json.dumps(activity))\n f.write('\\n')\n else:\n print 'Set provenance record %s on entity %s\\n' % (str(activity['id']), str(args.id))",
"def _StoreMetadataToFile(payload_dir, metadata_obj):\n file_dict = {SHA1_ATTR: metadata_obj.sha1,\n SHA256_ATTR: metadata_obj.sha256,\n SIZE_ATTR: metadata_obj.size,\n ISDELTA_ATTR: metadata_obj.is_delta_format}\n metadata_file = os.path.join(payload_dir, METADATA_FILE)\n with open(metadata_file, 'w') as file_handle:\n json.dump(file_dict, file_handle)"
] | [
"0.60347295",
"0.60217524",
"0.5577184",
"0.5552108",
"0.553339",
"0.55101997",
"0.5417018",
"0.53029364",
"0.52820504",
"0.5193149",
"0.5164625",
"0.5141724",
"0.5113907",
"0.5058686",
"0.50282586",
"0.50007135",
"0.49816483",
"0.4901674",
"0.48992616",
"0.48467356",
"0.4813063",
"0.4808998",
"0.47717354",
"0.47660962",
"0.47555894",
"0.47391805",
"0.47320643",
"0.46770504",
"0.46725112",
"0.46554413"
] | 0.67359984 | 0 |
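A minimal, self-contained sketch that mirrors the structure of the positive document above (append a fully-qualified entity id to an 'entities' list, then store its attributes under prefixed keys), using only the standard library. The 'provenance' section name, the '_' separator, and the key layout are simplifications rather than the actual GenericMetadata API.

import configparser
import os

def write_provenance_entity(project_dir, section, name, attrs,
                            metadata_filename="metadata.ini"):
    # Fully-qualified id, analogous to section + COMPOUND_KEY_SEP + name in the quoted code.
    fq_id = ("%s_%s" % (section, name)).lower()
    path = os.path.join(project_dir, metadata_filename)
    config = configparser.ConfigParser()
    config.read(path)
    if not config.has_section("provenance"):
        config.add_section("provenance")
    # Append the entity to the delimited 'entities' list if it is not already present.
    entities = [e for e in config.get("provenance", "entities", fallback="").split(",") if e]
    if fq_id not in entities:
        entities.append(fq_id)
    config.set("provenance", "entities", ",".join(entities))
    # Store each attribute (dc.identifier, dc.source, ...) under a key prefixed by the entity id.
    for key, value in attrs.items():
        config.set("provenance", "%s_%s" % (fq_id, key), str(value))
    with open(path, "w") as f:
        config.write(f)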
Return a string representing, as closely as possible, the original command line used to run the command. All paths in the command line are converted to absolute paths; if a non-path element has spaces in it, it will be quoted. Returns a string with each element of sys.argv separated by a space. | def getCommandLine():
import sys, os
cmdline = os.path.abspath(sys.argv[0])
for elem in sys.argv[1:]:
cmdline += ' ' + ecohydrolib.util.getAbsolutePathOfItem(elem)
return cmdline | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cli_string():\n return os.path.basename(sys.argv[0]) + \" \" + \" \".join(sys.argv[1:])",
"def cmdstr(cmd):\n if isinstance(cmd, str):\n return cmd\n\n quoted = []\n for arg in cmd:\n if isinstance(arg, Path):\n arg = str(arg)\n if ' ' in arg:\n arg = '\"%s\"' % (arg,)\n quoted.append(arg)\n return ' '.join(quoted)",
"def shell_command(self):\n # TODO: fix this naive version by adding quotes where appropriate\n return \" \".join(self.args)",
"def _joinArgv(argv):\n cmdstr = \"\"\n for arg in argv:\n if ' ' in arg:\n cmdstr += '\"%s\"' % _escapeArg(arg)\n else:\n cmdstr += _escapeArg(arg)\n cmdstr += ' '\n if cmdstr.endswith(' '): cmdstr = cmdstr[:-1] # strip trailing space\n return cmdstr",
"def sh_quote_safe_cmdline(args):\n return str.join(' ', (sh_quote_safe(arg) for arg in args))",
"def sh_quote_unsafe_cmdline(args):\n return str.join(' ', (sh_quote_unsafe(arg) for arg in args))",
"def cmdify(self):\n return \" \".join(\n itertools.chain(\n [_quote_if_contains(self.command, r\"[\\s^()]\")],\n (_quote_if_contains(arg, r\"[\\s^]\") for arg in self.args),\n )\n )",
"def _cmdline(process):\n return \" \".join(process.cmdline())",
"def cmd(*args):\r\n return \" \".join([str(arg) for arg in args])",
"def shlex_join(argv):\n def quote(arg):\n if arg.find(\" \") >= 0:\n return '\"%s\"' % arg\n else:\n return arg\n return \" \".join([quote(arg) for arg in argv])",
"def _make_cmdline(self, line):\n if isinstance(line, list):\n parts = line\n else:\n parts = line.split(\" \", 1)\n cmd = parts[0]\n exe = os.path.join(BINDIR, cmd)\n\n python_cmds = [\"samba-tool\",\n \"samba_dnsupdate\",\n \"samba_upgradedns\",\n \"script/traffic_replay\",\n \"script/traffic_learner\"]\n\n if os.path.exists(exe):\n parts[0] = exe\n if cmd in python_cmds and os.getenv(\"PYTHON\", None):\n parts.insert(0, os.environ[\"PYTHON\"])\n\n if not isinstance(line, list):\n line = \" \".join(parts)\n\n return line",
"def StringifyCommand(cmd):\n ret = ''\n grouping = 0\n for a in cmd:\n if grouping == 0 and len(ret) > 0:\n ret += \" \\\\\\n \"\n elif grouping > 0:\n ret += \" \"\n if grouping == 0:\n grouping = 1\n if a.startswith('-') and len(a) == 2:\n grouping = 2\n ret += a\n grouping -= 1\n return ret",
"def get_cmdline(self):\n\n return [self._args.t] + self._args.target_args[1:]",
"def args(self) -> typing.Tuple[str, typing.List[str]]:\n func = inspect.stack()[1][3]\n command = func[len(self.CMD_PREFIX):]\n return ('{} {}'.format(sys.argv[0], command),\n sys.argv[2:])",
"def list2cmdline(seq):\n\n result = []\n needquote = False\n for arg in seq:\n bs_buf = []\n\n # Add a space to separate this argument from the others\n if result:\n result.append(' ')\n\n needquote = (\" \" in arg) or (\"\\t\" in arg) or (not arg) or (\"(\" in arg) or (\")\" in arg)\n if needquote:\n result.append('\"')\n\n for c in arg:\n if c == '\\\\':\n # Don't know if we need to double yet.\n bs_buf.append(c)\n elif c == '\"':\n # Double backslashes.\n result.append('\\\\' * len(bs_buf) * 2)\n bs_buf = []\n result.append('\\\\\"')\n else:\n # Normal char\n if bs_buf:\n result.extend(bs_buf)\n bs_buf = []\n result.append(c)\n\n # Add remaining backslashes, if any.\n if bs_buf:\n result.extend(bs_buf)\n\n if needquote:\n result.extend(bs_buf)\n result.append('\"')\n\n return ''.join(result)",
"def parse_cmdline(self, command_line):\n components = shlex.split(\n io.StringIO(unicode(command_line)), posix=False\n )\n return components[0].strip('\"'), components[1:]",
"def rebuild_command(args):\n return \"%s\\n\" % (\" \".join(args)).replace(\"\\\\\", \"\\\\\\\\\")",
"def clean_command_lines(cmd):\r\n cmd = ' '.join(cmd.split())\r\n return cmd",
"def command(self) -> str:\n cmd = ''\n if self.argv:\n cmd = self.argv[0]\n return cmd",
"def gen_command_line(self, testcase):\n found_double_at = False\n new_args = []\n\n for arg in self.target_cmdline:\n if arg == '@@':\n found_double_at = True\n new_args.append(testcase)\n else:\n new_args.append(arg)\n\n if found_double_at:\n stdin = None\n else:\n with open(testcase, 'rb') as inf:\n stdin = inf.read()\n\n return new_args, stdin",
"def _unwrap_command_line(s: str) -> str:\n if not _command_escape_pattern.fullmatch(s):\n return s\n indent = \"\".join(itertools.takewhile(lambda c: c.isspace(), s))\n cmd = s[(len(indent) + 8):-len(_command_escape_comment)]\n return indent + cmd",
"def formatPath(a):\n return \"\".join([\" %s \" % cmd + \" \".join([str(p) for p in params]) for cmd, params in a])",
"def prepare_executable_cmd(args: dict):\n return [str(args[\"executable\"].resolve(strict=True)),\n \"-m\", str(args[\"model\"].resolve(strict=True)),\n \"-d\", args[\"device\"]]",
"def getline(self):\n cmd = [self.command]\n for arg in self.args:\n if arg:\n cmd.append(str(arg))\n return \" \".join(cmd).strip()+CRLF",
"def argv(self) -> List[str]:\n if self.command:\n rtn = [utils.strip_quotes(self.command)]\n for cur_token in self.arg_list:\n rtn.append(utils.strip_quotes(cur_token))\n else:\n rtn = []\n\n return rtn",
"def command_list_to_str(command):\n return \" \".join(pipes.quote(i) for i in command)",
"def _processCmdLine(ctx, taskParams):\n\n startNode = ctx.getStartDirNode(taskParams['$startdir'])\n\n bconf = ctx.getbconf(startNode)\n btypeDir = bconf.selectedBuildTypeDir\n startdir = bconf.startdir\n cmdArgs = taskParams['run']\n\n # By default 'shell' is True to get rid of some problems with Waf and Windows\n shell = cmdArgs.get('shell', True)\n\n cmdline = cmdArgs.get('cmd', '').strip()\n if not cmdline and taskParams['$runnable']:\n cmdline = taskParams['$real.target']\n\n if not cmdline:\n return cmdline, shell\n\n extraArgs = cmdArgs.get('extra-args')\n if extraArgs:\n cmdline = '%s %s' % (cmdline, ' '.join(extraArgs))\n\n shell = cmdHasShellSymbols(cmdline) if not shell else shell\n cmdSplitted = _splitCmdLine(cmdline, taskParams['name'], bconf.path)\n if not shell:\n # Waf cannot work correctly with paths with whitespaces when\n # 'shell' is False.\n # TODO: try to make solution for 'shell' == False\n if any(' ' in s for s in cmdSplitted):\n shell = True\n\n paths = [cmdArgs['cwd'], startdir, btypeDir]\n paths.extend(os.environ.get('PATH', '').split(os.pathsep))\n fkw = {\n 'path_list' : paths, 'quiet' : True,\n 'exts' : EXE_FILE_EXTS, 'mandatory' : False\n }\n\n partsCount = len(cmdSplitted)\n launcher = cmdSplitted[0]\n cmdExt = os.path.splitext(launcher)[1]\n if partsCount == 1 and cmdExt:\n find = lambda x: ctx.find_program(x, **fkw)\n result = _makeCmdForScript(cmdline, cmdExt, find)\n if result:\n cmdline = result\n\n elif partsCount > 1 and not shell and not _RE_STARTS_WITH_SUBST.match(launcher):\n # Waf raises exception in verbose mode with 'shell' == False if it\n # cannot find full path to executable and on windows cmdline\n # like 'python file.py' doesn't work.\n # So here is the attempt to find full path for such a case.\n result = ctx.find_program(launcher, **fkw)\n if result:\n launcher = result[0]\n cmdSplitted[0] = launcher\n cmdSplitted = [ x.replace(r'\"', r'\\\"') for x in cmdSplitted]\n cmdline = ' '.join('\"%s\"' % s if ' ' in s else s for s in cmdSplitted)\n\n return cmdline, shell",
"def get_arguments_string(self):\n result = self.__get_client_server_arg_string('')\n result = self.__get_x_args_string(result)\n result = self.__get_xx_args_string(result)\n result = self.__get_system_property_args_string(result)\n result = self.__get_unsorted_args_string(result)\n return result",
"def command_and_args(self) -> str:\n if self.command and self.args:\n rtn = f'{self.command} {self.args}'\n elif self.command:\n # there were no arguments to the command\n rtn = self.command\n else:\n rtn = ''\n return rtn",
"def fullCmdStr(self):\n return \"%s %s\" % (self.locCmdID, self.cmdStr)"
] | [
"0.69824934",
"0.6724259",
"0.6527682",
"0.6467328",
"0.6254741",
"0.6226238",
"0.60794044",
"0.60746896",
"0.60577667",
"0.6052372",
"0.60297465",
"0.59857696",
"0.5983711",
"0.58930767",
"0.5883761",
"0.5880378",
"0.5875304",
"0.58608395",
"0.5846903",
"0.5805317",
"0.57934475",
"0.57823074",
"0.57652074",
"0.57645154",
"0.5761606",
"0.5750861",
"0.5731134",
"0.5710868",
"0.57004094",
"0.5638274"
] | 0.7480482 | 0 |
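For comparison, here is a standalone sketch of the behaviour the docstring above describes (absolute paths, plus quoting of non-path arguments that contain spaces); this is an illustration, not the ecohydrolib implementation.

import os
import shlex
import sys

def reconstruct_command_line(argv=None):
    # Rebuild the invoking command line: absolute script path first, then each argument.
    argv = sys.argv if argv is None else argv
    parts = [os.path.abspath(argv[0])]
    for elem in argv[1:]:
        if os.path.exists(elem):
            # Path-like arguments become absolute paths.
            parts.append(os.path.abspath(elem))
        else:
            # Non-path arguments are quoted if they contain spaces or shell metacharacters.
            parts.append(shlex.quote(elem))
    return ' '.join(parts)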
Check if the metadata store is compatible with the current version of ecohydrolib. Accepts a project directory because this method is used in the constructor of the Context class. projectDir: the path of the project whose metadata store is to be written to. Raises MetadataVersionError if a version already exists in the metadata store and is different than GenericMetadata._ecohydrolibVersion. | def checkMetadataVersion(projectDir):
metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)
if os.path.exists(metadataFilepath):
if not os.access(metadataFilepath, os.R_OK):
raise IOError(errno.EACCES, "Unable to read metadata store for project %s" % \
(projectDir,))
# Read metadata store
config = ConfigParser.RawConfigParser()
config.read(metadataFilepath)
if config.has_section(GenericMetadata.ECOHYDROLIB_SECION):
if config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \
GenericMetadata.VERSION_KEY):
metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \
GenericMetadata.VERSION_KEY)
if metadataVersion != GenericMetadata._ecohydrolibVersion:
raise MetadataVersionError(metadataVersion) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_version(self, project, targetdir):\r\n versionfile = os.path.join(targetdir, 'project.version')\r\n if (os.path.exists(versionfile)):\r\n file_ = open(versionfile, \"r\")\r\n projectname = file_.read().strip()\r\n file_.close()\r\n if (projectname == project.objectname):\r\n return True\r\n return False",
"def check_dataset_old_metadata_location(**_):\n old_metadata = get_pre_0_3_4_datasets_metadata()\n\n if not old_metadata:\n return True, False, None\n\n problems = (\n WARNING + \"There are metadata files in the old location.\"\n '\\n (use \"renku migrate\" to move them)\\n\\n\\t'\n + \"\\n\\t\".join(click.style(str(path.relative_to(project_context.path)), fg=\"yellow\") for path in old_metadata)\n + \"\\n\"\n )\n\n return False, False, problems",
"def _writeVersionToMetadata(config):\n if not config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n config.add_section(GenericMetadata.ECOHYDROLIB_SECION)\n \n if not config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n config.set(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY, GenericMetadata._ecohydrolibVersion)\n return\n \n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def checkVersions():\n item = Item(fromScene=True)\n\n for ns, componentMData in item.components.iteritems():\n if ns == 'cam':\n # todo tratar versoes da camera\n continue\n\n if componentMData['assembleMode'] == 'reference':\n refComponent = ReferenceComponent(ns, componentMData, parent=item)\n refComponent.checkDBForNewVersion()\n\n elif componentMData['assembleMode'] == 'xlo':\n xloComponent = XloComponent(ns, componentMData, parent=item)\n xloComponent.checkDBForNewVersion()\n xloComponent.checkDBForNewCacheVersion()\n\n elif componentMData['assembleMode'] == 'cache':\n cacheComponent = CacheComponent(ns, componentMData, parent=item)\n cacheComponent.checkDBForNewVersion()\n\n item.putDataToDB()",
"def environment_needs_upgrade(self, db_dummy=None):\n db_installed_version = self.get_version()\n \n if db_installed_version > DB_VERSION:\n raise TracError('''Current db version (%d) newer than supported by\n this version of the %s (%d).''' % (db_installed_version,\n PLUGIN_NAME,\n DB_VERSION))\n return db_installed_version < DB_VERSION",
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def _check_accessor_for_version_problems(self):\n\n old_prov = self._cache_accessor.load_provenance()\n if old_prov is None:\n return\n\n new_prov = self._cache_accessor.provenance\n if old_prov.exactly_matches(new_prov):\n return\n\n if old_prov.nominally_matches(new_prov):\n # If we have a nominal match but not an exact match, that means the\n # user must changed a function's bytecode but not its version. To report\n # this, we first need to figure out which function changed. It could be\n # the one for this task, or it could be any immediate non-persisted\n # ancestor of this one. Fortunately, each provenance contains links to each of\n # its dependency digests, and a digest of non-persisted value contains that\n # value's provenance, so we can recursively search through our ancestor\n # provenances until we find which one caused the mismatch.\n def locate_mismatched_provenances_and_raise(old_prov, new_prov):\n assert old_prov.nominally_matches(new_prov)\n # If the bytecode doesn't match, we found the problematic pair.\n if old_prov.bytecode_hash != new_prov.bytecode_hash:\n message = f\"\"\"\n Found a cached artifact with the same descriptor\n ({self._cache_accessor.provenance.descriptor!r})\n and version (major={old_prov.code_version_major!r},\n minor={old_prov.code_version_minor!r}),\n but created by different code.\n It appears that the code function that outputs\n {new_prov.descriptor}\n was changed (old bytecode hash {old_prov.bytecode_hash!r};\n new bytecode hash {new_prov.bytecode_hash!r})\n but the function's version number was not.\n Change @version(major=) to indicate that your\n function's behavior has changed, or @version(minor=)\n to indicate that it has *not* changed.\n \"\"\"\n raise CodeVersioningError(oneline(message), new_prov.descriptor)\n # If the provenances nominally match, they must have essentially the\n # same structure.\n assert len(old_prov.dep_digests) == len(new_prov.dep_digests)\n # Since these provenances match nominally and have matching bytcode,\n # the mismatch must be in one of their dependencies. We'll iterate\n # through them to figure out which one.\n for old_dep_digest, new_dep_digest in zip(\n old_prov.dep_digests, new_prov.dep_digests\n ):\n # If this digest pair matches, it must not be where the problem is.\n if old_dep_digest.exact_hash == new_dep_digest.exact_hash:\n continue\n\n # Not all digests have provenances, but these should. Digests of\n # non-persisted values have provenances, and if these were persisted\n # then their exact hashes would be the same as their nominal hashes,\n # so they would have matched above.\n old_dep_prov = old_dep_digest.provenance\n new_dep_prov = new_dep_digest.provenance\n locate_mismatched_provenances_and_raise(old_dep_prov, new_dep_prov)\n assert False\n\n try:\n locate_mismatched_provenances_and_raise(old_prov, new_prov)\n except AssertionError as e:\n message = f\"\"\"\n Enncountered an internal error while performing an assisted versioning\n check. This should be impossible and is probably a bug in Bionic; please\n report this stace track to the developers. However, it's also likely\n that you need to update the ``@version`` annotation on the function\n that outputs {self._cache_accessor.provenance.descriptor}.\n If that doesn't fix the warning, you can try filtering the warning with\n ``warnings.filterwarnings``; deleting the disk cache; or disabling\n assisted versioning.\n \"\"\"\n logger.warn(oneline(message), exc_info=e)\n\n self._cache_accessor.update_provenance()",
"def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)",
"def test_version_is_written_into_file_info_file(self):\n # This functionality is only provided by the msvc compiler.\n if not (self.is_visual_studio_config() and self.is_shared_libraries_config()):\n return\n\n self.generate_project()\n self.build_target(simpleonelibcpftestprojectfixture.MYLIB_TESTS_TARGET)\n\n # VERIFY\n package = 'MyLib'\n packageType = 'LIB'\n owner = 'Knitschi'\n version = self.get_package_version(package)\n binBaseDir = self.locations.get_full_path_binary_output_folder(testprojectfixture.PARENT_CONFIG, testprojectfixture.COMPILER_CONFIG)\n \n libFile = binBaseDir / self.get_package_shared_lib_path(package, packageType, version)\n shortLibFile = self.get_shared_lib_short_name(package, packageType, version)\n\n print(libFile)\n\n # Read the properties from the binary file.\n props = testprojectfixture.get_file_properties(str(libFile))['StringFileInfo']\n\n # Compare the values\n self.assertEqual(props['CompanyName'], owner)\n self.assertEqual(props['FileDescription'], 'A C++ library used for testing the CPF')\n self.assertEqual(props['FileVersion'], '{0}'.format(version))\n self.assertEqual(props['InternalName'], 'MyLib')\n self.assertEqual(props['LegalCopyright'], 'Copyright {0} {1}'.format(datetime.datetime.now().year, owner) )\n self.assertEqual(props['OriginalFilename'], str(shortLibFile))\n self.assertEqual(props['ProductName'], 'MyLib')\n self.assertEqual(props['ProductVersion'], '{0}'.format(version))",
"def compatible_version(self):\n\n cursor = self.disk_connection.cursor()\n try:\n row = cursor.execute(\"\"\"\n SELECT COUNT(schema_version_hash) FROM version WHERE schema_version_hash=(?);\n \"\"\", (self.schema_version_hash,)).fetchone()\n return row[0] > 0\n except sqlite3.Error: # pylint: disable=broad-except\n return False",
"def test_loqusdb_wrong_version(loqus_exe):\n # GIVEN a loqusdb version < 2.5\n loqus_extension = LoqusDB(loqusdb_binary=loqus_exe, version=1.0)\n # WHEN instantiating an adapter\n with pytest.raises(SyntaxError):\n # THEN assert a syntax error is raised since version is wrong\n loqus_extension.version_check()",
"def validate_project_version(config: Dict[str, Any]) -> None:\n spacy_version = config.get(\"spacy_version\", None)\n if spacy_version and not is_compatible_version(about.__version__, spacy_version):\n err = (\n f\"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) \"\n f\"that's not compatible with the version of spaCy you're running \"\n f\"({about.__version__}). You can edit version requirement in the \"\n f\"{PROJECT_FILE} to load it, but the project may not run as expected.\"\n )\n msg.fail(err, exits=1)",
"def validate_metadata_directories() -> None:\n directory_contents = _get_third_party_python_libs_directory_contents()\n # Each python metadata directory name contains a python library name that\n # does not have uniform case. This is because we cannot guarantee the casing\n # of the directory names generated and there are no options that we can\n # provide to `pip install` to actually guarantee that a certain casing\n # format is used to create the directory names. The only official guidelines\n # for naming directories is that it must start with the string:\n # '<library_name>-<library-version>' but no casing guidelines are specified.\n # Therefore, in order to efficiently check if a python library's metadata\n # exists in a directory, we need to normalize the directory name. Otherwise,\n # we would need to check every permutation of the casing.\n normalized_directory_names = {\n normalize_directory_name(name)\n for name in os.listdir(common.THIRD_PARTY_PYTHON_LIBS_DIR)\n if os.path.isdir(os.path.join(common.THIRD_PARTY_PYTHON_LIBS_DIR, name))\n }\n for normalized_library_name, version_string in directory_contents.items():\n # Direct URL libraries are guaranteed to have metadata directories,\n # because that's how _get_third_party_python_libs_directory_contents\n # obtains the version_string being checked here.\n if version_string.startswith('git+'):\n continue\n # Possible names of the metadata directory installed when <library_name>\n # is installed.\n possible_normalized_directory_names = (\n _get_possible_normalized_metadata_directory_names(\n normalized_library_name, version_string))\n # If any of the possible metadata directory names show up in the\n # directory, that is confirmation that <library_name> was installed\n # correctly with the correct metadata.\n if not any(\n normalized_directory_name in normalized_directory_names\n for normalized_directory_name in\n possible_normalized_directory_names):\n raise Exception(\n 'The python library %s was installed without the correct '\n 'metadata folders which may indicate that the convention for '\n 'naming the metadata folders have changed. Please go to '\n '`scripts/install_python_prod_dependencies` and modify our '\n 'assumptions in the '\n '_get_possible_normalized_metadata_directory_names'\n ' function for what metadata directory names can be.' %\n normalized_library_name)",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def test_platform_does_not_exist(self):\n version = Version.objects.get(pk=115509)\n for file in version.files.all():\n file.platform = amo.PLATFORM_LINUX.id\n file.save()\n\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def check_build(self, bld_num):\n # QQQ In future this should be replaced with a query to the\n # build database\n bld_dir = os.path.join(self.ver_dir, str(bld_num))\n for plat in self.plats.keys():\n if self.plats[plat]:\n # QQQ Assumes format of filename unique to couchbase-server\n files = glob.glob(\"{}/couchbase-server-enterprise?{}*{}*\".format(\n bld_dir, self.version, plat\n ))\n files = [x for x in files if not (x.endswith(\".md5\") or x.endswith(\".sha256\"))]\n if len(files) == 0:\n print (\"Platform {} is missing\".format(plat))\n return False\n return True",
"def testVersion(self):\n project = self.session.create_project()\n\n self.assertTrue(project.version is None,\n \"New template project has no version.\")\n\n with self.assertRaises(ValueError):\n project.version = \"test\"",
"def _version_test(self, archive_dir):\r\n root = os.listdir(archive_dir)\r\n course_directory = archive_dir / root[0]\r\n return get_version(course_directory)",
"def test_platform_exists(self):\n version = Version.objects.get(pk=115509)\n for file in version.files.all():\n file.platform = amo.PLATFORM_LINUX.id\n file.save()\n\n version, file = self.get('1.2', self.version_int,\n self.app, amo.PLATFORM_LINUX)\n assert version == self.version_1_2_2",
"def test_set_config__unsupported_datafile_version(self):\n\n test_datafile = json.dumps(self.config_dict_with_features)\n mock_logger = mock.Mock()\n mock_notification_center = mock.Mock()\n\n with mock.patch('optimizely.config_manager.BaseConfigManager._validate_instantiation_options'):\n project_config_manager = config_manager.StaticConfigManager(\n datafile=test_datafile, logger=mock_logger, notification_center=mock_notification_center,\n )\n\n invalid_version_datafile = self.config_dict_with_features.copy()\n invalid_version_datafile['version'] = 'invalid_version'\n test_datafile = json.dumps(invalid_version_datafile)\n\n # Call set_config with datafile having invalid version\n project_config_manager._set_config(test_datafile)\n mock_logger.error.assert_called_once_with(\n 'This version of the Python SDK does not support ' 'the given datafile version: \"invalid_version\".'\n )\n self.assertEqual(0, mock_notification_center.call_count)",
"def addon_phlex(self):\n print(\"Checking phlex version\")\n repo = self.github.get_repo('d8ahazard/Phlex')\n remote_version = list(repo.get_commits())[0].sha\n file = \"{}/Dockerfile\".format(self.name)\n remote_file = self.get_file_obj(file)\n masterfile = self.repoupdater.get_file_content(remote_file)\n file_version = masterfile.split('Phlex/archive/')[1]\n file_version = file_version.split('.zip')[0]\n if self.verbose:\n print(\"Current version\", file_version)\n print(\"Available version\", remote_version)\n if remote_version != file_version:\n msg = COMMIT_MSG.format('Phlex', remote_version)\n new_content = self.repoupdater.get_file_content(remote_file)\n new_content = new_content.replace(file_version, remote_version)\n self.repoupdater.commit(file, msg, new_content, remote_file.sha)\n else:\n print(\"Phlex already have the newest version\", file_version)",
"def test_new_version_is_propagated_to_ConfigVersion_file(self):\n # Setup\n self.generate_project()\n self.build_target(simpleonelibcpftestprojectfixture.PACKAGE_ARCHIVES_MYLIB_TARGET)\n\n # Execute\n # Make a dummy change\n sourceFile = self.locations.get_full_path_source_folder() / \"MyLib/MyLib/function.cpp\"\n with open(str(sourceFile), \"a\") as f:\n f.write(\"\\n\")\n # Commit the change\n self.osa.execute_command_output(\n 'git commit . -m \"Dummy change\"',\n cwd=self.cpf_root_dir,\n print_output=miscosaccess.OutputMode.ON_ERROR\n )\n # Do the incremental generate\n self.run_python_command('2_Generate.py')\n # Build the package archive target\n self.build_target(simpleonelibcpftestprojectfixture.PACKAGE_ARCHIVES_MYLIB_TARGET)\n\n # Verify\n packageVersionFromGit = self.get_package_version(\"MyLib\") # The version from git\n packageVersionVar = 'PACKAGE_VERSION'\n packageVersionConfigFile = self.locations.get_full_path_config_makefile_folder(testprojectfixture.PARENT_CONFIG) / \"MyLib/_pckg/dev/MyLib/lib/cmake/MyLib/MyLibConfigVersion.cmake\"\n packageVersionFromConfigFile = self.get_cmake_variables_in_file([packageVersionVar], packageVersionConfigFile)[packageVersionVar]\n\n self.assertEqual(packageVersionFromConfigFile, packageVersionFromGit)",
"def get_version_from_project_dir(self):\n versions = self.get_versions_from_path(self.project_directory)\n version = None\n\n if versions and len(versions):\n version = versions[0]\n\n return version",
"def test_dev_version_if_dirty(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n # Test `patch` part\n self.get_dev_version('patch')\n self.assertEqual(self.project.version, '1.2.4.dev')\n # Test `minor` part\n self.get_dev_version('minor')\n self.assertEqual(self.project.version, '1.3.0.dev')\n # Test `major` part\n self.get_dev_version('major')\n self.assertEqual(self.project.version, '2.0.0.dev')\n # Test incorrect part\n self.project.set_property('semver_git_tag_increment_part', 'incorrect')\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n (\"Incorrect value for `semver_git_tag_increment_part` property. \"\n \"Has to be in (`major`, `minor`, `patch`), \"\n \"but `incorrect` passed.\") in err_msg)",
"def _check_and_mk(self, custom_db_path):\n rayvision_db_env = os.environ.get(RAYVISION_DB, \"\")\n if bool(custom_db_path) and os.path.exists(custom_db_path):\n db_path = custom_db_path\n elif os.path.exists(rayvision_db_env):\n db_path = rayvision_db_env\n else:\n if self.api.user_info['local_os'] == \"windows\":\n db_path = os.path.join(os.environ[WINDOWS_LOCAL_ENV], RENDERFARM_SDK)\n else:\n db_path = os.path.join(os.environ[LINUX_LOCAL_ENV], RENDERFARM_SDK)\n\n return db_path",
"def test_check_django_compatability_mismatch(self):\n django_spanner.__version__ = \"2.2\"\n django.VERSION = (3, 1, 19, \"alpha\", 0)\n with self.assertRaises(ImproperlyConfigured):\n check_django_compatability(self.SUPPORTED_DJANGO_VERSIONS)",
"def needs_commons_db(self):\n return False",
"def test_getVersion(self):\n version = (\"twisted\", 2, 1, 0)\n project = self.makeProject(version)\n self.assertEqual(project.getVersion(), Version(*version))",
"def check_version():\n reset_flag = False\n try:\n data = du.read_yml(du.DEFAULT)\n if (\n data[\"version\"].split(\".\")[0] != __version__.split(\".\")[0]\n ): # If Version if different from \"1.x.y\" remove data:\n reset_flag = True\n except (KeyError, FileNotFoundError, TypeError):\n reset_flag = True\n\n if reset_flag:\n print(\"Your configuration file version is older than 1.0.0\")\n print(\n \"Your .Experiment file will be removed, please run daf.init to generate an up-to-date file\"\n )\n if os.path.isfile(du.DEFAULT):\n os.remove(du.DEFAULT)\n sys.exit(0)"
] | [
"0.65770215",
"0.55975527",
"0.5539493",
"0.5345174",
"0.5288473",
"0.52460235",
"0.5190399",
"0.5163017",
"0.51418436",
"0.51215893",
"0.51166844",
"0.50982404",
"0.5070804",
"0.5055564",
"0.5011875",
"0.49932083",
"0.496476",
"0.49626192",
"0.4957123",
"0.49514368",
"0.4948937",
"0.49288005",
"0.49193522",
"0.4886219",
"0.48854843",
"0.48716393",
"0.48533374",
"0.48499978",
"0.48410913",
"0.48218897"
] | 0.7714848 | 0 |
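The row above pairs the compatibility-check docstring with its ConfigParser-based implementation. For readers skimming these rows, here is a minimal, self-contained Python 3 sketch of the same read-and-compare pattern; the file name metadata.txt, the section name ecohydrolib, and the version string 2.0 are assumptions made for the example, not values taken from EcohydroLib.

import configparser
import errno
import os

_EXPECTED_VERSION = "2.0"  # assumed stand-in for GenericMetadata._ecohydrolibVersion

class MetadataVersionError(Exception):
    """Raised when the stored metadata version differs from the running library."""

def check_metadata_version(project_dir, filename="metadata.txt"):
    # Sketch of the check: read the store, compare the recorded version.
    path = os.path.join(project_dir, filename)
    if not os.path.exists(path):
        return  # no store yet, nothing to check
    if not os.access(path, os.R_OK):
        raise IOError(errno.EACCES, "Unable to read metadata store for project %s" % project_dir)
    config = configparser.RawConfigParser()
    config.read(path)
    stored = config.get("ecohydrolib", "version", fallback=None)
    if stored is not None and stored != _EXPECTED_VERSION:
        raise MetadataVersionError(stored)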
Write the EcohydroLib version to the ECOHYDROLIB_SECION section of the metadata. config: ConfigParser to write version information to. Raises MetadataVersionError if a version already exists in the metadata store and differs from GenericMetadata._ecohydrolibVersion. | def _writeVersionToMetadata(config):
if not config.has_section(GenericMetadata.ECOHYDROLIB_SECION):
config.add_section(GenericMetadata.ECOHYDROLIB_SECION)
if not config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \
GenericMetadata.VERSION_KEY):
config.set(GenericMetadata.ECOHYDROLIB_SECION, \
GenericMetadata.VERSION_KEY, GenericMetadata._ecohydrolibVersion)
return
metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \
GenericMetadata.VERSION_KEY)
if metadataVersion != GenericMetadata._ecohydrolibVersion:
raise MetadataVersionError(metadataVersion) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bumpversion(path=\"setup.cfg\"):\n config = ConfigParser()\n config.read(path)\n cfg = open(path, 'w')\n new_version = \"0.0.0\"\n if config.has_option('metadata', 'version'):\n old_version = config.get('metadata', 'version')\n major, minor, patch = old_version.split(\".\")\n new_version = \"%s.%s.%s\" % (major, minor, int(patch) + 1)\n if not config.has_section('metadata'):\n config.add_section('metadata')\n config.set('metadata', 'version', new_version)\n config.write(cfg)\n cfg.close()\n return new_version",
"def _save_version_file(cls, hivemind_version, git_revision, git_date):\n with open(\"hive/version.py\", 'w') as version_file:\n version_file.write(\"# generated by setup.py\\n\")\n version_file.write(\"# contents will be overwritten\\n\")\n version_file.write(\"VERSION = '{}'\\n\".format(hivemind_version))\n version_file.write(\"GIT_REVISION = '{}'\\n\".format(git_revision))\n version_file.write(\"GIT_DATE = '{}'\\n\".format(git_date))",
"def checkMetadataVersion(projectDir):\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n if config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def update_setupcfg_version(filename, version):\n\n setup_cfg = open(filename).readlines()\n current_section = None\n updated = False\n\n for idx, line in enumerate(setup_cfg):\n m = ConfigParser.SECTCRE.match(line)\n if m:\n if current_section == 'metadata':\n # We already parsed the entire metadata section without finding\n # a version line, and are now moving into a new section\n break\n current_section = m.group('header')\n continue\n\n if '=' not in line:\n continue\n\n opt, val = line.split('=', 1)\n opt, val = opt.strip(), val.strip()\n if current_section == 'metadata' and opt == 'version':\n setup_cfg[idx] = 'version = %s\\n' % version\n updated = True\n break\n\n if updated:\n open(filename, 'w').writelines(setup_cfg)\n logger.info(\"Set %s's version to %r\" % (os.path.basename(filename),\n version))",
"def write_version(settings, version, force=False):\n semver_path = settings['semver_path']\n filename = settings['semver_branch']\n path = os.path.join(semver_path, filename)\n logger.debug(f'write version:{version} to path:{path} with force:{force}')\n\n path_exists = os.path.exists(path)\n if path_exists:\n current_version = read_version(settings)\n if current_version == version:\n logger.debug(f'version is same as current version {current_version}')\n return\n\n if not path_exists or force:\n write_file(path, version)\n semver_repo = Repo(semver_path)\n index = semver_repo.index\n index.add([filename])\n semver_user_name = settings['semver_user_name']\n semver_user_email = settings['semver_user_email']\n author = Actor(semver_user_name, semver_user_email)\n index.commit(f'semver({filename}): {version}', author=author, committer=author, parent_commits=None)",
"def update_version(self, version):\n self._metadata['version'] = version\n\n if self._type == '.json':\n with open(self._filename, 'w') as f:\n f.write(json.dumps(self._metadata, indent=2))\n\n dof_filename = os.path.join(self.path, self.name + '.dof')\n if os.path.isfile(dof_filename):\n dof_file = DOFFile(dof_filename)\n dof_file.update_version(version)",
"def test_new_version_is_propagated_to_ConfigVersion_file(self):\n # Setup\n self.generate_project()\n self.build_target(simpleonelibcpftestprojectfixture.PACKAGE_ARCHIVES_MYLIB_TARGET)\n\n # Execute\n # Make a dummy change\n sourceFile = self.locations.get_full_path_source_folder() / \"MyLib/MyLib/function.cpp\"\n with open(str(sourceFile), \"a\") as f:\n f.write(\"\\n\")\n # Commit the change\n self.osa.execute_command_output(\n 'git commit . -m \"Dummy change\"',\n cwd=self.cpf_root_dir,\n print_output=miscosaccess.OutputMode.ON_ERROR\n )\n # Do the incremental generate\n self.run_python_command('2_Generate.py')\n # Build the package archive target\n self.build_target(simpleonelibcpftestprojectfixture.PACKAGE_ARCHIVES_MYLIB_TARGET)\n\n # Verify\n packageVersionFromGit = self.get_package_version(\"MyLib\") # The version from git\n packageVersionVar = 'PACKAGE_VERSION'\n packageVersionConfigFile = self.locations.get_full_path_config_makefile_folder(testprojectfixture.PARENT_CONFIG) / \"MyLib/_pckg/dev/MyLib/lib/cmake/MyLib/MyLibConfigVersion.cmake\"\n packageVersionFromConfigFile = self.get_cmake_variables_in_file([packageVersionVar], packageVersionConfigFile)[packageVersionVar]\n\n self.assertEqual(packageVersionFromConfigFile, packageVersionFromGit)",
"def set_version(self, version):\n\n def update_version(version, filepath):\n with open(filepath, \"r\") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents_version(contents, version)\n assert contents != new_contents\n with open(filepath, \"w\") as stream:\n stream.write(new_contents)\n\n update_version(version, os.path.join(\".\", \"package.json\"))\n update_version(version, os.path.join(\".\", \"src\", \"setup.py\"))\n update_version(\n version, os.path.join(\".\", \"src\", \"robocorp_code\", \"__init__.py\")\n )",
"def create_version_file(version='unknown', gitmeta=''):\n\tfname = join(dirname(abspath(__file__)), 'MHLogin', '_version.py')\n\tf = open(fname, 'wb')\n\tf.write(VERSION_PY % {'version': version, 'gitmeta': gitmeta, })\n\tf.close()",
"def _post_src_install_write_metadata(settings):\n\n\teapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])\n\n\tbuild_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')\n\n\tmetadata_keys = ['IUSE']\n\tif eapi_attrs.iuse_effective:\n\t\tmetadata_keys.append('IUSE_EFFECTIVE')\n\n\tfor k in metadata_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tif v is not None:\n\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\t# The following variables are irrelevant for virtual packages.\n\tif settings.get('CATEGORY') != 'virtual':\n\n\t\tfor k in ('CHOST',):\n\t\t\tv = settings.get(k)\n\t\t\tif v is not None:\n\t\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),\n\t\tmode='w', encoding=_encodings['repo.content'],\n\t\terrors='strict') as f:\n\t\tf.write(_unicode_decode(\"%.0f\\n\" % (time.time(),)))\n\n\tuse = frozenset(settings['PORTAGE_USE'].split())\n\tfor k in _vdb_use_conditional_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tfilename = os.path.join(build_info_dir, k)\n\t\tif v is None:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\n\t\tif k.endswith('DEPEND'):\n\t\t\tif eapi_attrs.slot_operator:\n\t\t\t\tcontinue\n\t\t\ttoken_class = Atom\n\t\telse:\n\t\t\ttoken_class = None\n\n\t\tv = use_reduce(v, uselist=use, token_class=token_class)\n\t\tv = paren_enclose(v)\n\t\tif not v:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\terrors='strict') as f:\n\t\t\tf.write(_unicode_decode(v + '\\n'))\n\n\tif eapi_attrs.slot_operator:\n\t\tdeps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())\n\t\tfor k, v in deps.items():\n\t\t\tfilename = os.path.join(build_info_dir, k)\n\t\t\tif not v:\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\t\terrors='strict') as f:\n\t\t\t\tf.write(_unicode_decode(v + '\\n'))",
"def _update_aea_version(cls, agent_config: AgentConfig):\n for item in agent_config.package_dependencies:\n type_ = item.component_type.to_plural()\n dotted_path = f\"vendor.{item.author}.{type_}.{item.name}.aea_version\"\n path = os.path.join(\"vendor\", item.author, type_, item.name)\n cls.nested_set_config(dotted_path, cls.AEA_VERSION_SPECIFIER)\n cls.run_cli_command(\"fingerprint\", \"by-path\", path, cwd=cls._get_cwd())",
"def load_ocp_version_config_file(self, ocp_upgrade_version):\n\n version = Version.coerce(ocp_upgrade_version)\n short_ocp_upgrade_version = \".\".join([str(version.major), str(version.minor)])\n version_before_upgrade = parse_version(\n config.DEPLOYMENT.get(\"installer_version\")\n )\n version_post_upgrade = parse_version(ocp_upgrade_version)\n version_change = version_post_upgrade > version_before_upgrade\n if version_change:\n version_config_file = os.path.join(\n constants.OCP_VERSION_CONF_DIR,\n f\"ocp-{short_ocp_upgrade_version}-config.yaml\",\n )\n logger.debug(f\"config file to be loaded: {version_config_file}\")\n load_config_file(version_config_file)\n else:\n logger.info(\n f\"Upgrade version {version_post_upgrade} is not higher than old version:\"\n f\" {version_before_upgrade}, new config file will not be loaded\"\n )",
"def __write_build_version(file_path, identifier, version):\n\n with open(file_path) as fp:\n lines = fp.readlines()\n\n new_lines = []\n for line in lines:\n if line.find(identifier) > -1:\n parts = line.split(identifier)\n parts[-1] = version + '\\n'\n new_line = identifier.join(parts)\n new_lines.append(new_line)\n else:\n new_lines.append(line)\n\n fp2 = open(file_path, 'w')\n fp2.write(''.join(new_lines))\n fp2.close()",
"def configversion(self, args):\n print(CONFIG_VERSION)",
"def _init_obo_version(self, line):\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]",
"def upgrade_config_format(self):\n # migrate older config files\n if self.version == 1:\n # capture_init()\n self.version = 3\n\n # If token exists check still valid and can login\n if self.token and self.token != DEFAULT_TOKEN:\n from .api import ping\n\n with suppress(Exception):\n self.username = ping(config=self, cli_login=True, verbose=False)\n\n self.save()\n elif self.version == 2:\n # re-init against new server\n # capture_init()\n self.version = 3\n self.save()",
"def version_keyword(dist, attr, value):\n if value == \"PBR\":\n from pbr.util import setup_cfg_to_setup_kwargs\n\n path = \"setup.cfg\"\n parser = ConfigParser()\n if not os.path.exists(path):\n raise ValueError(\"file '%s' does not exist\" % os.path.abspath(path))\n parser.read(path)\n config = {}\n for section in parser.sections():\n config[section] = dict(parser.items(section))\n attrs = setup_cfg_to_setup_kwargs(config)\n version = str(Version(attrs[\"name\"]))\n os.environ[\"PBR_VERSION\"] = version\n else:\n version = str(Version(dist.metadata.get_name()))\n dist.metadata.version = version",
"def parse_config_versions(tox_config, plugin_config):\n config_asdf = tox_config.get(\"asdf\", {})\n pypy2 = config_asdf.get(\"pypy2\", \"pypy2.7\")\n pypy3 = config_asdf.get(\"pypy3\", \"pypy3.8\")\n plugin_config.pypy2_version = pypy2\n plugin_config.pypy3_version = pypy3",
"def replace_version(version):\n filename = 'conda.recipe/meta.yaml'\n pattern = r'version: .*'\n replacement = 'version: {version}'.format(version=version)\n lines = []\n with open(filename) as meta_file:\n for line in meta_file.readlines():\n lines.append(re.sub(pattern, replacement, line))\n with open(filename, 'w') as meta_file:\n for line in lines:\n meta_file.write(line)",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def test_version_is_written_into_file_info_file(self):\n # This functionality is only provided by the msvc compiler.\n if not (self.is_visual_studio_config() and self.is_shared_libraries_config()):\n return\n\n self.generate_project()\n self.build_target(simpleonelibcpftestprojectfixture.MYLIB_TESTS_TARGET)\n\n # VERIFY\n package = 'MyLib'\n packageType = 'LIB'\n owner = 'Knitschi'\n version = self.get_package_version(package)\n binBaseDir = self.locations.get_full_path_binary_output_folder(testprojectfixture.PARENT_CONFIG, testprojectfixture.COMPILER_CONFIG)\n \n libFile = binBaseDir / self.get_package_shared_lib_path(package, packageType, version)\n shortLibFile = self.get_shared_lib_short_name(package, packageType, version)\n\n print(libFile)\n\n # Read the properties from the binary file.\n props = testprojectfixture.get_file_properties(str(libFile))['StringFileInfo']\n\n # Compare the values\n self.assertEqual(props['CompanyName'], owner)\n self.assertEqual(props['FileDescription'], 'A C++ library used for testing the CPF')\n self.assertEqual(props['FileVersion'], '{0}'.format(version))\n self.assertEqual(props['InternalName'], 'MyLib')\n self.assertEqual(props['LegalCopyright'], 'Copyright {0} {1}'.format(datetime.datetime.now().year, owner) )\n self.assertEqual(props['OriginalFilename'], str(shortLibFile))\n self.assertEqual(props['ProductName'], 'MyLib')\n self.assertEqual(props['ProductVersion'], '{0}'.format(version))",
"def set_version(self, bundle, ctx, filename, version):",
"def test_error_on_incorrect_version(self):\n config = dict(nodes={}, version=2)\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._deployment_from_configuration,\n config, set())\n self.assertEqual(\n \"Deployment configuration has an error. \"\n \"Incorrect version specified.\",\n exception.message\n )",
"def set_version(v):\n old = get_version()\n sys.stderr.write('%s -> %s\\n' % (old, v))\n with open(INIT, 'r+') as f:\n text = f.read()\n text = pattern.sub(\"__version__ = %r\" % v, text)\n f.seek(0)\n f.truncate()\n f.write(text)",
"def _volume_metadata_set(self, volume_path, data):\n data['compat_version'] = 1\n data['version'] = self.version\n return self._metadata_set(self._volume_metadata_path(volume_path), data)",
"def update_version(self, version):\n self.version = CPE.escape_for_cpe23_fs(version)",
"async def update_version(self, version: int):\n async with open(self.__file_name, mode=\"r\") as auth_file:\n tag_data = json.loads(await auth_file.read())\n await auth_file.close()\n async with open(self.__file_name, mode=\"w\") as auth:\n tag_data[\"version\"] = version\n await auth.write(json.dumps(tag_data, indent=2, sort_keys=True))\n await auth.close()\n self.__version = version",
"def save_version(version_name, yml):\n\n output_1 = version(version_name)\n output_2 = path(yml)\n return ' - Save version ' + output_1 + '\\n' + output_2",
"def version():\n g.data['oar_server_version'] = VERSION\n g.data['oar_version'] = VERSION\n g.data['oar_lib_version'] = VERSION\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION",
"def persist_version():\r\n #it's not necessary to do this every time we persist, but\r\n #this way we don't have to worry about race conditions with resume.py\r\n #reading this\r\n f = open(os.path.join(get_persist_root_dir(), \"sparkVersion\"), 'w')\r\n from spark.internal.version import VERSION \r\n f.write(VERSION)\r\n f.close()"
] | [
"0.6119199",
"0.5910067",
"0.58422333",
"0.57873607",
"0.54968417",
"0.5398081",
"0.5384361",
"0.53475744",
"0.5263908",
"0.5156256",
"0.5146889",
"0.50837433",
"0.5076105",
"0.50752836",
"0.5016316",
"0.49923375",
"0.49213877",
"0.48776528",
"0.48719254",
"0.48652685",
"0.48600757",
"0.48418644",
"0.48323607",
"0.4825559",
"0.48196918",
"0.481704",
"0.48091227",
"0.4793526",
"0.47934005",
"0.47881618"
] | 0.8542046 | 0 |
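The write-side helper in the row above only stamps a version key into a ConfigParser object and refuses to proceed on a mismatch. A hedged round-trip sketch, under the same assumptions as the previous example (Python 3 configparser, made-up section and version names), shows how the stamp behaves when applied twice.

import configparser

_LIB_VERSION = "2.0"  # assumed stand-in for GenericMetadata._ecohydrolibVersion

def stamp_version(config, section="ecohydrolib", key="version"):
    # Write side: add the section/key if absent, otherwise verify the stored value.
    if not config.has_section(section):
        config.add_section(section)
    if not config.has_option(section, key):
        config.set(section, key, _LIB_VERSION)
        return
    stored = config.get(section, key)
    if stored != _LIB_VERSION:
        raise ValueError("metadata written by version %s, running %s" % (stored, _LIB_VERSION))

# Round trip: stamping twice with the same library version is a no-op.
cfg = configparser.RawConfigParser()
stamp_version(cfg)
stamp_version(cfg)
assert cfg.get("ecohydrolib", "version") == _LIB_VERSION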
Delete an entry from the given section of the metadata store for a given project. context: Context object containing projectDir, the path of the project whose metadata store is to be deleted from. section: The section the key is to be deleted from. key: The key to be deleted from the given section of the project metadata. callback: A function that should be called before deleting the entry; the function takes as input the config object. Raises IOError(errno.EACCES) if the metadata store for the project is not writable, Exception if section is not a valid GenericMetadata section, and MetadataVersionError if existing version information in the metadata store does not match the version of the currently running EcohydroLib. | def deleteEntryFromSection(context, section, key, callback=None):
projectDir = context.projectDir
if section not in GenericMetadata.SECTIONS:
raise Exception( "%s is an unknown section" % (section,) )
lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)
metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)
if os.path.exists(metadataFilepath):
if not os.access(metadataFilepath, os.W_OK):
raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % \
(projectDir,))
else:
if not os.access(projectDir, os.W_OK):
raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % \
(projectDir,))
# Create metadata file as it does not exist
metadataFD = open(metadataFilepath, 'w')
metadataFD.close()
# Wait for lockfile to be relinquished
while os.path.exists(lockFilepath):
time.sleep(5)
# Write lock file
open(lockFilepath, 'w').close()
# Read metadata store
config = ConfigParser.RawConfigParser()
config.read(metadataFilepath)
GenericMetadata._writeVersionToMetadata(config)
if callback:
callback(config)
# Delete entry
if config.has_section(section):
config.remove_option(section, key)
# Write metadata store
config.write(open(metadataFilepath, 'w'))
# Remove lock file
os.unlink(lockFilepath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def delete(self, section, name):\n section = self._getSettingName(section)\n self._config.remove_option(section, name)\n self.save()",
"def DeleteSetting(appname, section, key):\n settings = _OptionsDB(appname)\n settings.delete(section, key)",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n self._impl.delete_metadata(volume, keys=keys, deletes=10,\n delete_size=3)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def _delete(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n config = misc_utils.resolve_config(\n kwargs.pop('config', None),\n kwargs.pop('config_file', None),\n required=False\n )\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name),\n method='DELETE',\n config=config\n )",
"def deleteSection():\n data = request.json\n if \"agenda_id\" in data and \"section_position\" in data:\n if connectMongo.getAgendaById(data.get(\"agenda_id\")).found:\n responseWrapper: ResponseWrapper = connectMongo.deleteSection(data.get(\"agenda_id\"),\n data.get(\"section_position\"))\n if responseWrapper.operationDone:\n return jsonify(response=200, agenda=responseWrapper.object.makeJson())\n else:\n return jsonify(response=501, msg=\"Delete Failed\")\n else:\n return jsonify(response=404, msg=\"Agenda not found\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")",
"def deleteClimatePointEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key)",
"def test_vault_delete_vault_section(self):\n pass",
"def remove(ctx, name, project_root):\n\n if name == 'logme':\n raise LogmeError(\"'logme' master logger configuration cannot be removed!\")\n\n with ensure_conf_exist(project_root) as logme_conf:\n\n config = read_config(logme_conf)\n config.remove_section(name)\n\n with logme_conf.open('w+') as conf:\n config.write(conf)",
"def test_unpacker_delete_manifest_metadata_v3(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n mock_os_remove = mocker.patch(\"os.remove\")\n mock_os_remove.side_effect = [NameError, None]\n p._delete_manifest_metadata(\"0869ea50-e437-443f-8cdb-31a350f88e57\")\n mock_os_remove.assert_called_with(\"/tmp/lta/testing/unpacker/outbox/0869ea50-e437-443f-8cdb-31a350f88e57.metadata.ndjson\")",
"def delete(self, *args, **kwargs):\n print(\"form delete\")\n self.is_deleted = True\n current_section_sequence = self.section_sequence\n\n #This can be modified if we have to hard delete the sections\n\n # for sec_id in current_section_sequence:\n # current_section = Sections.objects.get(id = sec_id )\n # current_section.delete()\n\n self.save()",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def delete_metadata(self, scope, name, key, *, session: \"Session\"):\n if not json_implemented(session=session):\n raise NotImplementedError\n\n try:\n row = session.query(models.DidMeta).filter_by(scope=scope, name=name).one()\n existing_meta = getattr(row, 'meta')\n # Oracle returns a string instead of a dict\n if session.bind.dialect.name in ['oracle', 'sqlite'] and existing_meta is not None:\n existing_meta = json_lib.loads(existing_meta)\n\n if key not in existing_meta:\n raise exception.KeyNotFound(key)\n\n existing_meta.pop(key, None)\n\n row.meta = None\n session.flush()\n\n # Oracle insert takes a string as input\n if session.bind.dialect.name in ['oracle', 'sqlite']:\n existing_meta = json_lib.dumps(existing_meta)\n\n row.meta = existing_meta\n except NoResultFound:\n raise exception.DataIdentifierNotFound(f\"Key not found for data identifier '{scope}:{name}'\")",
"def deleteClimateGridEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key)",
"def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()",
"def delete(name, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n if not phonebook_data.get(name):\n raise NoEntryError(\"This entry does not exist! \"\n \"(Names are case-sensitive.)\")\n\n else:\n print \"Deleting entry:\", name, phonebook_data[name]\n del phonebook_data[name]\n save(phonebook_data, phonebook)",
"def delete_section_or_option(config_file, section, option=None):\n error, parser = _get_config_parsser(config_file)\n if error:\n return error, parser\n try:\n # remove option\n if option:\n removed = parser.remove_option(section, option)\n # remove section\n else:\n removed = parser.remove_section(section)\n if not removed:\n if option:\n return 1, \"no option\"\n else:\n return 1, \"no section\"\n except Exception, e:\n return 1, e\n # back up config file\n _backup_config(config_file)\n # write parser to file\n return _write_config_parser_to_file(parser, config_file)",
"def DeleteConfig(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def do_delete_configured_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No storage specified.\")\n return\n self.do_coroutine(self._localStorageRoutines.delete_configured_volume_routine(args[0]))",
"def exposed_delete_data(self, chunk_id):\n local_filename = self.chunk_filename(chunk_id)\n if not os.path.isfile(local_filename):\n return None\n os.remove(local_filename)",
"def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)",
"def _prep_metadata(md_sect, path):\n if not set(md_sect).issuperset(metadata_required_fields):\n missing = metadata_required_fields - set(md_sect)\n raise ConfigError(\"Required fields missing: \" + '\\n'.join(missing))\n\n module = md_sect.get('module')\n if not module.isidentifier():\n raise ConfigError(\"Module name %r is not a valid identifier\" % module)\n\n md_dict = {}\n\n # Description file\n if 'description-file' in md_sect:\n description_file = path.parent / md_sect.get('description-file')\n try:\n with description_file.open(encoding='utf-8') as f:\n raw_desc = f.read()\n except FileNotFoundError:\n raise ConfigError(\n \"Description file {} does not exist\".format(description_file)\n )\n ext = description_file.suffix\n try:\n mimetype = readme_ext_to_content_type[ext]\n except KeyError:\n log.warning(\"Unknown extension %r for description file.\", ext)\n log.warning(\" Recognised extensions: %s\",\n \" \".join(readme_ext_to_content_type))\n mimetype = None\n\n if mimetype == 'text/x-rst':\n # rst check\n stream = io.StringIO()\n res = render(raw_desc, stream)\n if not res:\n log.warning(\"The file description seems not to be valid rst for PyPI;\"\n \" it will be interpreted as plain text\")\n log.warning(stream.getvalue())\n\n md_dict['description'] = raw_desc\n md_dict['description_content_type'] = mimetype\n\n if 'urls' in md_sect:\n project_urls = md_dict['project_urls'] = []\n for label, url in sorted(md_sect.pop('urls').items()):\n project_urls.append(\"{}, {}\".format(label, url))\n\n for key, value in md_sect.items():\n if key in {'description-file', 'module'}:\n continue\n if key not in metadata_allowed_fields:\n closest = difflib.get_close_matches(key, metadata_allowed_fields,\n n=1, cutoff=0.7)\n msg = \"Unrecognised metadata key: {!r}\".format(key)\n if closest:\n msg += \" (did you mean {!r}?)\".format(closest[0])\n raise ConfigError(msg)\n\n k2 = key.replace('-', '_')\n md_dict[k2] = value\n if key in metadata_list_fields:\n if not isinstance(value, list):\n raise ConfigError('Expected a list for {} field, found {!r}'\n .format(key, value))\n if not all(isinstance(a, str) for a in value):\n raise ConfigError('Expected a list of strings for {} field'\n .format(key))\n elif key == 'requires-extra':\n if not isinstance(value, dict):\n raise ConfigError('Expected a dict for requires-extra field, found {!r}'\n .format(value))\n if not all(isinstance(e, list) for e in value.values()):\n raise ConfigError('Expected a dict of lists for requires-extra field')\n for e, reqs in value.items():\n if not all(isinstance(a, str) for a in reqs):\n raise ConfigError('Expected a string list for requires-extra. (extra {})'\n .format(e))\n else:\n if not isinstance(value, str):\n raise ConfigError('Expected a string for {} field, found {!r}'\n .format(key, value))\n\n # What we call requires in the ini file is technically requires_dist in\n # the metadata.\n if 'requires' in md_dict:\n md_dict['requires_dist'] = md_dict.pop('requires')\n\n # And what we call dist-name is name in the metadata\n if 'dist_name' in md_dict:\n md_dict['name'] = md_dict.pop('dist_name')\n\n # Move dev-requires into requires-extra\n reqs_noextra = md_dict.pop('requires_dist', [])\n reqs_by_extra = md_dict.pop('requires_extra', {})\n dev_requires = md_dict.pop('dev_requires', None)\n if dev_requires is not None:\n if 'dev' in reqs_by_extra:\n raise ConfigError(\n 'dev-requires occurs together with its replacement requires-extra.dev.')\n else:\n log.warning(\n '“dev-requires = ...” is obsolete. 
Use “requires-extra = {\"dev\" = ...}” instead.')\n reqs_by_extra['dev'] = dev_requires\n\n # Add requires-extra requirements into requires_dist\n md_dict['requires_dist'] = \\\n reqs_noextra + list(_expand_requires_extra(reqs_by_extra))\n\n md_dict['provides_extra'] = sorted(reqs_by_extra.keys())\n\n # For internal use, record the main requirements as a '.none' extra.\n reqs_by_extra['.none'] = reqs_noextra\n\n return md_dict, module, reqs_by_extra"
] | [
"0.5888773",
"0.542328",
"0.5296165",
"0.52698547",
"0.5267947",
"0.52364236",
"0.5207931",
"0.51816064",
"0.5181545",
"0.5153849",
"0.51224846",
"0.50526917",
"0.50315636",
"0.50260997",
"0.50242794",
"0.5008035",
"0.4993641",
"0.49846393",
"0.49762422",
"0.4963163",
"0.4955998",
"0.4949326",
"0.49476847",
"0.49453136",
"0.49448407",
"0.4930686",
"0.49238768",
"0.49227807",
"0.48884684",
"0.48118395"
] | 0.7882322 | 0 |
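The row above (deleteEntryFromSection) and the one that follows (writeEntryToSection) share the same discipline: busy-wait on a lock file, read the store, apply an optional callback, mutate one entry, write the store back, and remove the lock. A minimal Python 3 sketch of that pattern as a context manager follows; the file names are assumptions, the five-second poll mirrors the library code shown above, and this is a sketch of the pattern, not EcohydroLib's actual API.

import configparser
import contextlib
import os
import time

@contextlib.contextmanager
def locked_metadata(project_dir, filename="metadata.txt", lockname="metadata.lock"):
    # Wait for the lock file to disappear, hold it while yielding the parsed store.
    store_path = os.path.join(project_dir, filename)
    lock_path = os.path.join(project_dir, lockname)
    while os.path.exists(lock_path):  # same busy-wait as the library code above
        time.sleep(5)
    open(lock_path, "w").close()
    try:
        config = configparser.RawConfigParser()
        config.read(store_path)
        yield config
        with open(store_path, "w") as fd:
            config.write(fd)
    finally:
        os.unlink(lock_path)

# Example use, mirroring writeEntryToSection's overwrite behaviour:
# with locked_metadata("/tmp/myproject") as cfg:
#     if not cfg.has_section("manifest"):
#         cfg.add_section("manifest")
#     cfg.set("manifest", "dem", "dem.tif")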
Write an entry in the given section to the metadata store for a given project. Will overwrite the value for a key that already exists. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. section: The section the key is to be written to. key: The key to be written to the given section of the project metadata. value: The value to be written for key stored in the given section of the project metadata. callback: A function that should be called before writing the entry; the function takes as input the config object. Raises IOError(errno.EACCES) if the metadata store for the project is not writable, Exception if section is not a valid GenericMetadata section, and MetadataVersionError if existing version information in the metadata store does not match the version of the currently running EcohydroLib. | def writeEntryToSection(context, section, key, value, callback=None):
projectDir = context.projectDir
if section not in GenericMetadata.SECTIONS:
raise Exception( "%s is an unknown section" % (section,) )
lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)
metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)
if os.path.exists(metadataFilepath):
if not os.access(metadataFilepath, os.W_OK):
raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % \
(projectDir,))
else:
if not os.access(projectDir, os.W_OK):
raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % \
(projectDir,))
# Create metadata file as it does not exist
metadataFD = open(metadataFilepath, 'w')
metadataFD.close()
# Wait for lockfile to be relinquished
while os.path.exists(lockFilepath):
time.sleep(5)
# Write lock file
open(lockFilepath, 'w').close()
# Read metadata store
config = ConfigParser.RawConfigParser()
config.read(metadataFilepath)
GenericMetadata._writeVersionToMetadata(config)
if callback:
callback(config)
# Write new entry
if not config.has_section(section):
config.add_section(section)
config.set(section, key, value)
# Write metadata store
config.write(open(metadataFilepath, 'w'))
# Remove lock file
os.unlink(lockFilepath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def save(self, section, key, value, target=None):\n if section in self._section_index:\n config = self._configs[self._section_index[section]]\n elif target is not None:\n if target in self._path_index:\n config = self._configs[self._path_index[target]]\n elif target in self._name_index:\n config = self._configs[self._name_index[target]]\n elif target in self._configs:\n config = self._configs[0]\n else:\n raise KeyError(target)\n else:\n raise KeyError(section)\n\n config.save(section, key, value)\n\n # reindex in case new sections were added\n self.__idx(config)",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def _writeVersionToMetadata(config):\n if not config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n config.add_section(GenericMetadata.ECOHYDROLIB_SECION)\n \n if not config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n config.set(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY, GenericMetadata._ecohydrolibVersion)\n return\n \n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def put(self, cmd_names, section, key, value, env=DEFAULT_ENV):\n\n if not self.document:\n self._read()\n # Empty document prepare the initial structure.\n self.document.update({env: {self._to_key(cmd_names): {section: {key: value}}}})\n # Only update appropriate key value pairs within a section\n self.document[env][self._to_key(cmd_names)][section].update({key: value})",
"def setSection(self, section, item, data):\n if not self.config.has_section(section):\n self.config.add_section(section)\n self.config.set(section, item, data)\n # Write the updated file whenever anything changes\n with open(self.filename, 'w') as configfile:\n self.config.write(configfile)",
"def write_setting(self, section, key, value):\n cp = ConfigParser()\n cp.read(self.settings_file)\n if not cp.has_section(section):\n cp.add_section(section)\n\n cp.set(section, key, \"%s\" % value)\n with open(self.settings_file, 'w') as configfile:\n cp.write(configfile)\n\n logging.debug(\"Wrote [%s] %s = %s to %s\" % (section, key, value, self.settings_file))",
"def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' )\n\t\t\traise ConfigError( 'error while saving the config file' )",
"def saveSetting(self,section,key,value):\n settings = {section:{key:value}}\n self.saveSettings(settings)",
"def overwrite(section: str, data: any) -> None:\n\toverwriteDict[section] = data\n\tlogger.debug(f'Overwritten config {section}!')",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def set(self, name, val, section=section_default):\n if section not in self.config.sections():\n self.config.add_section(section)\n\n self.config.set(section, name, val)\n self.save()",
"def writeManifestEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value)",
"def set_config(self, section, values):\n for option in values:\n self.data[section][option] = values[option]\n\n with open(self.filename, 'w') as configfile:\n self.data.write(configfile)",
"def set_metadata(self, val, entry=None):\n \n if entry is None:\n self.metadata = val\n else:\n self.metadata[entry] = val",
"def write_metadata(file_name, metadata_dict, category='',\n datatype=\"inventory\", parameters=None):\n if (datatype == \"inventory\") or (datatype == \"source\"):\n meta = set_stewi_meta(file_name, stewiformat=category)\n if datatype == 'inventory':\n meta.tool_meta = {\"parameters\": parameters,\n \"sources\": metadata_dict}\n else:\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)\n elif datatype == \"validation\":\n file = (paths.local_path / 'validation' /\n f'{file_name}_validationset_metadata.json')\n with file.open('w') as fi:\n fi.write(json.dumps(metadata_dict, indent=4))",
"def setup_cfg_set(section, key, value, filename='setup.cfg'):\n\n config = ConfigParser()\n if os.path.isfile(filename):\n config.read('setup.cfg')\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n with open(filename, 'w') as configfile:\n config.write(configfile)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def _write_section_values(section_data, fobj):\n\n # Order is significant.\n section_dict = OrderedDict()\n section_dict['Armor'] = section_data.get('armor')\n section_dict['Internals'] = section_data.get('internals')\n section_dict['Rear'] = section_data.get('rear')\n section_dict['Config'] = section_data.get('config')\n\n for name, value in section_dict.items():\n if not value:\n continue\n val_str = \" {name:<14} {{ {value} }}\\n\".format(\n name=name, value=value)\n fobj.write(val_str)",
"def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already exists')\n self.dict['meta'] = str(metadata)",
"def _prep_metadata(md_sect, path):\n if not set(md_sect).issuperset(metadata_required_fields):\n missing = metadata_required_fields - set(md_sect)\n raise ConfigError(\"Required fields missing: \" + '\\n'.join(missing))\n\n module = md_sect.get('module')\n if not module.isidentifier():\n raise ConfigError(\"Module name %r is not a valid identifier\" % module)\n\n md_dict = {}\n\n # Description file\n if 'description-file' in md_sect:\n description_file = path.parent / md_sect.get('description-file')\n try:\n with description_file.open(encoding='utf-8') as f:\n raw_desc = f.read()\n except FileNotFoundError:\n raise ConfigError(\n \"Description file {} does not exist\".format(description_file)\n )\n ext = description_file.suffix\n try:\n mimetype = readme_ext_to_content_type[ext]\n except KeyError:\n log.warning(\"Unknown extension %r for description file.\", ext)\n log.warning(\" Recognised extensions: %s\",\n \" \".join(readme_ext_to_content_type))\n mimetype = None\n\n if mimetype == 'text/x-rst':\n # rst check\n stream = io.StringIO()\n res = render(raw_desc, stream)\n if not res:\n log.warning(\"The file description seems not to be valid rst for PyPI;\"\n \" it will be interpreted as plain text\")\n log.warning(stream.getvalue())\n\n md_dict['description'] = raw_desc\n md_dict['description_content_type'] = mimetype\n\n if 'urls' in md_sect:\n project_urls = md_dict['project_urls'] = []\n for label, url in sorted(md_sect.pop('urls').items()):\n project_urls.append(\"{}, {}\".format(label, url))\n\n for key, value in md_sect.items():\n if key in {'description-file', 'module'}:\n continue\n if key not in metadata_allowed_fields:\n closest = difflib.get_close_matches(key, metadata_allowed_fields,\n n=1, cutoff=0.7)\n msg = \"Unrecognised metadata key: {!r}\".format(key)\n if closest:\n msg += \" (did you mean {!r}?)\".format(closest[0])\n raise ConfigError(msg)\n\n k2 = key.replace('-', '_')\n md_dict[k2] = value\n if key in metadata_list_fields:\n if not isinstance(value, list):\n raise ConfigError('Expected a list for {} field, found {!r}'\n .format(key, value))\n if not all(isinstance(a, str) for a in value):\n raise ConfigError('Expected a list of strings for {} field'\n .format(key))\n elif key == 'requires-extra':\n if not isinstance(value, dict):\n raise ConfigError('Expected a dict for requires-extra field, found {!r}'\n .format(value))\n if not all(isinstance(e, list) for e in value.values()):\n raise ConfigError('Expected a dict of lists for requires-extra field')\n for e, reqs in value.items():\n if not all(isinstance(a, str) for a in reqs):\n raise ConfigError('Expected a string list for requires-extra. (extra {})'\n .format(e))\n else:\n if not isinstance(value, str):\n raise ConfigError('Expected a string for {} field, found {!r}'\n .format(key, value))\n\n # What we call requires in the ini file is technically requires_dist in\n # the metadata.\n if 'requires' in md_dict:\n md_dict['requires_dist'] = md_dict.pop('requires')\n\n # And what we call dist-name is name in the metadata\n if 'dist_name' in md_dict:\n md_dict['name'] = md_dict.pop('dist_name')\n\n # Move dev-requires into requires-extra\n reqs_noextra = md_dict.pop('requires_dist', [])\n reqs_by_extra = md_dict.pop('requires_extra', {})\n dev_requires = md_dict.pop('dev_requires', None)\n if dev_requires is not None:\n if 'dev' in reqs_by_extra:\n raise ConfigError(\n 'dev-requires occurs together with its replacement requires-extra.dev.')\n else:\n log.warning(\n '“dev-requires = ...” is obsolete. 
Use “requires-extra = {\"dev\" = ...}” instead.')\n reqs_by_extra['dev'] = dev_requires\n\n # Add requires-extra requirements into requires_dist\n md_dict['requires_dist'] = \\\n reqs_noextra + list(_expand_requires_extra(reqs_by_extra))\n\n md_dict['provides_extra'] = sorted(reqs_by_extra.keys())\n\n # For internal use, record the main requirements as a '.none' extra.\n reqs_by_extra['.none'] = reqs_noextra\n\n return md_dict, module, reqs_by_extra",
"def set_sumoconfig_option(config_parser, config_xml, section, key, value):\n\n key_nodes = config_xml.getElementsByTagName(key)\n if len(key_nodes) > 1:\n raise RuntimeError('config file \"%s\" contains %d <%s> nodes, expected at most 1' % (file_dst_name, key, len(key_nodes)))\n elif len(key_nodes) < 1:\n key_node = config_parser.createElement(key)\n key_node.setAttribute(\"value\", str(value))\n config_xml.appendChild(key_node)\n else:\n key_node = key_nodes[0]\n for n in key_node.childNodes:\n key_node.removeChild(n)\n key_node.setAttribute(\"value\", str(value))",
"def addsection(config_file, section):\n error, parser = _get_config_parsser(config_file)\n if error:\n return error, parser\n # add section\n try:\n parser.add_section(section)\n except Exception, e:\n return 1, e\n # backup config_file\n _backup_config(config_file)\n # write new config to file\n return _write_config_parser_to_file(parser, config_file)",
"def writeGRASSEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.GRASS_SECTION, key, value)",
"def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)"
] | [
"0.73840284",
"0.6628399",
"0.571443",
"0.5646245",
"0.5634331",
"0.5582677",
"0.554375",
"0.5497565",
"0.54227996",
"0.5291703",
"0.52288014",
"0.5225978",
"0.5219223",
"0.5213694",
"0.5208545",
"0.5141916",
"0.513379",
"0.510092",
"0.50690436",
"0.50044626",
"0.49892816",
"0.4980155",
"0.4977947",
"0.49760523",
"0.49746165",
"0.49683577",
"0.49583313",
"0.49358788",
"0.49313304",
"0.49185646"
] | 0.8193251 | 0 |
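The record above ends with the lock-file-guarded ConfigParser write that the metadata store relies on. A minimal sketch of that pattern follows; the project directory, file names, and helper name are hypothetical, and the Python 2 ConfigParser module naming of the source is kept.

import os, time, errno, ConfigParser

def _locked_config_write(projectDir, section, key, value,
                         metadataFilename='metadata.txt', lockFilename='.metadata.lock'):
    # Hypothetical helper illustrating the lock-file pattern from the record above.
    metadataFilepath = os.path.join(projectDir, metadataFilename)
    lockFilepath = os.path.join(projectDir, lockFilename)
    if not os.access(projectDir, os.W_OK):
        raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % (projectDir,))
    # Busy-wait until any other writer releases the lock, then take it ourselves.
    while os.path.exists(lockFilepath):
        time.sleep(5)
    open(lockFilepath, 'w').close()
    try:
        # Read the existing store (a missing file simply yields an empty config),
        # apply the change, and write the whole store back out.
        config = ConfigParser.RawConfigParser()
        config.read(metadataFilepath)
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, key, value)
        with open(metadataFilepath, 'w') as fd:
            config.write(fd)
    finally:
        # Always release the lock, even if the write fails.
        os.unlink(lockFilepath)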
Write entries in the given section to the metadata store for a given project. Will overwrite the value for each key that already exists. projectDir: path of the project whose metadata store is to be written to. section: the section the keys are to be written to. keys: list of keys to be written to the given section of the project metadata. values: list of values to be written for each key stored in the given section of the project metadata. callback: a function that should be called before writing the entries; the function takes as input the config object. Raises IOError(errno.EACCES) if the metadata store for the project is not writable; Exception if len(keys) != len(values); MetadataVersionError if existing version information in the metadata store does not match the version of the currently running EcohydroLib. | def _writeEntriesToSection(projectDir, section, keys, values, callback=None):
numKeys = len(keys)
if numKeys != len(values):
raise Exception( "%d keys specified for %d values" % (numKeys, len(values)) )
lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)
metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)
if os.path.exists(metadataFilepath):
if not os.access(metadataFilepath, os.W_OK):
raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % \
(projectDir,))
else:
if not os.access(projectDir, os.W_OK):
raise IOError(errno.EACCES, "Unable to write to metadata store for project %s" % \
(projectDir,))
# Create metadata file as it does not exist
metadataFD = open(metadataFilepath, 'w')
metadataFD.close()
# Wait for lockfile to be relinquished
while os.path.exists(lockFilepath):
time.sleep(5)
# Write lock file
open(lockFilepath, 'w').close()
# Read metadata store
config = ConfigParser.RawConfigParser()
config.read(metadataFilepath)
GenericMetadata._writeVersionToMetadata(config)
if callback:
callback(config)
# Write new entries
if not config.has_section(section):
config.add_section(section)
for i in xrange(numKeys):
config.set(section, keys[i], values[i])
# Write metadata store
config.write(open(metadataFilepath, 'w'))
# Remove lock file
os.unlink(lockFilepath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def set_config(self, section, values):\n for option in values:\n self.data[section][option] = values[option]\n\n with open(self.filename, 'w') as configfile:\n self.data.write(configfile)",
"def put(self, cmd_names, section, key, value, env=DEFAULT_ENV):\n\n if not self.document:\n self._read()\n # Empty document prepare the initial structure.\n self.document.update({env: {self._to_key(cmd_names): {section: {key: value}}}})\n # Only update appropriate key value pairs within a section\n self.document[env][self._to_key(cmd_names)][section].update({key: value})",
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def _writeVersionToMetadata(config):\n if not config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n config.add_section(GenericMetadata.ECOHYDROLIB_SECION)\n \n if not config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n config.set(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY, GenericMetadata._ecohydrolibVersion)\n return\n \n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def _write_section_values(section_data, fobj):\n\n # Order is significant.\n section_dict = OrderedDict()\n section_dict['Armor'] = section_data.get('armor')\n section_dict['Internals'] = section_data.get('internals')\n section_dict['Rear'] = section_data.get('rear')\n section_dict['Config'] = section_data.get('config')\n\n for name, value in section_dict.items():\n if not value:\n continue\n val_str = \" {name:<14} {{ {value} }}\\n\".format(\n name=name, value=value)\n fobj.write(val_str)",
"def write_metadata(file_name, metadata_dict, category='',\n datatype=\"inventory\", parameters=None):\n if (datatype == \"inventory\") or (datatype == \"source\"):\n meta = set_stewi_meta(file_name, stewiformat=category)\n if datatype == 'inventory':\n meta.tool_meta = {\"parameters\": parameters,\n \"sources\": metadata_dict}\n else:\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)\n elif datatype == \"validation\":\n file = (paths.local_path / 'validation' /\n f'{file_name}_validationset_metadata.json')\n with file.open('w') as fi:\n fi.write(json.dumps(metadata_dict, indent=4))",
"def __setitem__(self, keys, value):\n\n if isinstance(keys, str):\n keys = [keys]\n\n #print(\"KEYTYPE: {0}\".format(keys))\n self.__setInDict(self.__cfg, keys, value)\n self.write(self.__cfgFile)",
"def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' )\n\t\t\traise ConfigError( 'error while saving the config file' )",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def save(self, section, key, value, target=None):\n if section in self._section_index:\n config = self._configs[self._section_index[section]]\n elif target is not None:\n if target in self._path_index:\n config = self._configs[self._path_index[target]]\n elif target in self._name_index:\n config = self._configs[self._name_index[target]]\n elif target in self._configs:\n config = self._configs[0]\n else:\n raise KeyError(target)\n else:\n raise KeyError(section)\n\n config.save(section, key, value)\n\n # reindex in case new sections were added\n self.__idx(config)",
"def write_config(profile, values):\n p = Path(CONFIG_FILE_PATH).expanduser()\n p.parent.mkdir(parents=True, exist_ok=True)\n config = configparser.ConfigParser()\n config.read(str(p))\n config[profile] = values\n with p.open(\"w\") as f:\n config.write(f)\n print(\"write config to\", p)",
"def write(config_file, args=None, sections=None):\n config = configparser.ConfigParser()\n\n for section in SECTIONS:\n config.add_section(section)\n for name, opts in SECTIONS[section].items():\n if args and sections and section in sections and hasattr(args, name.replace('-', '_')):\n value = getattr(args, name.replace('-', '_'))\n if isinstance(value, list):\n # print(type(value), value)\n value = ', '.join(value)\n else:\n value = opts['default'] if opts['default'] is not None else ''\n\n prefix = '# ' if value == '' else ''\n\n if name != 'config':\n config.set(section, prefix + name, str(value))\n with open(config_file, 'w') as f:\n config.write(f)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def write(filename):\n log.msg(\"Saving configuration information to \\\"\" + filename + \"\\\"\", lvl='i', ss='ss_configfile')\n\n f = open(filename, 'w')\n cp = ConfigParser.SafeConfigParser()\n #a little string hacking because our section names are un-normalized\n #this builds a list of all the sections names\n sectionslst = []\n sections = []\n for k in _loaded.keys():\n sectionslst.append(k.split('.')[0])\n #get unique entries\n sections = _uniquer(sectionslst)\n for sec in sections:\n log.msg(\"\\tCompiling section \\\"\" + sec + \"\\\"\",\n lvl='d3', ss='ss_configfile')\n #make the headers\n cp.add_section(sec)\n #for each item in my dictionary\n #it splits the key in two and uses that for the first and second \"set\" args\n #then it uses the item.value for the 3rd arg\n # from 'section.option:value'\n \n for k in _loaded.items():\n cp.set(str(k[0]).split('.')[0], str(k[0]).split('.')[1], str(k[1]))\n cp.write(f)\n f.close()",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def _update_metadata_file(metadata, networks):\n\n old_networks = metadata.get('network_names', [])\n new_networks = list(networks.keys())\n _validate_duplicate_network(old_networks, new_networks)\n if metadata.get('broker_addresses'):\n new_brokers = list(networks.values())\n else:\n new_brokers = None\n if metadata.get('manager_addresses'):\n new_managers = list(networks.values())\n else:\n new_managers = None\n\n store_cert_metadata(\n new_networks=list(networks.keys()),\n new_brokers=new_brokers,\n new_managers=new_managers,\n )",
"def _prep_metadata(md_sect, path):\n if not set(md_sect).issuperset(metadata_required_fields):\n missing = metadata_required_fields - set(md_sect)\n raise ConfigError(\"Required fields missing: \" + '\\n'.join(missing))\n\n module = md_sect.get('module')\n if not module.isidentifier():\n raise ConfigError(\"Module name %r is not a valid identifier\" % module)\n\n md_dict = {}\n\n # Description file\n if 'description-file' in md_sect:\n description_file = path.parent / md_sect.get('description-file')\n try:\n with description_file.open(encoding='utf-8') as f:\n raw_desc = f.read()\n except FileNotFoundError:\n raise ConfigError(\n \"Description file {} does not exist\".format(description_file)\n )\n ext = description_file.suffix\n try:\n mimetype = readme_ext_to_content_type[ext]\n except KeyError:\n log.warning(\"Unknown extension %r for description file.\", ext)\n log.warning(\" Recognised extensions: %s\",\n \" \".join(readme_ext_to_content_type))\n mimetype = None\n\n if mimetype == 'text/x-rst':\n # rst check\n stream = io.StringIO()\n res = render(raw_desc, stream)\n if not res:\n log.warning(\"The file description seems not to be valid rst for PyPI;\"\n \" it will be interpreted as plain text\")\n log.warning(stream.getvalue())\n\n md_dict['description'] = raw_desc\n md_dict['description_content_type'] = mimetype\n\n if 'urls' in md_sect:\n project_urls = md_dict['project_urls'] = []\n for label, url in sorted(md_sect.pop('urls').items()):\n project_urls.append(\"{}, {}\".format(label, url))\n\n for key, value in md_sect.items():\n if key in {'description-file', 'module'}:\n continue\n if key not in metadata_allowed_fields:\n closest = difflib.get_close_matches(key, metadata_allowed_fields,\n n=1, cutoff=0.7)\n msg = \"Unrecognised metadata key: {!r}\".format(key)\n if closest:\n msg += \" (did you mean {!r}?)\".format(closest[0])\n raise ConfigError(msg)\n\n k2 = key.replace('-', '_')\n md_dict[k2] = value\n if key in metadata_list_fields:\n if not isinstance(value, list):\n raise ConfigError('Expected a list for {} field, found {!r}'\n .format(key, value))\n if not all(isinstance(a, str) for a in value):\n raise ConfigError('Expected a list of strings for {} field'\n .format(key))\n elif key == 'requires-extra':\n if not isinstance(value, dict):\n raise ConfigError('Expected a dict for requires-extra field, found {!r}'\n .format(value))\n if not all(isinstance(e, list) for e in value.values()):\n raise ConfigError('Expected a dict of lists for requires-extra field')\n for e, reqs in value.items():\n if not all(isinstance(a, str) for a in reqs):\n raise ConfigError('Expected a string list for requires-extra. (extra {})'\n .format(e))\n else:\n if not isinstance(value, str):\n raise ConfigError('Expected a string for {} field, found {!r}'\n .format(key, value))\n\n # What we call requires in the ini file is technically requires_dist in\n # the metadata.\n if 'requires' in md_dict:\n md_dict['requires_dist'] = md_dict.pop('requires')\n\n # And what we call dist-name is name in the metadata\n if 'dist_name' in md_dict:\n md_dict['name'] = md_dict.pop('dist_name')\n\n # Move dev-requires into requires-extra\n reqs_noextra = md_dict.pop('requires_dist', [])\n reqs_by_extra = md_dict.pop('requires_extra', {})\n dev_requires = md_dict.pop('dev_requires', None)\n if dev_requires is not None:\n if 'dev' in reqs_by_extra:\n raise ConfigError(\n 'dev-requires occurs together with its replacement requires-extra.dev.')\n else:\n log.warning(\n '“dev-requires = ...” is obsolete. 
Use “requires-extra = {\"dev\" = ...}” instead.')\n reqs_by_extra['dev'] = dev_requires\n\n # Add requires-extra requirements into requires_dist\n md_dict['requires_dist'] = \\\n reqs_noextra + list(_expand_requires_extra(reqs_by_extra))\n\n md_dict['provides_extra'] = sorted(reqs_by_extra.keys())\n\n # For internal use, record the main requirements as a '.none' extra.\n reqs_by_extra['.none'] = reqs_noextra\n\n return md_dict, module, reqs_by_extra",
"def writeConfig(valuesDict, configFile=None):\n command = {\n \"command\": \"writeConfig\",\n \"Values\": valuesDict,\n }\n return sendCommand(command)",
"def __setitem__(self, key, value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))",
"def setSection(self, section, item, data):\n if not self.config.has_section(section):\n self.config.add_section(section)\n self.config.set(section, item, data)\n # Write the updated file whenever anything changes\n with open(self.filename, 'w') as configfile:\n self.config.write(configfile)",
"def writeSeriesConfig(outputDirPath, nkeys, nvalues, keyType='int16', valueType='int16',\n confFilename=\"conf.json\", overwrite=True, awsCredentialsOverride=None):\n import json\n from thunder.rdds.fileio.writers import getFileWriterForPath\n\n filewriterClass = getFileWriterForPath(outputDirPath)\n # write configuration file\n # config JSON keys are lowercased \"valuetype\", \"keytype\", not valueType, keyType\n conf = {'input': outputDirPath,\n 'nkeys': nkeys, 'nvalues': nvalues,\n 'valuetype': str(valueType), 'keytype': str(keyType)}\n\n confWriter = filewriterClass(outputDirPath, confFilename, overwrite=overwrite,\n awsCredentialsOverride=awsCredentialsOverride)\n confWriter.writeFile(json.dumps(conf, indent=2))\n\n # touch \"SUCCESS\" file as final action\n successWriter = filewriterClass(outputDirPath, \"SUCCESS\", overwrite=overwrite,\n awsCredentialsOverride=awsCredentialsOverride)\n successWriter.writeFile('')",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def write(**kwargs): \n \n if kwargs is not None:\n for key, value in kwargs.iteritems():\n config.set('postgresql', key, value)\n with open(configFile, 'wb') as configfile:\n config.write(configfile)",
"def overwrite(section: str, data: any) -> None:\n\toverwriteDict[section] = data\n\tlogger.debug(f'Overwritten config {section}!')"
] | [
"0.73303044",
"0.6265618",
"0.5696074",
"0.5662361",
"0.5601739",
"0.5511251",
"0.5383638",
"0.53310764",
"0.5270173",
"0.52505934",
"0.5145473",
"0.5057608",
"0.50152016",
"0.50072145",
"0.49906647",
"0.49845642",
"0.49584794",
"0.4947538",
"0.49214426",
"0.49152473",
"0.4881851",
"0.48789126",
"0.4875752",
"0.48263672",
"0.47806227",
"0.47753826",
"0.475267",
"0.47368076",
"0.47316718",
"0.4724012"
] | 0.83927697 | 0 |
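A minimal usage sketch for the batch-write function documented above; the project directory, section name, keys, values, and callback are hypothetical stand-ins.

# Hypothetical batch write: two keys land in a 'study_area' section in a single
# locked read-modify-write cycle; the callback below is illustrative only.
def add_note_before_write(config):
    # Called with the ConfigParser object just before the new entries are set.
    if not config.has_section('notes'):
        config.add_section('notes')
    config.set('notes', 'last_batch_write', 'study_area bounding box')

_writeEntriesToSection('/tmp/example_project', 'study_area',
                       ['bbox_wgs84', 'dem_res_x'],
                       ['-76.8 39.2 -76.7 39.3', '30'],
                       callback=add_note_before_write)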
Write a manifest entry to the metadata store for a given project. Will overwrite the value for a key that already exists. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. key: the key to be written to the manifest section of the project metadata. value: the value to be written for the key stored in the manifest section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def writeManifestEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _set_manifest(self, manifest: Dict) -> None:\n if \"metadata\" not in manifest:\n manifest[\"metadata\"] = {}\n\n if \"files\" not in manifest:\n manifest[\"files\"] = {\n \"includes\": [],\n \"excludes\": [],\n }\n\n with open(self._manifest_path, \"w\", encoding=\"utf-8\") as file:\n # TODO: Exception handling\n self._yaml.dump(manifest, file)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def write_manifest_xml(cls, document, manifest_content):\n with zipfile.ZipFile(document, 'a') as open_document:\n open_document.writestr(DOCUMENT_MANIFEST_PATH, ''.join(manifest_content))",
"def update_manifest(self, dst):\n # Read the current manifest into memory\n mpath = os.path.join(os.path.dirname(dst), \"manifest.json\")\n try:\n with open(mpath, 'r') as f:\n manifest = json.load(f)\n except IOError:\n manifest = {}\n\n name, _ = os.path.splitext(os.path.basename(dst))\n # Update the manifest record\n manifest[name] = {\n \"url\": os.path.basename(dst),\n \"signature\": sha256sum(dst),\n }\n\n # Write the manifest back to disk\n with open(mpath, 'w') as f:\n json.dump(manifest, f, indent=2)",
"def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:\n filename = filename or self.manifest_filename\n manifest = manifest or {}\n self.log.debug(f\"Updating manifest '{manifest}' to file '{filename}'\")\n with open(filename, \"w\") as f:\n json.dump(manifest, f, indent=2)",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def test_write(self):\n temp_file = tempfile.mkstemp()[1]\n try:\n with open(temp_file, \"w+\") as fh:\n self.new_manifest.write(fh)\n tools.eq_(self.new_manifest, load_manifest(temp_file))\n finally:\n os.unlink(temp_file)",
"def __setitem__(self, key, value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))",
"def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already exists')\n self.dict['meta'] = str(metadata)",
"def create_manifest():\n dirpath = os.getcwd()\n file_path_ori = dirpath + \"/manifest.json\"\n file_path_new = dirpath + \"/manifests3.json\"\n\n with open(file_path_ori, \"rt\") as fin:\n with open(file_path_new, \"wt\") as fout:\n for line in fin:\n fout.write(line.replace('bucket-name', bucketName))",
"def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()",
"def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True",
"def save_metadata(self, acl='public-read'):\n bucket_name = app.config['S3_BUCKET_NAME']\n s3_client = app.config['S3']\n key = self.build_s3_key('datapackage.json')\n s3_client.put_object(Bucket=bucket_name, Key=key,\n Body=self.body, ACL=acl)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value",
"def set_metadata(self, key, val):\n \n self.metadata[key] = val",
"def write(self, target):\n mpath = path.join(self._working_dir, 'manifest.json')\n with open(mpath, 'w') as mani:\n json.dump(self.item, mani)\n\n directory = path.abspath(self._working_dir)\n with zipfile.ZipFile(target, 'w', allowZip64=True) as zip:\n for root, dirs, files in walk(directory):\n for f in files:\n abspath = path.join(root, f)\n relpath = path.relpath(abspath, directory)\n zip.write(abspath, relpath)\n return target",
"def _write_manifest_json(self, json_to_write):\n with open(os.path.join(self._crx_dir, \"manifest.json\"), \"wb\") as manifest:\n json.dump(json_to_write, manifest)",
"def _append_to_cache_directory(key: str, value: str) -> None:\n directory_json_path = os.path.join(\n CacheManagerSingleton.CACHE_PATH, \"directory.json\"\n )\n with open(directory_json_path, \"r\") as directory_file_read:\n directory_json = json.loads(directory_file_read.read())\n directory_file_read.close()\n directory_json[key] = value\n new_directory_json = json.dumps(directory_json, indent=2)\n with open(directory_json_path, \"w\") as directory_file_write:\n directory_file_write.write(new_directory_json)\n directory_file_write.close()",
"def set_metadata(self, val, entry=None):\n \n if entry is None:\n self.metadata = val\n else:\n self.metadata[entry] = val",
"def writeToMetadata(self, context):\n if self.modelType not in GenericMetadata.MODEL_TYPES:\n raise Exception(\"Model type %s is not among known model types: %s\" % (self.modelType, str(GenericMetadata.MODEL_TYPES) ) )\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n try:\n runs = modelRunEntries['runs'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n runs = []\n \n # Collected model entry and keys and values into lists so we can write to metadata store in batch\n keys = []\n values = []\n \n # Generate unique identifier for this model run. Unique ID is a combination of model type and a number\n entryNumber = 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n while fqId in runs:\n entryNumber += 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n self.runNumber = entryNumber\n # Add new run to list of runs\n runs.append(fqId)\n runsStr = GenericMetadata.VALUE_DELIM.join(runs)\n keys.append('runs'); values.append(runsStr)\n # Write attributes for run\n keyProto = fqId + GenericMetadata.KEY_SEP\n runDate = keyProto + 'date_utc'\n keys.append(runDate); values.append( self.date.strftime(ModelRun.FMT_DATE) )\n runDesc = keyProto + 'description'\n keys.append(runDesc); values.append(self.description)\n runCmd = keyProto + 'command'\n keys.append(runCmd); values.append(self.command)\n runOutput = keyProto + 'output'\n keys.append(runOutput); values.append(self.output)\n # Write to metadata\n GenericMetadata.writeModelRunEntries(context, keys, values)",
"def put_metadata(self, metadata, tombstone=False):\n if tombstone:\n # We don't write tombstone files. So do nothing.\n return\n assert self.data_file is not None, \\\n \"put_metadata: no file to put metadata into\"\n metadata = _adjust_metadata(metadata)\n self.threadpool.run_in_thread(write_metadata, self.data_file, metadata)\n self.metadata = metadata\n self._filter_metadata()",
"def _write_manifest_to_s3(self, manifest_file):\n account = self.boto_session.client(\"sts\").get_caller_identity()[\"Account\"]\n s3_client = self.boto_session.client(\"s3\")\n region = self.boto_session.region_name\n\n # write to s3 bucket\n manifest_bucket_name = \"sagemaker-{}-{}\".format(region, account)\n timstamp = str(int(time.time()))\n manifest_s3_file_key = f\"{self.experiment_id}/manifest_files/manifest-{timstamp}\"\n body = b\"\"\n body += str(json.dumps(manifest_file, sort_keys=True, indent=4)).encode(\"utf_8\")\n try:\n s3_client.put_object(Body=body, Bucket=manifest_bucket_name, Key=manifest_s3_file_key)\n\n except ClientError as e:\n error_code = e.response[\"Error\"][\"Code\"]\n message = e.response[\"Error\"][\"Message\"]\n raise RuntimeError(\n \"Failed to upload manifest data with error {}: {}\".format(error_code, message)\n )\n\n manifest_file_path = f\"s3://{manifest_bucket_name}/{manifest_s3_file_key}\"\n logger.info(f\"Successfully upload manifest file to s3 bucket path `{manifest_file_path}'\")\n return manifest_file_path",
"def _StoreMetadataToFile(payload_dir, metadata_obj):\n file_dict = {SHA1_ATTR: metadata_obj.sha1,\n SHA256_ATTR: metadata_obj.sha256,\n SIZE_ATTR: metadata_obj.size,\n ISDELTA_ATTR: metadata_obj.is_delta_format}\n metadata_file = os.path.join(payload_dir, METADATA_FILE)\n with open(metadata_file, 'w') as file_handle:\n json.dump(file_dict, file_handle)",
"def write_manifest ( self, **manifest_kw ):\n for package in self._subdirs.values():\n package.write_manifest ( **manifest_kw )",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def write_file_manifest(name_map, out_stream):\n\n out_stream.write(struct.pack(COUNT_FMT, len(name_map)))\n # Sort to make it easier for diff algos to find contiguous\n # changes.\n names = name_map.keys()\n names.sort()\n for name in names:\n length = MANIFEST_ENTRY_HDR_LEN + len(name)\n file_sha, history_sha = name_map[name]\n\n out_stream.write(struct.pack(MANIFEST_ENTRY_FMT % len(name),\n length,\n file_sha,\n history_sha,\n name))",
"def write_manifest(self):\n import time\n import sys\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n hout.write(' '.join(sys.argv) + '\\n')\n for k, v in self.table.items():\n hout.write(';'.join([k] + v) + '\\n')"
] | [
"0.6259716",
"0.57655275",
"0.5742039",
"0.5524634",
"0.5496938",
"0.5429347",
"0.54233384",
"0.5390245",
"0.53718156",
"0.536669",
"0.5344699",
"0.5332121",
"0.5274322",
"0.52674514",
"0.5239261",
"0.52279717",
"0.5213678",
"0.50975734",
"0.5095109",
"0.50944406",
"0.50877255",
"0.50845206",
"0.507644",
"0.5074784",
"0.5052424",
"0.50491977",
"0.50441813",
"0.50399894",
"0.5038969",
"0.50373274"
] | 0.71485126 | 0 |
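A short usage sketch for the manifest helper documented above, assuming a minimal context object that exposes projectDir and a project directory that already exists; the class name, path, key, and value are hypothetical.

class ExampleContext(object):
    def __init__(self, projectDir):
        # The only attribute the metadata routines rely on here is projectDir.
        self.projectDir = projectDir

context = ExampleContext('/tmp/example_project')
# Record where a DEM raster lives within the project, under the manifest section.
writeManifestEntry(context, 'dem', 'dem/DEM.tif')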
Write a study area entry to the metadata store for a given project. Will overwrite the value for a key that already exists. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. key: the key to be written to the study area section of the project metadata. value: the value to be written for the key stored in the study area section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def writeStudyAreaEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.STUDY_AREA_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def deleteStudyAreaEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.STUDY_AREA_SECTION, key)",
"def write(self, path, key):\n raise NotImplementedError",
"def writeGRASSEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.GRASS_SECTION, key, value)",
"def set_study_system_attr(self, study_id: int, key: str, value: Any) -> None:\n raise NotImplementedError",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeHydroShareEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.HYDROSHARE_SECTION, key, value)",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()",
"def write_metadata_to_file(self, path):\n return write_metadata_to_ma_file(path, self)",
"def write_locations(pathfolder, key_firms, years, locs, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['hashes'] = generate_yearnif_hash(years, key_firms)\n db['nif'] = key_firms\n db['year'] = years\n db['locations'] = locs\n db['methodvalues'] = methodvalues\n db.close()",
"def readStudyAreaEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.STUDY_AREA_SECTION)",
"def store(self, name, value):\n # ...but only when the context has been entered (and locks acquired etc.)\n if not self.ready:\n raise RuntimeError(\"SnapshotView is a context manager. Never use it directly!\")\n # Do not ask for permission - overwrite the old entry if necessary\n self.data[name] = value",
"def save_project(self, project_id, project):\n with open('{}/{}'.format(self._storage_location, project_id), 'w') as project_file:\n project_file.write(project.name + '\\n')\n project_file.write(project.description + '\\n')\n project_file.write(\",\".join(project.members) + '\\n')\n project_file.write(\",\".join(project.documents) + '\\n')",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writedb(path, key, value) -> int:\n if key == \"\" or value == \"\":\n return 1\n if os.path.exists(path):\n pass \n else:\n return 1\n with open(path, \"a\") as db:\n db.write(f\"\\n{key}:{value}\")\n return 0",
"def writeMetadataValue(self, field_type, key_field, field_name, field_value, \\\n study_id, host_key_field, row_num, lock):\n \n # This is a mess and it's slow right now. In need of serious speed improvements and cleanup.\n \n statement = ''\n log = []\n pk_name = ''\n\n try:\n #lock.acquire()\n \n # Get our database connection\n con = self.getMetadataDatabaseConnection()\n \n # Set the timeout\n #con.cursor().execute('alter session set ddl_lock_timeout=100') \n \n # Find the table name\n log.append('Locating table name...')\n table_name = None\n table_name = self.findMetadataTable(field_name, field_type, log, study_id, lock)\n log.append('Successfully found table name. Table name is \"%s\"' % str(table_name))\n \n #if field_name == 'dw1':\n # raise Exception('asdf')\n\n # Double-quote for database safety\n log.append('Adding quotes to table name...')\n table_name = '\"' + table_name + '\"' \n log.append('Quoted table name is %s' % table_name)\n log.append('Table name found: %s' % (table_name))\n \n # Store the field name in the database. These are the field names which will\n # be used later to generate a mapping file. We collect the names here because\n # it's an expensive operation to determine post-commit which fields were\n # actually submitted to the database.\n log.append('Attempting to store values in study_actual_columns: %s, %s, %s'\\\n % (study_id, field_name, table_name))\n self.addStudyActualColumn(study_id, field_name, table_name);\n \n # Get extended field info from the database\n log.append('Loading field details...')\n field_details = self.getFieldDetails(field_name)\n log.append(str(field_details))\n if field_details == None:\n log.append('Could not obtain detailed field information. Skipping.')\n raise Exception\n else:\n database_data_type = field_details[1]\n log.append('Read field database data type as \"%s\"' % database_data_type)\n \n # If the field value is 'unknown', switch to 'null' (empty string is the same as null)\n #pass_values = \n if str(field_value).upper() == 'UNKNOWN':\n field_value = ''\n # Figure out if this needs to be an integer ID instead of a text value\n elif database_data_type == 'list':\n log.append('Field is of type list. Looking up integer value...')\n named_params = {'field_value':field_value.upper()}\n statement = 'select vocab_value_id from controlled_vocab_values where upper(term) = :field_value'\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement, named_params).fetchone()\n if results != None:\n # If found, set the field_value to its numeric identifier for storage\n log.append('Value found in controlled_vocab_values. Old field value: \"%s\", new field value: \"%s\".'\\\n % (field_value, results[0]))\n field_value = results[0]\n else:\n log.append('Could not determine inteteger value for list term \"%s\" with value \"%s\". 
Skipping.'\\\n % (field_name, field_value))\n raise Exception\n \n # Set the field_name to it's quoted upper-case name to avoid key-work issues with Oracle\n field_name = '\"%s\"' % field_name.upper()\n \n ########################################\n ### For STUDY\n ########################################\n \n # Study is special - handle separately since row is guaranteed to exist and there can only be one row\n if table_name == '\"STUDY\"' or 'EXTRA_STUDY_' in table_name:\n log.append('Updating study field...')\n named_params = {'field_value':field_value, 'study_id':study_id}\n statement = 'update %s set %s = :field_value where study_id = :study_id' % (table_name, field_name)\n statement = str(statement)\n log.append(statement)\n log.append('field_value = \"%s\", study_id = \"%s\"' % (field_value, study_id))\n results = con.cursor().execute(statement, named_params)\n con.cursor().execute('commit')\n log.append('Study updated.')\n #raise Exception\n return\n \n ########################################\n ### For other tables\n ########################################\n \n if table_name in ['\"AIR\"', '\"COMMON_FIELDS\"', '\"MICROBIAL_MAT_BIOFILM\"', '\"OTHER_ENVIRONMENT\"', \\\n '\"SAMPLE\"', '\"SEDIMENT\"', '\"SOIL\"', '\"WASTEWATER_SLUDGE\"', '\"WATER\"', '\"SEQUENCE_PREP\"', \\\n '\"HOST_ASSOC_VERTIBRATE\"', '\"HOST_ASSOCIATED_PLANT\"', '\"HOST_SAMPLE\"', '\"HUMAN_ASSOCIATED\"',\n '\"COMMON_EXTRA_SAMPLE\"', '\"COMMON_EXTRA_SAMPLE_2\"', '\"COMMON_EXTRA_PREP\"'] \\\n or 'EXTRA_SAMPLE_' in table_name or 'EXTRA_PREP_' in table_name:\n named_params = {'key_field':key_field, 'study_id':study_id}\n statement = 'select sample_id from \"SAMPLE\" where sample_name = :key_field and study_id = :study_id'\n statement = str(statement)\n key_column = 'sample_id'\n key_table = '\"SAMPLE\"'\n elif table_name in ['\"HOST\"']:\n named_params = {'key_field':'{0}:{1}'.format(str(study_id), host_key_field)}\n statement = 'select host_id from \"HOST\" where host_subject_id = :key_field'\n statement = str(statement)\n key_column = 'host_id'\n key_table = '\"HOST\"'\n else:\n return 'Unknown table found - no action taken. Table name was: \"%s\". Column name was: \"%s\"'\\\n % (table_name, field_name)\n \n # Find the assocaited key column\n log.append('Determining if required key row exists...')\n log.append(statement + '\", key_field is ' + key_field + ', study_id is ' + str(study_id))\n results = con.cursor().execute(statement, named_params).fetchone()\n if results != None:\n key_column_value = results[0]\n log.append('Found key_column_value: %s' % str(key_column_value))\n else:\n log.append('Could not determine key. 
Skipping write for field %s with value \"%s\".'\\\n % (field_name, field_value))\n raise Exception\n\n\n\n\n\n\n\n\n\n\n\n ####### to speed up access, create local storage for all items already seen\n\n\n\n # If it ain't there, create it\n log.append('Checking if row already exists...')\n \n # Must append row_num if sequence table\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n named_params = {'key_column_value':key_column_value, 'row_number':row_num}\n statement = 'select 1 from %s where %s = :key_column_value and row_number = :row_number'\\\n % (table_name, key_column)\n else:\n named_params = {'key_column_value':key_column_value}\n statement = 'select 1 from %s where %s = :key_column_value' % (table_name, key_column)\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement, named_params).fetchone()\n \n if results == None:\n log.append('No row found, inserting new row')\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n log.append('Row number is %s' % (str(row_num)))\n named_params = {'key_column_value':key_column_value, 'row_number':row_num}\n statement = 'insert into %s (%s, row_number) values (:key_column_value, :row_number)'\\\n % (table_name, key_column)\n else:\n named_params = {'key_column_value':key_column_value}\n statement = 'insert into %s (%s) values (:key_column_value)' % (table_name, key_column)\n statement = str(statement)\n log.append(statement)\n con.cursor().execute(statement, named_params)\n\n\n\n\n\n\n \n # Attempt to write the metadata field\n log.append('Writing metadata value...')\n \n # If it's a date, must not put quotes around the oracle to_date function.\n if database_data_type == 'date':\n field_value = self.convertToOracleHappyName(field_value)\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n statement = 'update %s set %s = %s where %s = %s and row_number = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value, row_num)\n else: \n statement = 'update %s set %s = %s where %s = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value) \n else: \n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n statement = 'update %s set %s = \\'%s\\' where %s = %s and row_number = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value, row_num)\n else: \n statement = 'update %s set %s = \\'%s\\' where %s = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value)\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement)\n \n # Finally, commit the changes\n results = con.cursor().execute('commit')\n\n #if field_name == '\"DW1\"':\n # raise Exception('Found DW1: Dumping log')\n \n except Exception, e:\n call_string = 'writeMetadataValue(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'\\\n % (field_type, key_field, field_name, field_value, study_id)\n log_string = '<br/>'.join(log)\n error_msg = 'Exception caught attempting to store field \"%s\" with value \"%s\" into \\\n table \"%s\".<br/>Method signature: %s<br/>Full error log:<br/>%s<br/>Oracle says: %s' % \\\n (field_name, field_value, table_name, call_string, log_string, str(e))\n raise Exception(error_msg)\n finally:\n # Release the lock\n #lock.release()\n log.append('Lock released')",
"def _saveExperiment(self, experiment, path):\n Experiment.save(experiment, path);",
"def put(self, project_slug):\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n\n args['slug'] = project_slug\n\n if 'gitlab_repo_id' in args:\n args['external_auth_token'] = (\n current_user.oauth_token_for('gitlab'))\n\n elif 'github_repo_id' in args:\n args['external_auth_token'] = (\n current_user.oauth_token_for('github'))\n\n if args['utility']: # Utilities must have target registry set\n ensure_target_registry(True)\n\n set_target_registry(args)\n\n return self.handle_write(Project(), data=args)",
"def writeClimatePointEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value)",
"def write(self, data_pref)\n\n def _writeToAddama(self, addama_dir):",
"def __setitem__(self, key, value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def put(self, key, value):\n self.execute_command('sudo -i bash -c \\'echo -n \"{0}\" > {1}{2}\\''\n .format(value, self._store_path, key))",
"def write_parameter(self, path, value, attr=None):\n if path.startswith('sample'):\n entry = self.entry.nxroot['entry']\n else:\n entry = self.entry\n if value is not None:\n if attr and path in entry:\n entry[path].attrs[attr] = value\n elif path in entry:\n if isinstance(entry[path], NXgroup):\n del entry[path]\n entry[path] = value\n else:\n entry[path].replace(value)\n elif attr is None:\n entry[path] = value",
"def createSampleKey(self, study_id, sample_name):\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.sample_insert', [study_id, sample_name])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False"
] | [
"0.56092906",
"0.55380523",
"0.5533075",
"0.5486322",
"0.5415097",
"0.5412373",
"0.52140784",
"0.5208131",
"0.5167864",
"0.51001203",
"0.5078394",
"0.5048557",
"0.49965262",
"0.4960082",
"0.4948646",
"0.48635966",
"0.48555365",
"0.4849937",
"0.48480463",
"0.48332018",
"0.48322332",
"0.48229825",
"0.48024255",
"0.47976807",
"0.47955865",
"0.4793947",
"0.4788215",
"0.4719138",
"0.4711388",
"0.47070017"
] | 0.7742472 | 0 |
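The writeEntryToSection and _writeEntriesToSection negatives above both follow the same write protocol: check that the metadata store (or the project directory that must hold it) is writable, spin on a crude lockfile, rewrite the ConfigParser-backed store, and release the lock. Below is a minimal, self-contained Python 3 paraphrase of that pattern for a single key; the function and file names are illustrative, not the original API, and a real implementation would also want a timeout on the lock wait.

```python
import configparser
import errno
import os
import time

METADATA_FILENAME = 'metadata.txt'   # assumed name of the metadata store
METADATA_LOCKFILE = 'metadata.lock'  # assumed name of the advisory lockfile

def write_entry(project_dir, section, key, value):
    """Write key=value (value must be a string) into section of the store."""
    metadata_path = os.path.join(project_dir, METADATA_FILENAME)
    lock_path = os.path.join(project_dir, METADATA_LOCKFILE)

    # Fail early if the store (or the directory that must hold it) is not writable
    target = metadata_path if os.path.exists(metadata_path) else project_dir
    if not os.access(target, os.W_OK):
        raise IOError(errno.EACCES,
                      "Unable to write to metadata store for project %s" % project_dir)

    # Crude advisory lock: wait for any other writer, then claim the lockfile
    while os.path.exists(lock_path):
        time.sleep(5)
    open(lock_path, 'w').close()
    try:
        config = configparser.RawConfigParser()
        config.read(metadata_path)  # silently tolerates a missing store
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, key, value)
        with open(metadata_path, 'w') as f:
            config.write(f)
    finally:
        os.unlink(lock_path)  # always release the lock, even on error
```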
Write a GRASS entry to the metadata store for a given project. Typically used to record the location within a project directory of a GRASS location and mapset, as well as to note the existence of particular datasets stored in the mapset. Will overwrite the value for a key that already exists.
context -- Context object containing projectDir, the path of the project whose metadata store is to be written to
key -- The key to be written to the GRASS section of the project metadata
value -- The value to be written for key stored in the GRASS section of the project metadata
Raises IOError(errno.EACCES) if the metadata store for the project is not writable | def writeGRASSEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.GRASS_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def write(self, path, key):\n raise NotImplementedError",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def __setitem__(self, key, value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def writeClimateGridEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value)",
"def save_project(self, project_id, project):\n with open('{}/{}'.format(self._storage_location, project_id), 'w') as project_file:\n project_file.write(project.name + '\\n')\n project_file.write(project.description + '\\n')\n project_file.write(\",\".join(project.members) + '\\n')\n project_file.write(\",\".join(project.documents) + '\\n')",
"def _write_to_datastore(self, index, doc_type, document, login, path):\n if self.config['Github']['datastore'] == 'filesystem':\n filename = self._generate_filename(doc_type, login)\n self._save_file(json.dumps(document), path, filename)\n elif self.config['Github']['datastore'] == 'elasticsearch':\n self._save_elasticsearch(document, index, doc_type)\n elif self.config['Github']['datastore'] == 'both':\n filename = self._generate_filename(doc_type, login)\n self._save_file(json.dumps(document), path, filename)\n self._save_elasticsearch(document, index, doc_type)\n else:\n error_msg = \"Unable to save result data for {}. Check \" \\\n \" configuration file setting: {}\" \\\n .format(doc_type, self.config['Github']['datastore'])\n self.logger.error(error_msg)",
"def writeManifestEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value)",
"def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()",
"def dumpprojects (self):\r\n\r\n\r\n\r\n datesuffix=str(datetime.datetime.now()).split(' ')[0]\r\n project = str(transform(self.default_dict['projects'].return_dict()))\r\n\r\n if self.using_shelf:\r\n\r\n file_access.save_file(returntext=project,\r\n filename='PROJ'+notebookname+datesuffix,\r\n folder='/textfiles')\r\n if self.using_database:\r\n value_tuple = (notebookname, project,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO projects \"\r\n +\"(notebook, projectfile) \"\r\n +\"VALUES (?,?);\",\r\n value_tuple)\r\n db_connection.commit()",
"def writedb(path, key, value) -> int:\n if key == \"\" or value == \"\":\n return 1\n if os.path.exists(path):\n pass \n else:\n return 1\n with open(path, \"a\") as db:\n db.write(f\"\\n{key}:{value}\")\n return 0",
"def save_project_to_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # create dir if it does not exist\n is_dir = os.path.isdir(str(path))\n is_file = os.path.isfile(str(path))\n if not is_dir and not is_file:\n os.mkdir(path)\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n filename_bu = path + '/' + self.us(project.project_id()) + '.flproject_bu'\n\n # if it already exists, save a backup\n if os.path.isfile(filename):\n shutil.copy2(filename, filename_bu)\n\n # write the file\n f = open(filename, 'w')\n f.write(project.to_json())\n f.close()",
"def save_metadata(self, directory: pathlib.Path, **kwargs) -> None:\n path_to_metadata = directory / (self.name + \".json\")\n metadata = {\n \"latent_dim\": self.latent_dim,\n \"img_size\": self.img_size,\n \"num_pixels\": self.num_pixels,\n \"name\": self.name,\n }\n with open(path_to_metadata, \"w\") as f:\n json.dump(metadata, f, indent=4, sort_keys=True, **kwargs)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def writeStudyAreaEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.STUDY_AREA_SECTION, key, value)",
"def export_project_dump(self, key):",
"def put(self, project_slug):\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n\n args['slug'] = project_slug\n\n if 'gitlab_repo_id' in args:\n args['external_auth_token'] = (\n current_user.oauth_token_for('gitlab'))\n\n elif 'github_repo_id' in args:\n args['external_auth_token'] = (\n current_user.oauth_token_for('github'))\n\n if args['utility']: # Utilities must have target registry set\n ensure_target_registry(True)\n\n set_target_registry(args)\n\n return self.handle_write(Project(), data=args)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def put(self, key, value):\n self.execute_command('sudo -i bash -c \\'echo -n \"{0}\" > {1}{2}\\''\n .format(value, self._store_path, key))",
"def write_metadata_to_file(self, path):\n return write_metadata_to_ma_file(path, self)",
"def _WriteStorageMetadata(self):\n stream_name = 'metadata.txt'\n if self._HasStream(stream_name):\n return\n\n stream_data = (\n '[plaso_storage_file]\\n'\n 'format_version: {0:d}\\n'\n 'serialization_format: {1:s}\\n'\n 'storage_type: {2:s}\\n'\n '\\n').format(\n self._FORMAT_VERSION, self.serialization_format, self.storage_type)\n\n stream_data = stream_data.encode('utf-8')\n self._WriteStream(stream_name, stream_data)",
"def save_metadata(self, directory: pathlib.Path, **kwargs) -> None:\n path_to_metadata = directory / (self.name + \".json\")\n metadata = {\"latent_dim\": self.latent_dim, \"name\": self.name}\n with open(path_to_metadata, \"w\") as f:\n json.dump(metadata, f, indent=4, sort_keys=True, **kwargs)",
"def save_metadata(self, directory: pathlib.Path, **kwargs) -> None:\n path_to_metadata = directory / (self.name + \".json\")\n metadata = {\"latent_dim\": self.latent_dim, \"name\": self.name}\n with open(path_to_metadata, \"w\") as f:\n json.dump(metadata, f, indent=4, sort_keys=True, **kwargs)",
"def put(self, file_path):\n resource_kwargs = self.resource\n if file_path:\n resource_kwargs['upload'] = open(file_path, 'rb')\n if self.dataset_id:\n resource_kwargs['package_id'] = self.dataset_id\n # TODO: Offline mode should be able to run offline (so pass files from task to task),\n # without uploading and getting a resource id, and expiring files after a few seconds\n with CachedCKAN(**self.ckan_kwargs) as ckan:\n status = ckan.create_resource(**resource_kwargs)\n self.resource['id'] = status['id']\n self.resource['package_id'] = self.dataset['id'] = status['package_id']\n # TODO: Create dataset too? How indicate in CkanTarget API?\n # TODO: Patch (update) existing dataset and/or resource? Issue is if a Target exists, Luigi\n # considers running precedent tasks unnecessary, so currently you must delete and re-create.\n # One solution would be some sort of timeout that automatically deletes a resource if someone\n # tries to create it after x seconds has elapsed.",
"def save_grd(filename, meta, map):\n if os.path.exists(filename):\n raise ValueError(\"File already exists: {}\".format(filename))\n if map.shape != (meta['NX'], meta['NY'], meta['NCOMP']):\n raise ValueError(\"The map shape does not match the metadata dictionary.\")\n points = meta['NX'] * meta['NY']\n components = meta['NCOMP']\n data = np.empty((points, 2 * components))\n for component in range(components):\n data[:, 2 * component] = map[:, :, component].reshape(points, order='F').real\n data[:, 2 * component + 1] = map[:, :, component].reshape(points, order='F').imag\n with open(filename, 'w') as f:\n for line in meta['header']:\n f.write('{}\\n'.format(line))\n f.write('{:2d}\\n'.format(meta['KTYPE']))\n f.write('{:12d}{:12d}{:12d}{:12d}\\n'.format(meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID']))\n f.write('{:12d}{:12d}\\n'.format(meta['IX'], meta['IY']))\n f.write(' {: 0.10E} {: 0.10E} {: 0.10E} {: 0.10E}\\n'.format(meta['XS'], meta['YS'], meta['XE'], meta['YE']))\n f.write('{:12d}{:12d}{:12d}\\n'.format(meta['NX'], meta['NY'], meta['KLIMIT']))\n for p in range(points):\n f.write(''.join([float_to_string(number) for number in data[p, :]]) + '\\n')",
"def write(self, data_pref)\n\n def _writeToAddama(self, addama_dir):",
"def write(cls, object, filename: str, key: str = None):\n raise NotImplemented(\"Write method for MCPL is not implemented nor required\")"
] | [
"0.57098305",
"0.5499765",
"0.54096854",
"0.5396995",
"0.53238463",
"0.5178773",
"0.5174975",
"0.5169251",
"0.5139384",
"0.5127704",
"0.50935525",
"0.5081287",
"0.5065597",
"0.4993995",
"0.498795",
"0.4984682",
"0.4982508",
"0.49815756",
"0.49772733",
"0.49714443",
"0.49576974",
"0.4909334",
"0.4907314",
"0.48980406",
"0.48861504",
"0.48861504",
"0.48741293",
"0.48724654",
"0.4859926",
"0.48395672"
] | 0.71451133 | 0 |
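The writeGRASSEntry record above is typical of the GenericMetadata wrappers: each public write call only fixes the section name and forwards to the shared section writer, and a repeated write to the same key overwrites the stored value. A small self-contained sketch of that dispatch pattern, using illustrative names rather than the original API:

```python
# Minimal sketch of the section-dispatch pattern; MetadataStore and its
# method names are stand-ins, not the GenericMetadata API itself.
class MetadataStore:
    GRASS_SECTION = 'grass'
    CLIMATE_POINT_SECTION = 'climatepoint'

    def __init__(self):
        self._sections = {}

    def write_entry_to_section(self, section, key, value):
        # Overwrites any existing value for key, as the docstrings promise
        self._sections.setdefault(section, {})[key] = value

    def write_grass_entry(self, key, value):
        self.write_entry_to_section(self.GRASS_SECTION, key, value)

    def write_climate_point_entry(self, key, value):
        self.write_entry_to_section(self.CLIMATE_POINT_SECTION, key, value)

store = MetadataStore()
store.write_grass_entry('location', 'default')
store.write_grass_entry('location', 'nc_spm')  # same key: value is overwritten
assert store._sections['grass']['location'] == 'nc_spm'
```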
Write a point climate entry to the metadata store for a given project. Will overwrite the value for a key that already exists.
context -- Context object containing projectDir, the path of the project whose metadata store is to be written to
key -- The key to be written to the point climate section of the project metadata
value -- The value to be written for key stored in the point climate section of the project metadata
Raises IOError(errno.EACCES) if the metadata store for the project is not writable | def writeClimatePointEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def writeClimateGridEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value)",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def deleteClimatePointEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def writeCheckpoint(self, metadata_key, times):\r\n # Make sure that the directory exists\r\n try:\r\n os.makedirs(self.tmpDir)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n\r\n # Open a temporary file\r\n (file_handle, tmp_filename) = tempfile.mkstemp(dir=self.tmpDir)\r\n wrapped_file = os.fdopen(file_handle, 'w')\r\n wrapped_file.write(json.dumps(times))\r\n wrapped_file.close()\r\n os.rename(tmp_filename, self.tmpDir + metadata_key)",
"def write(self, path, key):\n raise NotImplementedError",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeToMetadata(self, context):\n pass",
"def updateProject(self, index, data_role):\n row_index = index.row()\n value = self._dataModel.data(index, data_role)\n experiment_id = self._project.experimentsIds()[0] # only 1st measured datablock is currently taken into account\n keys = [\"experiments\", experiment_id, \"calculated\", \"calc\"]\n self._project.setByPathAndIndex(keys, row_index, value)",
"def writeGRASSEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.GRASS_SECTION, key, value)",
"def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if('Latitude' in exif.keys() and\n 'Latitude Reference' in exif.keys() and\n 'Longitude Reference' in exif.keys() and\n 'Longitude' in exif.keys()):\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'],\n description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))",
"def save_to_file(self, file_name):\n from ligo.skymap.io.fits import write_sky_map\n\n check_file_exists_and_rename(file_name)\n kwargs = {}\n if self.meta_data is not None:\n kwargs = self.meta_data\n write_sky_map(file_name, self, **kwargs)",
"def sync_set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def writeStudyAreaEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.STUDY_AREA_SECTION, key, value)",
"def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()",
"def writeManifestEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value)",
"def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value",
"def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already exists')\n self.dict['meta'] = str(metadata)",
"def readFromMetadata(cls, context, fqId):\n newInstance = ClimatePointStation()\n (newInstance.type, newInstance.id) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n\n climate = GenericMetadata.readClimatePointEntries(context)\n \n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP\n longitude = keyProto + 'longitude'\n newInstance.longitude = float(climate[longitude])\n latitude = keyProto + 'latitude'\n newInstance.latitude = float(climate[latitude])\n elevation = keyProto + 'elevation'\n newInstance.elevation = float(climate[elevation])\n name = keyProto + 'name'\n newInstance.name = climate[name] \n startDate = keyProto + 'startdate'\n try:\n newInstance.startDate = datetime.strptime(climate[startDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass \n endDate = keyProto + 'enddate'\n try:\n newInstance.endDate = datetime.strptime(climate[endDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass\n variablesKey = keyProto + 'variables'\n try:\n newInstance.variables = climate[variablesKey].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n pass\n data = keyProto + 'data'\n try:\n newInstance.data = climate[data]\n except KeyError:\n pass\n try:\n for var in newInstance.variables:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n newInstance.variablesData[var] = climate[varKey]\n except KeyError:\n pass\n \n return newInstance"
] | [
"0.7367401",
"0.6412568",
"0.62726176",
"0.59334",
"0.5545114",
"0.5398628",
"0.5321518",
"0.5314684",
"0.53015107",
"0.5264374",
"0.52064365",
"0.50884044",
"0.5035485",
"0.5029372",
"0.50156707",
"0.4898175",
"0.48908222",
"0.4814849",
"0.4801623",
"0.4764186",
"0.4760266",
"0.47295085",
"0.47264224",
"0.47233906",
"0.47076923",
"0.4682454",
"0.46600068",
"0.46183673",
"0.4610782",
"0.45818788"
] | 0.78591615 | 0 |
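The ClimatePointStation.writeToMetadata negative in the record above shows how station attributes are flattened into compound keys before being handed to writeClimatePointEntries as parallel key and value lists. A short self-contained sketch of that flattening follows; the separator value and the example station fields are assumptions modeled on the snippet, not verified constants.

```python
COMPOUND_KEY_SEP = '_'  # assumed separator; the real constant lives in GenericMetadata

def station_entries(station_type, station_id, attrs):
    """Flatten a station's attributes into parallel (key, value) pairs."""
    fq_id = (station_type + COMPOUND_KEY_SEP + station_id).lower()
    key_proto = 'station' + COMPOUND_KEY_SEP + fq_id + COMPOUND_KEY_SEP
    return [(key_proto + name, str(value)) for name, value in attrs.items()]

pairs = station_entries('GHCN', 'US1NCBC0005',
                        {'longitude': -79.05, 'latitude': 35.92, 'name': 'Chapel Hill'})
keys = [k for k, _ in pairs]
values = [v for _, v in pairs]
# keys and values line up index for index, which is the contract enforced by
# _writeEntriesToSection (it raises if the two lists differ in length).
```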
Write point climate entries to the metadata store for a given project. Will overwrite the values for keys that already exist.
context -- Context object containing projectDir, the path of the project whose metadata store is to be written to
keys -- List of keys to be written to the point climate section of the project metadata
values -- List of values to be written for keys stored in the point climate section of the project metadata
Raises IOError(errno.EACCES) if the metadata store for the project is not writable
Raises Exception if len(keys) != len(values) | def writeClimatePointEntries(context, keys, values):
GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def writeClimatePointEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeClimateGridEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)",
"def writePoints(self, pointsvalues):\n raise NotImplementedError()",
"def write_locations(pathfolder, key_firms, years, locs, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['hashes'] = generate_yearnif_hash(years, key_firms)\n db['nif'] = key_firms\n db['year'] = years\n db['locations'] = locs\n db['methodvalues'] = methodvalues\n db.close()",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def writeCheckpoint(self, metadata_key, times):\r\n # Make sure that the directory exists\r\n try:\r\n os.makedirs(self.tmpDir)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n\r\n # Open a temporary file\r\n (file_handle, tmp_filename) = tempfile.mkstemp(dir=self.tmpDir)\r\n wrapped_file = os.fdopen(file_handle, 'w')\r\n wrapped_file.write(json.dumps(times))\r\n wrapped_file.close()\r\n os.rename(tmp_filename, self.tmpDir + metadata_key)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def write_tide_values(tide_values, plist, level):\n pts_schema = {\n \"geometry\": \"Point\",\n \"properties\": {\"p_ID\": \"int\", str(level): \"float\"},\n }\n\n mem_file = fiona.MemoryFile()\n ms = mem_file.open(crs=from_epsg(4326), driver=\"ESRI Shapefile\", schema=pts_schema,)\n\n for pid, (p, tv) in enumerate(zip(plist, tide_values)):\n prop = {\"p_ID\": int(pid + 1), str(level): float(tv)}\n ms.write({\"geometry\": mapping(p), \"properties\": prop})\n\n return ms",
"def writePoints(self, pointsvalues):\n try: values = dict(pointsvalues)\n except ValueError: \n #If pointsvalues is not a list, we get a ValueError\n values = dict([pointsvalues])\n points = values.keys()\n return self._requestPoints(points, 'w', values)",
"def write(self, path, key):\n raise NotImplementedError",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def writeEcMaps( self ):\n\n self.logger.info( 'writeEcMaps: START' )\n\n self.logger.info( 'writeEcMaps: insert file will be ecMapsInsert.psql' )\n\n ecMapsFile = self.openInsertFile( 'ecMapsInsert.psql' )\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n ecMaps = self.reader.getEcMaps()\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n for ec,mapNumbers in ecMaps.iteritems():\n ecId = self.importerEc.ecsInserted[ ec ]\n \n for mapNumber in mapNumbers:\n\n if mapNumber in self.importerPathway.pathwayMapsInserted:\n\n mapId = self.importerPathway.pathwayMapsInserted[ mapNumber ]\n\n #self.writeEcMapsFile( ecMapsFile, ecId, mapId )\n self.writeFile( ecMapsFile, 'ec_maps', [ str(ecId), str(mapId) ] )\n\n self.logger.info( 'writeEcMaps: DONE' )",
"def __setitem__(self, keys, value):\n\n if isinstance(keys, str):\n keys = [keys]\n\n #print(\"KEYTYPE: {0}\".format(keys))\n self.__setInDict(self.__cfg, keys, value)\n self.write(self.__cfgFile)",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def sync_set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def writedb(path, key, value) -> int:\n if key == \"\" or value == \"\":\n return 1\n if os.path.exists(path):\n pass \n else:\n return 1\n with open(path, \"a\") as db:\n db.write(f\"\\n{key}:{value}\")\n return 0",
"def writeToMetadata(self, context):\n pass",
"def write_files(self, basedir):\n outdir = basedir / self.type\n outdir.mkdir(parents=True, exist_ok=True)\n\n for point, row in zip(self.points, self.array):\n filepath = outdir / point\n with filepath.open('w') as f:\n idx = 0\n for ikey in self.pardict.keys():\n f.write(\"{} {}\\n\".format(ikey, row[idx]))\n idx += 1\n logging.debug('wrote %s', filepath)",
"def write_to_tape(self, new_values_dict):\n\n # new_values_dict is not allowed to contain un-initialised keys\n assert all([key in self.tape.keys() for key in new_values_dict.keys()]), \\\n f\"self.tape.keys()={self.tape.keys()}\\nnew_values_dict.keys()={new_values_dict.keys()}\"\n\n for key in self.tape.keys():\n if key in new_values_dict.keys():\n self.tape[key].append(copy.deepcopy(new_values_dict[key]))\n else:\n self.tape[key].append(None)",
"def save_n3d_coords(file_path, coords_dict, seq_pos_dict): \n \n file_obj = open(file_path, 'w')\n write = file_obj.write\n \n for chromo in seq_pos_dict:\n chromo_coords = coords_dict[chromo]\n chromo_seq_pos = seq_pos_dict[chromo]\n \n num_models = len(chromo_coords)\n num_coords = len(chromo_seq_pos)\n \n if chromo[:3].lower() != 'chr':\n chromo_name = 'chr' + chromo\n else:\n chromo_name = chromo\n \n line = '%s\\t%d\\t%d\\n' % (chromo_name, num_coords, num_models)\n write(line)\n \n for j in range(num_coords):\n data = chromo_coords[:,j].ravel().tolist()\n data = '\\t'.join('%.8f' % d for d in data)\n \n line = '%d\\t%s\\n' % (chromo_seq_pos[j], data)\n write(line)\n\n file_obj.close()"
] | [
"0.67549765",
"0.66737044",
"0.66195965",
"0.6296745",
"0.6239152",
"0.59280026",
"0.54196686",
"0.5238751",
"0.51865333",
"0.5180364",
"0.5136568",
"0.512455",
"0.5122717",
"0.49809092",
"0.49597445",
"0.49249732",
"0.49174872",
"0.49159452",
"0.48960033",
"0.4796764",
"0.47960767",
"0.47921002",
"0.47732544",
"0.4765759",
"0.4728297",
"0.47181857",
"0.47067043",
"0.46933353",
"0.467466",
"0.46606633"
] | 0.80713403 | 0 |
Write a grid climate entry to the metadata store for a given project. Will overwrite the value for a key that already exists. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. key: The key to be written to the grid climate section of the project metadata. value: The value to be written for key stored in the grid climate section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def writeClimateGridEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def writeClimatePointEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value)",
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def updateProject(self, index, data_role):\n row_index = index.row()\n value = self._dataModel.data(index, data_role)\n experiment_id = self._project.experimentsIds()[0] # only 1st measured datablock is currently taken into account\n keys = [\"experiments\", experiment_id, \"calculated\", \"calc\"]\n self._project.setByPathAndIndex(keys, row_index, value)",
"def edit(cls, project_id, resource_type, resource_id, key, value):\n\n try:\n cls.get(\n project_id=project_id,\n resource_type=resource_type,\n resource_id=resource_id,\n key=key\n )\n\n query = DBMetadata.query()\n query = query.filter_by(\n resource_type=resource_type,\n resource_id=resource_id,\n project_id=project_id,\n key=key,\n deleted=False\n )\n return query.update({\"value\": json.dumps(value)})\n\n except exception.NotFound:\n return DBMetadata.create(\n resource_type=resource_type,\n resource_id=resource_id,\n project_id=project_id,\n key=key,\n value=json.dumps(value)\n )",
"def writeGRASSEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.GRASS_SECTION, key, value)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def deleteClimateGridEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key)",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def update_metadata(state, name, wid, file_id, metadata_file):\n\n client = state.api_client\n\n # Get the workspace details\n w_details = helpers.workspace.details(client, wid, name)\n if w_details is None:\n # Can only happen when the name is used and there are no results. Not\n # with the wid option because it would raise a 404 QuetzalAPIException\n raise click.ClickException(f'Workspace named \"{name}\" does not exist.')\n\n metadata_contents = json.load(metadata_file)\n response = client.workspace_file_update_metadata(wid=w_details.id, uuid=file_id,\n body=metadata_contents)\n click.secho(f'Metadata for file {file_id} successfully changed.', fg='green')\n click.secho('Updated metadata:')\n click.secho(json.dumps(response, indent=2), fg='blue')",
"def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value",
"def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def sync_set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def _set_entry(self, cvs_path, node):\n\n self._make_writable()\n self._set_entry(cvs_path, node)",
"def readClimateGridEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION)",
"def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already exists')\n self.dict['meta'] = str(metadata)",
"def set_metadata(self, key, val):\n \n self.metadata[key] = val",
"def write(self, path, key):\n raise NotImplementedError",
"def update_plasticc_metadata(metadata):\n # Rename columns in the metadata table to match the avocado conventions.\n metadata_name_map = {\n 'true_target': 'class',\n 'hostgal_photoz_err': 'host_photoz_error',\n 'hostgal_photoz': 'host_photoz',\n 'hostgal_specz': 'host_specz',\n 'ddf_bool': 'ddf',\n 'true_z': 'redshift',\n }\n metadata.rename(metadata_name_map, axis=1, inplace=True)\n\n # Convert the ddf flag to a boolean\n metadata['ddf'] = metadata['ddf'].astype(bool)\n\n # Explicitly set a galactic/extragalactic flag.\n metadata['galactic'] = metadata['host_photoz'] == 0.\n\n # Update the object_id\n new_object_id = ['plasticc_%09d' % i for i in metadata['object_id']]\n metadata['object_id'] = new_object_id\n\n # Drop useless columns that are just confusing and unnecessary.\n metadata.drop(['target', 'distmod'], axis=1, inplace=True)\n\n metadata.set_index('object_id', inplace=True)\n\n return metadata",
"def save(self):\n logging.debug(\"environment save entered\")\n filename = \"index.json\"\n content_dict = {}\n for fpname in self.footprints:\n # for now, just using the patteern ${footprint_name}-metadata for the name \n content_dict[fpname] = fpname\n content = json.dumps(content_dict)\n index = cf.store_object(self.container, filename, content) \n return True",
"def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()",
"def update_project(self, project_id, project):\n\n with self._transaction.cursor() as cur:\n # ensure this project exists\n cur.execute(\n \"SELECT project_id \"\n \"FROM barcodes.project \"\n \"WHERE project_id=%s;\",\n (project_id,))\n\n row = cur.fetchone()\n if row is None:\n raise NotFound(\"No project with ID %s\" % project_id)\n\n query = f\"\"\"\n UPDATE barcodes.project\n SET {p.DB_PROJ_NAME_KEY}=%s,\n {p.SUBPROJECT_NAME_KEY}=%s,\n {p.ALIAS_KEY}=%s,\n {p.IS_MICROSETTA_KEY}=%s,\n {p.SPONSOR_KEY}=%s,\n {p.COORDINATION_KEY}=%s,\n {p.CONTACT_NAME_KEY}=%s,\n {p.ADDTL_CONTACT_NAME_KEY}=%s,\n {p.CONTACT_EMAIL_KEY}=%s,\n {p.DEADLINES_KEY}=%s,\n {p.NUM_SUBJECTS_KEY}=%s,\n {p.NUM_TIMEPOINTS_KEY}=%s,\n {p.START_DATE_KEY}=%s,\n {p.BANK_SAMPLES_KEY}=%s,\n {p.PLATING_START_DATE_KEY}=%s,\n {p.DISPOSITION_COMMENTS_KEY}=%s,\n {p.COLLECTION_KEY}=%s,\n {p.IS_FECAL_KEY}=%s,\n {p.IS_SALIVA_KEY}=%s,\n {p.IS_SKIN_KEY}=%s,\n {p.IS_BLOOD_KEY}=%s,\n {p.IS_OTHER_KEY}=%s,\n {p.DO_16S_KEY}=%s,\n {p.DO_SHALLOW_SHOTGUN_KEY}=%s,\n {p.DO_SHOTGUN_KEY}=%s,\n {p.DO_RT_QPCR_KEY}=%s,\n {p.DO_SEROLOGY_KEY}=%s,\n {p.DO_METATRANSCRIPTOMICS_KEY}=%s,\n {p.DO_MASS_SPEC_KEY}=%s,\n {p.MASS_SPEC_COMMENTS_KEY}=%s,\n {p.MASS_SPEC_CONTACT_NAME_KEY}=%s,\n {p.MASS_SPEC_CONTACT_EMAIL_KEY}=%s,\n {p.DO_OTHER_KEY}=%s,\n {p.BRANDING_ASSOC_INSTRUCTIONS_KEY}=%s,\n {p.BRANDING_STATUS_KEY}=%s\n WHERE project_id=%s;\"\"\"\n\n cur.execute(query,\n (\n project.project_name,\n project.subproject_name,\n project.alias,\n project.is_microsetta,\n project.sponsor,\n project.coordination,\n project.contact_name,\n project.additional_contact_name,\n project.contact_email,\n project.deadlines,\n project.num_subjects,\n project.num_timepoints,\n project.start_date,\n project.bank_samples,\n project.plating_start_date,\n project.disposition_comments,\n project.collection,\n project.is_fecal,\n project.is_saliva,\n project.is_skin,\n project.is_blood,\n project.is_other,\n project.do_16s,\n project.do_shallow_shotgun,\n project.do_shotgun,\n project.do_rt_qpcr,\n project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec,\n project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status,\n project_id\n ))\n return cur.rowcount == 1"
] | [
"0.6694937",
"0.63357854",
"0.6136591",
"0.581763",
"0.5691291",
"0.5482182",
"0.53957677",
"0.525952",
"0.52572453",
"0.52550113",
"0.52379483",
"0.51842844",
"0.51610136",
"0.5108495",
"0.50271004",
"0.5011402",
"0.4947504",
"0.4934356",
"0.49206182",
"0.48853344",
"0.4877173",
"0.4862158",
"0.48196745",
"0.4802106",
"0.47811309",
"0.47707102",
"0.4728751",
"0.47241405",
"0.47103834",
"0.4702107"
] | 0.7376574 | 0 |
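A minimal usage sketch for the writeClimateGridEntry row above, assuming an EcohydroLib-style layout in which Context and GenericMetadata are importable as shown; the import paths, project directory, and key/value pair are illustrative assumptions and not part of the dataset row.

from ecohydrolib.context import Context           # assumed import path
from ecohydrolib.metadata import GenericMetadata  # assumed import path

ctx = Context("/tmp/my_project")  # placeholder project directory with a writable metadata store
# Writes (or overwrites) the key in the grid climate section of the project metadata store.
GenericMetadata.writeClimateGridEntry(ctx, "grid_origin_x", "-122.3")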
Write a HydroShare entry to the metadata store for a given project. Will overwrite the value for a key that already exists. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. key: The key to be written to the HydroShare section of the project metadata. value: The value to be written for key stored in the HydroShare section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def writeHydroShareEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.HYDROSHARE_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeStudyAreaEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.STUDY_AREA_SECTION, key, value)",
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def modify_share(self, pool, project, share, arg):\n svc = self.share_path % (pool, project, share)\n ret = self.rclient.put(svc, arg)\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_('Error modifying %(arg)s '\n ' of share %(id)s.')\n % {'arg': arg,\n 'id': share})\n raise exception.ShareBackendException(msg=exception_msg)",
"def write(self, path, key):\n raise NotImplementedError",
"def writeGRASSEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.GRASS_SECTION, key, value)",
"def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value",
"def put(self, key, value):\n self.execute_command('sudo -i bash -c \\'echo -n \"{0}\" > {1}{2}\\''\n .format(value, self._store_path, key))",
"def writeManifestEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value)",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()",
"def put(self, key, value):\n c = self.conn.cursor()\n c.execute(\"REPLACE INTO metastore (k, v) VALUES (?, ?)\", (key, value))\n self.conn.commit()\n return True",
"def locked_put(self, credentials, overwrite=False):\n args = {self.key_name: self.key_value}\n\n if overwrite:\n (entity,\n unused_is_new) = self.model_class.objects.get_or_create(**args)\n else:\n entity = self.model_class(**args)\n\n setattr(entity, self.property_name, credentials)\n entity.save()",
"def writeMetadataValue(self, field_type, key_field, field_name, field_value, \\\n study_id, host_key_field, row_num, lock):\n \n # This is a mess and it's slow right now. In need of serious speed improvements and cleanup.\n \n statement = ''\n log = []\n pk_name = ''\n\n try:\n #lock.acquire()\n \n # Get our database connection\n con = self.getMetadataDatabaseConnection()\n \n # Set the timeout\n #con.cursor().execute('alter session set ddl_lock_timeout=100') \n \n # Find the table name\n log.append('Locating table name...')\n table_name = None\n table_name = self.findMetadataTable(field_name, field_type, log, study_id, lock)\n log.append('Successfully found table name. Table name is \"%s\"' % str(table_name))\n \n #if field_name == 'dw1':\n # raise Exception('asdf')\n\n # Double-quote for database safety\n log.append('Adding quotes to table name...')\n table_name = '\"' + table_name + '\"' \n log.append('Quoted table name is %s' % table_name)\n log.append('Table name found: %s' % (table_name))\n \n # Store the field name in the database. These are the field names which will\n # be used later to generate a mapping file. We collect the names here because\n # it's an expensive operation to determine post-commit which fields were\n # actually submitted to the database.\n log.append('Attempting to store values in study_actual_columns: %s, %s, %s'\\\n % (study_id, field_name, table_name))\n self.addStudyActualColumn(study_id, field_name, table_name);\n \n # Get extended field info from the database\n log.append('Loading field details...')\n field_details = self.getFieldDetails(field_name)\n log.append(str(field_details))\n if field_details == None:\n log.append('Could not obtain detailed field information. Skipping.')\n raise Exception\n else:\n database_data_type = field_details[1]\n log.append('Read field database data type as \"%s\"' % database_data_type)\n \n # If the field value is 'unknown', switch to 'null' (empty string is the same as null)\n #pass_values = \n if str(field_value).upper() == 'UNKNOWN':\n field_value = ''\n # Figure out if this needs to be an integer ID instead of a text value\n elif database_data_type == 'list':\n log.append('Field is of type list. Looking up integer value...')\n named_params = {'field_value':field_value.upper()}\n statement = 'select vocab_value_id from controlled_vocab_values where upper(term) = :field_value'\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement, named_params).fetchone()\n if results != None:\n # If found, set the field_value to its numeric identifier for storage\n log.append('Value found in controlled_vocab_values. Old field value: \"%s\", new field value: \"%s\".'\\\n % (field_value, results[0]))\n field_value = results[0]\n else:\n log.append('Could not determine inteteger value for list term \"%s\" with value \"%s\". 
Skipping.'\\\n % (field_name, field_value))\n raise Exception\n \n # Set the field_name to it's quoted upper-case name to avoid key-work issues with Oracle\n field_name = '\"%s\"' % field_name.upper()\n \n ########################################\n ### For STUDY\n ########################################\n \n # Study is special - handle separately since row is guaranteed to exist and there can only be one row\n if table_name == '\"STUDY\"' or 'EXTRA_STUDY_' in table_name:\n log.append('Updating study field...')\n named_params = {'field_value':field_value, 'study_id':study_id}\n statement = 'update %s set %s = :field_value where study_id = :study_id' % (table_name, field_name)\n statement = str(statement)\n log.append(statement)\n log.append('field_value = \"%s\", study_id = \"%s\"' % (field_value, study_id))\n results = con.cursor().execute(statement, named_params)\n con.cursor().execute('commit')\n log.append('Study updated.')\n #raise Exception\n return\n \n ########################################\n ### For other tables\n ########################################\n \n if table_name in ['\"AIR\"', '\"COMMON_FIELDS\"', '\"MICROBIAL_MAT_BIOFILM\"', '\"OTHER_ENVIRONMENT\"', \\\n '\"SAMPLE\"', '\"SEDIMENT\"', '\"SOIL\"', '\"WASTEWATER_SLUDGE\"', '\"WATER\"', '\"SEQUENCE_PREP\"', \\\n '\"HOST_ASSOC_VERTIBRATE\"', '\"HOST_ASSOCIATED_PLANT\"', '\"HOST_SAMPLE\"', '\"HUMAN_ASSOCIATED\"',\n '\"COMMON_EXTRA_SAMPLE\"', '\"COMMON_EXTRA_SAMPLE_2\"', '\"COMMON_EXTRA_PREP\"'] \\\n or 'EXTRA_SAMPLE_' in table_name or 'EXTRA_PREP_' in table_name:\n named_params = {'key_field':key_field, 'study_id':study_id}\n statement = 'select sample_id from \"SAMPLE\" where sample_name = :key_field and study_id = :study_id'\n statement = str(statement)\n key_column = 'sample_id'\n key_table = '\"SAMPLE\"'\n elif table_name in ['\"HOST\"']:\n named_params = {'key_field':'{0}:{1}'.format(str(study_id), host_key_field)}\n statement = 'select host_id from \"HOST\" where host_subject_id = :key_field'\n statement = str(statement)\n key_column = 'host_id'\n key_table = '\"HOST\"'\n else:\n return 'Unknown table found - no action taken. Table name was: \"%s\". Column name was: \"%s\"'\\\n % (table_name, field_name)\n \n # Find the assocaited key column\n log.append('Determining if required key row exists...')\n log.append(statement + '\", key_field is ' + key_field + ', study_id is ' + str(study_id))\n results = con.cursor().execute(statement, named_params).fetchone()\n if results != None:\n key_column_value = results[0]\n log.append('Found key_column_value: %s' % str(key_column_value))\n else:\n log.append('Could not determine key. 
Skipping write for field %s with value \"%s\".'\\\n % (field_name, field_value))\n raise Exception\n\n\n\n\n\n\n\n\n\n\n\n ####### to speed up access, create local storage for all items already seen\n\n\n\n # If it ain't there, create it\n log.append('Checking if row already exists...')\n \n # Must append row_num if sequence table\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n named_params = {'key_column_value':key_column_value, 'row_number':row_num}\n statement = 'select 1 from %s where %s = :key_column_value and row_number = :row_number'\\\n % (table_name, key_column)\n else:\n named_params = {'key_column_value':key_column_value}\n statement = 'select 1 from %s where %s = :key_column_value' % (table_name, key_column)\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement, named_params).fetchone()\n \n if results == None:\n log.append('No row found, inserting new row')\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n log.append('Row number is %s' % (str(row_num)))\n named_params = {'key_column_value':key_column_value, 'row_number':row_num}\n statement = 'insert into %s (%s, row_number) values (:key_column_value, :row_number)'\\\n % (table_name, key_column)\n else:\n named_params = {'key_column_value':key_column_value}\n statement = 'insert into %s (%s) values (:key_column_value)' % (table_name, key_column)\n statement = str(statement)\n log.append(statement)\n con.cursor().execute(statement, named_params)\n\n\n\n\n\n\n \n # Attempt to write the metadata field\n log.append('Writing metadata value...')\n \n # If it's a date, must not put quotes around the oracle to_date function.\n if database_data_type == 'date':\n field_value = self.convertToOracleHappyName(field_value)\n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n statement = 'update %s set %s = %s where %s = %s and row_number = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value, row_num)\n else: \n statement = 'update %s set %s = %s where %s = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value) \n else: \n if table_name == '\"SEQUENCE_PREP\"' or table_name == '\"COMMON_EXTRA_PREP\"' or 'EXTRA_PREP_' in table_name:\n statement = 'update %s set %s = \\'%s\\' where %s = %s and row_number = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value, row_num)\n else: \n statement = 'update %s set %s = \\'%s\\' where %s = %s'\\\n % (table_name, field_name, field_value, key_column, key_column_value)\n statement = str(statement)\n log.append(statement)\n results = con.cursor().execute(statement)\n \n # Finally, commit the changes\n results = con.cursor().execute('commit')\n\n #if field_name == '\"DW1\"':\n # raise Exception('Found DW1: Dumping log')\n \n except Exception, e:\n call_string = 'writeMetadataValue(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'\\\n % (field_type, key_field, field_name, field_value, study_id)\n log_string = '<br/>'.join(log)\n error_msg = 'Exception caught attempting to store field \"%s\" with value \"%s\" into \\\n table \"%s\".<br/>Method signature: %s<br/>Full error log:<br/>%s<br/>Oracle says: %s' % \\\n (field_name, field_value, table_name, call_string, log_string, str(e))\n raise Exception(error_msg)\n finally:\n # Release the lock\n #lock.release()\n log.append('Lock released')",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def __setitem__(self, key, value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))",
"def writedb(path, key, value) -> int:\n if key == \"\" or value == \"\":\n return 1\n if os.path.exists(path):\n pass \n else:\n return 1\n with open(path, \"a\") as db:\n db.write(f\"\\n{key}:{value}\")\n return 0",
"def deleteHydroShareEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.HYDROSHARE_SECTION, key)",
"def putHid(hid, did, dbn=\"hid2did\", env=None):\n global gDbEnv\n\n if env is None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n subDb = env.open_db(dbn.encode(\"utf-8\")) # open named sub dbn within env\n with env.begin(db=subDb, write=True) as txn: # txn is a Transaction object\n # will overwrite by default\n result = txn.put(hid.encode(\"utf-8\"), did.encode(\"utf-8\")) # keys and values are bytes\n return result",
"def put_metadata(self, metadata, tombstone=False):\n if tombstone:\n # We don't write tombstone files. So do nothing.\n return\n assert self.data_file is not None, \\\n \"put_metadata: no file to put metadata into\"\n metadata = _adjust_metadata(metadata)\n self.threadpool.run_in_thread(write_metadata, self.data_file, metadata)\n self.metadata = metadata\n self._filter_metadata()",
"def put(self, project_slug):\n try:\n docker_repo_field(project_slug, 'slug')\n except ValueError as ex:\n raise WrappedValueError(ex)\n\n args = PROJECT_NEW_PARSER.parse_args(strict=True)\n args = clean_attrs(args)\n\n args['slug'] = project_slug\n\n if 'gitlab_repo_id' in args:\n args['external_auth_token'] = (\n current_user.oauth_token_for('gitlab'))\n\n elif 'github_repo_id' in args:\n args['external_auth_token'] = (\n current_user.oauth_token_for('github'))\n\n if args['utility']: # Utilities must have target registry set\n ensure_target_registry(True)\n\n set_target_registry(args)\n\n return self.handle_write(Project(), data=args)",
"def store(self, name, value):\n # ...but only when the context has been entered (and locks acquired etc.)\n if not self.ready:\n raise RuntimeError(\"SnapshotView is a context manager. Never use it directly!\")\n # Do not ask for permission - overwrite the old entry if necessary\n self.data[name] = value",
"def write_metadata_to_file(self, path):\n return write_metadata_to_ma_file(path, self)",
"def put(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n # collect the json from the request\n project_json = simplejson.loads(self.request.body)\n # update the project record\n project = helpers.apply_json_to_model_instance(project, project_json)\n # save the updated data\n project.put()\n \n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(project_json))\n \n else:\n self.response.set_status(404, \"Project not found\")\n else:\n self.response.set_status(401, \"Not Authorized\")",
"def set_metadata(self, key, val):\n \n self.metadata[key] = val",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)"
] | [
"0.6070559",
"0.5985404",
"0.57395643",
"0.5506189",
"0.5480306",
"0.53947574",
"0.5374139",
"0.5336701",
"0.5335403",
"0.53242046",
"0.5319031",
"0.5252505",
"0.52357906",
"0.5161735",
"0.51210135",
"0.51179457",
"0.50932795",
"0.507933",
"0.50530195",
"0.5035785",
"0.5014921",
"0.49522027",
"0.49193355",
"0.49083722",
"0.48882306",
"0.48683578",
"0.48475817",
"0.48434186",
"0.47992745",
"0.4793837"
] | 0.72641283 | 0 |
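The writeEntryToSection helper quoted in the negatives above serializes writers with a lock file and stores entries through ConfigParser. Below is a stripped-down Python 3 sketch of that pattern only; the file names, polling interval, and the added try/finally are assumptions for illustration, and the original code is Python 2 (ConfigParser.RawConfigParser, xrange).

import configparser
import os
import time

def write_entry(project_dir, section, key, value):
    # Illustrative sketch of the lock-file + ConfigParser pattern; not the library's API.
    lock_path = os.path.join(project_dir, "metadata.lock")   # assumed file name
    meta_path = os.path.join(project_dir, "metadata.txt")    # assumed file name
    while os.path.exists(lock_path):   # wait for any other writer to finish
        time.sleep(1)
    open(lock_path, "w").close()       # take the lock
    try:
        config = configparser.RawConfigParser()
        config.read(meta_path)         # a missing file is silently treated as empty
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, key, value)
        with open(meta_path, "w") as meta_file:
            config.write(meta_file)
    finally:
        os.unlink(lock_path)           # release the lock even on error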
Delete an entry from the manifest section of the metadata store for a given project. context: Context object containing projectDir, the path of the project whose metadata store is to be deleted from. key: The key to be deleted from the given section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def deleteManifestEntry(context, key):
GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def test_unpacker_delete_manifest_metadata_v3(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n mock_os_remove = mocker.patch(\"os.remove\")\n mock_os_remove.side_effect = [NameError, None]\n p._delete_manifest_metadata(\"0869ea50-e437-443f-8cdb-31a350f88e57\")\n mock_os_remove.assert_called_with(\"/tmp/lta/testing/unpacker/outbox/0869ea50-e437-443f-8cdb-31a350f88e57.metadata.ndjson\")",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def delete_project(arn=None):\n pass",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def delete_metadata(self, keys):\n return self.manager.delete_metadata(self, keys)",
"def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)",
"def delete_entry(title):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)",
"def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n self._impl.delete_metadata(volume, keys=keys, deletes=10,\n delete_size=3)",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)",
"def delete(self, id):\n\n kparams = KalturaParams()\n kparams.addIntIfDefined(\"id\", id);\n self.client.queueServiceActionCall(\"metadata_metadataprofile\", \"delete\", \"None\", kparams)\n if self.client.isMultiRequest():\n return self.client.getMultiRequestResult()\n resultNode = self.client.doQueue()",
"def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete_metadata(self, keys=None):\n return self.parent.delete_metadata_for_node(self, keys=keys)",
"def post_project_delete(self, resource_id, resource_dict):\n pass",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def remove(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()",
"def delete_key(self, key_name, headers=None,\r\n version_id=None, mfa_token=None):\r\n os.remove(key_name)",
"def delete_current_entry(self):\n if not self.current_history_entry:\n return\n\n filename = self.current_history_entry['filename']\n self.debug('Removing history entry for \"%s\" from project \"%s\"' % (filename, self.project_name))\n self.__remove(self.project_name, filename)\n self.__save_history()",
"def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")",
"def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))"
] | [
"0.68024397",
"0.6111268",
"0.6022293",
"0.5909736",
"0.588682",
"0.58015454",
"0.57910436",
"0.5750921",
"0.57077354",
"0.5683751",
"0.5682426",
"0.56275785",
"0.56221336",
"0.5618329",
"0.56145746",
"0.5569288",
"0.55320674",
"0.5485642",
"0.54798424",
"0.5467747",
"0.5465148",
"0.5430634",
"0.54189295",
"0.5418051",
"0.5407543",
"0.54046553",
"0.5402105",
"0.53872544",
"0.5356405",
"0.5352264"
] | 0.68855435 | 0 |
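A hedged sketch of the deleteManifestEntry helper documented in the row above, paired with writeManifestEntry, which appears verbatim in the negatives; as in the earlier sketch, the import paths, project directory, and key name are illustrative assumptions.

from ecohydrolib.context import Context           # assumed import path, as in the earlier sketch
from ecohydrolib.metadata import GenericMetadata  # assumed import path

ctx = Context("/tmp/my_project")                            # placeholder project directory
GenericMetadata.writeManifestEntry(ctx, "dem", "dem.tif")   # writeManifestEntry appears in the negatives above
GenericMetadata.deleteManifestEntry(ctx, "dem")             # removes the key; a missing section is silently ignored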
Delete an entry from the study area section of the metadata store for a given project. context: Context object containing projectDir, the path of the project whose metadata store is to be deleted from. key: The key to be deleted from the given section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def deleteStudyAreaEntry(context, key):
GenericMetadata.deleteEntryFromSection(context, GenericMetadata.STUDY_AREA_SECTION, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def deleteStudy(self, study_id, full_delete):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.study_delete', [study_id, full_delete])",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def deleteClimatePointEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key)",
"def delete_project(arn=None):\n pass",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def delete(self, *args, **kwargs):\n print(\"form delete\")\n self.is_deleted = True\n current_section_sequence = self.section_sequence\n\n #This can be modified if we have to hard delete the sections\n\n # for sec_id in current_section_sequence:\n # current_section = Sections.objects.get(id = sec_id )\n # current_section.delete()\n\n self.save()",
"def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])",
"def test_d_remove_database(self):\n\n if os.path.isfile(location):\n os.remove(location)\n\n assert(True)",
"def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()",
"def deleteClimateGridEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key)",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def delete(self, tree_path):\n\t\traise NotImplementedError",
"def delete(self, c_path):\n raise NotImplementedError",
"def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))",
"def delete_study(self, study_id: int) -> None:\n raise NotImplementedError",
"def delete_this_region(self):",
"def remove(self, area, field, subfield, problem, name, verbosity):\n if self.__path_check(self.database_dir, self.storage_dir, settings.rootdir,\n 'removing') == False:\n return\n\n for s in self.shelves[:]:\n if area == s.get_area():\n if field is None:\n ok = raw_input('Are you sure to delete area {} --> '.format(area))\n if ok in ('y', 'ye', 'yes', 'Y', 'YE', 'YES'):\n self.shelves.remove(s)\n if os.path.exists(self.database_dir + '/' + area) == True:\n shutil.rmtree(self.database_dir + '/' + area)\n if os.path.exists(self.storage_dir + '/' + area) == True:\n shutil.rmtree(self.storage_dir + '/' + area)\n\n if verbosity >= 1:\n print 'remove area {}'.format(area)\n self.add_log('remove area {}'.format(area))\n else:\n s.remove(self.database_dir + '/' + area, self.storage_dir + '/' +\n area, field, subfield, problem, name, verbosity)\n\n with open('.area', 'w') as f:\n for s in self.shelves[:]:\n f.write(s.get_area() + '\\n')",
"def delete_key_command():\n incident = demisto.args().get('id', get_investigation_id())\n key = demisto.args().get('key')\n # Search Collection for incident_id and key\n search = incident + '.key'\n cursor = COLLECTION.find_one({search: key})\n if cursor is not None:\n object_id = cursor.get('_id')\n COLLECTION.delete_one({'_id': object_id})\n return f'Incident \"{incident}\" - key/value collection - 1 document deleted', {}, {}\n return f'Key \"{key}\" for incident_id \"{incident}\" does not exist', {}, {}",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def delete(self, path):\n raise imap4.MailboxException(\"Permission denied.\")",
"def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)",
"def delete(self, keyword, key):",
"def delete_file(self, key):\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n os.unlink(path)\n else:\n raise ValueError(f\"No such file: {key}\")",
"def post_project_delete(self, resource_id, resource_dict):\n pass",
"def access_info_delete(context, storage_id):\n _access_info_get_query(context). \\\n filter_by(storage_id=storage_id).delete()"
] | [
"0.639799",
"0.5758851",
"0.57235366",
"0.57130414",
"0.5666655",
"0.56168956",
"0.55672556",
"0.5476324",
"0.54678047",
"0.5369297",
"0.5334535",
"0.53198314",
"0.53188354",
"0.5314964",
"0.53112054",
"0.5301273",
"0.52748704",
"0.5268022",
"0.52541924",
"0.52227664",
"0.52146477",
"0.5210868",
"0.52042735",
"0.51998997",
"0.5194008",
"0.5193363",
"0.519121",
"0.5187664",
"0.518388",
"0.51760846"
] | 0.70406127 | 0 |
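The wrapper in the record above delegates to GenericMetadata.deleteEntryFromSection, whose full body appears as the first negative in that record. For readers who want the mechanics without the string escaping, the following is a minimal sketch of that flow rewritten for Python 3 (configparser instead of ConfigParser); the literal file names and the omission of the version-stamping callback are simplifications and assumptions, not part of the original API.

# Minimal sketch of the delete-entry flow shown in the first negative above (Python 3).
# File names are spelled out literally here; in the original they come from
# GenericMetadata constants.
import configparser
import errno
import os
import time

METADATA_FILENAME = 'metadata.txt'   # assumption: literal stand-in
METADATA_LOCKFILE = 'metadata.lock'  # assumption: literal stand-in

def delete_entry_from_section(project_dir, section, key):
    metadata_path = os.path.join(project_dir, METADATA_FILENAME)
    lock_path = os.path.join(project_dir, METADATA_LOCKFILE)

    # Refuse to proceed if the store (or, failing that, the project directory) is not writable.
    target = metadata_path if os.path.exists(metadata_path) else project_dir
    if not os.access(target, os.W_OK):
        raise IOError(errno.EACCES,
                      "Unable to write to metadata store for project %s" % project_dir)

    # Crude lock-file protocol used by the original: wait for other writers, then claim the lock.
    while os.path.exists(lock_path):
        time.sleep(5)
    open(lock_path, 'w').close()
    try:
        config = configparser.RawConfigParser()
        config.read(metadata_path)
        if config.has_section(section):
            config.remove_option(section, key)
        with open(metadata_path, 'w') as f:
            config.write(f)
    finally:
        os.unlink(lock_path)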
Delete an entry from the GRASS section of the metadata store for a given project. context -- Context object containing projectDir, the path of the project whose metadata store is to be deleted from. key -- The key to be deleted from the given section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def deleteGRASSEntry(context, key):
GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def deleteClimateGridEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key)",
"def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)",
"def delete_project(arn=None):\n pass",
"def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()",
"def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))",
"def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def post_project_delete(self, resource_id, resource_dict):\n pass",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def delete(self, c_path):\n raise NotImplementedError",
"def delete_entry(title):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)",
"def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)",
"def _delete_credential(self, key):\n try:\n del self._data[key]\n except KeyError:\n pass\n self._write()",
"def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")",
"def deleteClimatePointEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key)",
"def delete_file(self, key):\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n os.unlink(path)\n else:\n raise ValueError(f\"No such file: {key}\")",
"def delete(self, cache_key):\r\n pass"
] | [
"0.663183",
"0.6198806",
"0.5947854",
"0.58994526",
"0.58258384",
"0.57844085",
"0.57791597",
"0.57316333",
"0.569258",
"0.5681562",
"0.56727916",
"0.5644133",
"0.5620544",
"0.56173795",
"0.55573726",
"0.5517395",
"0.5476237",
"0.5459271",
"0.54587215",
"0.5442384",
"0.54394233",
"0.54331434",
"0.5410565",
"0.5390593",
"0.5374409",
"0.5373472",
"0.5372524",
"0.53724605",
"0.53515697",
"0.5350894"
] | 0.67057467 | 0 |
Delete an entry from the point climate section of the metadata store for a given project. context -- Context object containing projectDir, the path of the project whose metadata store is to be deleted from. key -- The key to be deleted from the given section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def deleteClimatePointEntry(context, key):
GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteClimateGridEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key)",
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def delete(self, c_path):\n raise NotImplementedError",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)",
"def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)",
"def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def delete(self):\n\n del self.parent_mirror_dir[self.cvs_path]",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def delete_metadata(self, keys):\n return self.manager.delete_metadata(self, keys)",
"def delete_project(arn=None):\n pass",
"def deleteStudyAreaEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.STUDY_AREA_SECTION, key)",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False",
"def s3_delete_data(self):\n\n self.k.delete()",
"def delete_key_command():\n incident = demisto.args().get('id', get_investigation_id())\n key = demisto.args().get('key')\n # Search Collection for incident_id and key\n search = incident + '.key'\n cursor = COLLECTION.find_one({search: key})\n if cursor is not None:\n object_id = cursor.get('_id')\n COLLECTION.delete_one({'_id': object_id})\n return f'Incident \"{incident}\" - key/value collection - 1 document deleted', {}, {}\n return f'Key \"{key}\" for incident_id \"{incident}\" does not exist', {}, {}",
"def post_project_delete(self, resource_id, resource_dict):\n pass",
"def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)",
"def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))",
"def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)",
"def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])",
"def delete(self, logical_key):\n path = self._split_key(logical_key)\n pkg = self[path[:-1]]\n del pkg._children[path[-1]]\n return self"
] | [
"0.657034",
"0.64909166",
"0.59813344",
"0.5950042",
"0.5759556",
"0.57526565",
"0.5573513",
"0.55714506",
"0.5553916",
"0.5483869",
"0.5475279",
"0.5436373",
"0.54319376",
"0.5418876",
"0.5379181",
"0.5350449",
"0.531416",
"0.52881044",
"0.5284254",
"0.52753764",
"0.52735746",
"0.52470094",
"0.5237756",
"0.5218699",
"0.52186",
"0.5171839",
"0.5171412",
"0.51675934",
"0.51591235",
"0.51569295"
] | 0.7254461 | 0 |
Delete an entry from the grid climate section of the metadata store for a given project. context -- Context object containing projectDir, the path of the project whose metadata store is to be deleted from. key -- The key to be deleted from the given section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def deleteClimateGridEntry(context, key):
GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def deleteClimatePointEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key)",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()",
"def delete(self, c_path):\n raise NotImplementedError",
"def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))",
"def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))",
"def delete_metadata(self, keys):\n return self.manager.delete_metadata(self, keys)",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def delete_project(arn=None):\n pass",
"def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def delete_data(key):\n session.query(Issue).filter(Issue.cef_key == key).delete()\n session.commit()",
"def delete(self):\n\n del self.parent_mirror_dir[self.cvs_path]",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete_metadata(self, keys=None):\n return self.parent.delete_metadata_for_node(self, keys=keys)",
"def post_project_delete(self, resource_id, resource_dict):\n pass",
"def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False",
"def access_info_delete(context, storage_id):\n _access_info_get_query(context). \\\n filter_by(storage_id=storage_id).delete()",
"def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n self._impl.delete_metadata(volume, keys=keys, deletes=10,\n delete_size=3)",
"def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])"
] | [
"0.6503831",
"0.64400357",
"0.6210888",
"0.61607176",
"0.6114747",
"0.5806895",
"0.57731616",
"0.5720591",
"0.5711945",
"0.5704156",
"0.5704014",
"0.5659985",
"0.56278455",
"0.556038",
"0.54745364",
"0.54652095",
"0.54497516",
"0.54400027",
"0.54372054",
"0.5431896",
"0.5431749",
"0.54300904",
"0.54073906",
"0.5370271",
"0.53631806",
"0.53594667",
"0.53309107",
"0.53254837",
"0.53187376",
"0.531013"
] | 0.6943208 | 0 |
Delete an entry from the HydroShare section of the metadata store for a given project. context -- Context object containing projectDir, the path of the project whose metadata store is to be deleted from. key -- The key to be deleted from the given section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def deleteHydroShareEntry(context, key):
GenericMetadata.deleteEntryFromSection(context, GenericMetadata.HYDROSHARE_SECTION, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def delete(cls, project_id, resource_type, resource_id, key=None):\n\n query = DBMetadata.query()\n filters = [DBMetadata.deleted == 0,\n DBMetadata.resource_type == resource_type,\n DBMetadata.resource_id == resource_id,\n DBMetadata.project_id == project_id]\n\n if key:\n filters.append(DBMetadata.key == key)\n\n query = query.filter(*filters)\n return query.update({\"deleted\": True})",
"def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)",
"def _del_entry(self, cvs_path):\n\n self._make_writable()\n self._del_entry(cvs_path)",
"def _del_entry(self, cvs_path):\n\n del self._entries[cvs_path]",
"def delete_metadata(self, keys):\n return self.manager.delete_metadata(self, keys)",
"def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)",
"def delete(self, logical_key):\n path = self._split_key(logical_key)\n pkg = self[path[:-1]]\n del pkg._children[path[-1]]\n return self",
"def __delitem__(self, cvs_path):\n\n node = self[cvs_path]\n self._del_entry(cvs_path)\n if isinstance(node, _WritableMirrorDirectoryMixin):\n node._mark_deleted()",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def delete_share(self, pool, project, share):\n svc = self.share_path % (pool, project, share)\n ret = self.rclient.delete(svc)\n if ret.status != restclient.Status.NO_CONTENT:\n exception_msg = (('Error deleting '\n 'share: %(share)s to '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.'),\n {'share': share,\n 'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)",
"def delete_account_key(configuration):\n os.remove(configuration.cm_key)",
"def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))",
"def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)",
"def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)",
"def delete(self):\r\n if self.provider.readonly:\r\n raise DAVError(HTTP_FORBIDDEN)\r\n\r\n self.provider.cache_fs.remove(self.path)\r\n if self.nibbler.find(self.path):\r\n self.nibbler.remove_file(self.path)\r\n\r\n self.removeAllProperties(True)\r\n self.removeAllLocks(True)",
"def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. {}'.format(ex.strerror)\n raise MetaFileError(msg)",
"def delete(self):\n\n del self.parent_mirror_dir[self.cvs_path]",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n self._impl.delete_metadata(volume, keys=keys, deletes=10,\n delete_size=3)",
"def delete_project(arn=None):\n pass",
"def delete_metadata(self, keys=None):\n return self.parent.delete_metadata_for_node(self, keys=keys)",
"def _delete_key(self):\n return self.connection.hdel(self.key, self.name)",
"def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)",
"def access_info_delete(context, storage_id):\n _access_info_get_query(context). \\\n filter_by(storage_id=storage_id).delete()",
"def remove(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()",
"def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)",
"def delete_entity(cls, key):\n entity_key = \"entity:\" + str(key)\n hashmap = db.delete(entity_key)",
"def delete(self, c_path):\n raise NotImplementedError"
] | [
"0.66969836",
"0.6114704",
"0.59135765",
"0.58948624",
"0.58330137",
"0.5749956",
"0.5720702",
"0.5715892",
"0.5683251",
"0.56806993",
"0.5660069",
"0.56545544",
"0.5646248",
"0.5645598",
"0.56452",
"0.56423646",
"0.56192744",
"0.56175125",
"0.5605478",
"0.5603749",
"0.55689806",
"0.55673635",
"0.5555219",
"0.5541393",
"0.5521021",
"0.5498465",
"0.54949194",
"0.5491337",
"0.5490255",
"0.54817617"
] | 0.6612039 | 1 |
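All five delete wrappers in the records above share the same calling convention: a context object whose only required attribute is projectDir, plus the key to remove from that wrapper's section. A hypothetical caller might look like the sketch below; the import path and the Context class are assumptions made for illustration, so the calls themselves are left commented out.

# Illustrative caller-side sketch. The module path of GenericMetadata is not
# given in these records, so the import and the calls are shown as assumptions.
import tempfile

class Context(object):
    """Hypothetical stand-in for the context object the wrappers expect."""
    def __init__(self, projectDir):
        self.projectDir = projectDir

context = Context(tempfile.mkdtemp())

# from ecohydrolib.metadata import GenericMetadata   # assumed import path
# GenericMetadata.deleteHydroShareEntry(context, 'resource_id')
# GenericMetadata.deleteClimateGridEntry(context, 'climate_grid_res')
#
# Each call removes one key from its section of the project's metadata store
# and raises IOError(errno.EACCES) if the store is not writable.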
Write grid climate entries to the metadata store for a given project. Will overwrite the values for keys that already exist. context -- Context object containing projectDir, the path of the project whose metadata store is to be written to. keys -- List of keys to be written to the grid climate section of the project metadata. values -- List of values to be written for the keys stored in the grid climate section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. Raises Exception if len(keys) != len(values). | def writeClimateGridEntries(context, keys, values):
GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeClimateGridEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def writeClimatePointEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value)",
"def checkMetadata(self):\n super(WorldfileMultiple, self).checkMetadata()\n \n # Check for necessary information in metadata\n if not 'basin_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a basin raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'subbasins_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a sub-basin raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'dem_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a DEM raster in a GRASS mapset\" % (self.context.projectDir,)) \n if not 'soil_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a soil raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'patch_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a patch raster in a GRASS mapset\" % (self.context.projectDir,))\n \n if not 'rhessys_dir' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a RHESSys directory\" % (self.context.projectDir,))\n if not 'g2w_bin' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a grass2world executable\" % (self.context.projectDir,))\n if not 'rat_bin' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain an AverageTables executable\" % (self.context.projectDir,))\n if not 'template' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a world template\" % (self.context.projectDir,))\n if not 'rhessys_dir' in self.metadata:\n raise MetadataException(\"Metadata in project directory {0} does not contain a RHESSys directory\".format(self.context.projectDir))",
"def save(projects, path):\n with open(path,'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', lineterminator='\\n')\n # string for help MS Excel\n writer.writerow([\"sep=,\"])\n # Table name_columm\n writer.writerow(recode_for_write(NAME_COLUMM))\n\n with open(path,'a') as csvfile:\n headers = ['title', 'category', 'price', 'applications']\n writer = csv.DictWriter(csvfile, lineterminator='\\n',\n fieldnames=headers)\n for i in projects:\n i = recode_value_dict(i)\n writer.writerow(i)",
"def __setitem__(self, keys, value):\n\n if isinstance(keys, str):\n keys = [keys]\n\n #print(\"KEYTYPE: {0}\".format(keys))\n self.__setInDict(self.__cfg, keys, value)\n self.write(self.__cfgFile)",
"def write_locations(pathfolder, key_firms, years, locs, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['hashes'] = generate_yearnif_hash(years, key_firms)\n db['nif'] = key_firms\n db['year'] = years\n db['locations'] = locs\n db['methodvalues'] = methodvalues\n db.close()",
"def readClimateGridEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION)",
"def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)",
"def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value",
"def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()",
"def update_project_documents(self, manifest_info):\n\n for proj_name, proj_info in manifest_info.projects.items():\n # See if project document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n key_name = f'project:{proj_name}'\n\n try:\n project_data = self.db.get_document(key_name)\n except cbdatabase_db.NotFoundError:\n project_data = dict(\n type='project', key_=key_name, name=proj_name\n )\n\n remote, repo_url = \\\n manifest_info.get_project_remote_info(proj_name)\n\n if 'remotes' in project_data:\n if remote in project_data['remotes']:\n if repo_url not in project_data['remotes'][remote]:\n project_data['remotes'][remote].append(repo_url)\n else:\n project_data['remotes'][remote] = [repo_url]\n else:\n project_data['remotes'] = {remote: [repo_url]}\n\n self.db.upsert_documents({key_name: project_data})",
"def update_channel_metadata():\n from .channel_import import (\n import_channel_from_local_db,\n InvalidSchemaVersionError,\n FutureSchemaError,\n )\n\n channel_ids = get_channel_ids_for_content_database_dir(\n get_content_database_dir_path()\n )\n for channel_id in channel_ids:\n if not ChannelMetadata.objects.filter(id=channel_id).exists():\n try:\n import_channel_from_local_db(channel_id)\n annotate_content(channel_id)\n except (InvalidSchemaVersionError, FutureSchemaError):\n logger.warning(\n \"Tried to import channel {channel_id}, but database file was incompatible\".format(\n channel_id=channel_id\n )\n )\n except DatabaseError:\n logger.warning(\n \"Tried to import channel {channel_id}, but database file was corrupted.\".format(\n channel_id=channel_id\n )\n )\n fix_multiple_trees_with_id_one()\n connection.close()",
"def sync_set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def write_metadata(dir_path, fs, *metas, global_metadata=True):\n assert metas\n md = metas[0]\n with fs.open(\"/\".join([dir_path, \"_common_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)\n if global_metadata:\n for meta in metas[1:]:\n md.append_row_groups(meta)\n with fs.open(\"/\".join([dir_path, \"_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)",
"def _update_metadata_file(metadata, networks):\n\n old_networks = metadata.get('network_names', [])\n new_networks = list(networks.keys())\n _validate_duplicate_network(old_networks, new_networks)\n if metadata.get('broker_addresses'):\n new_brokers = list(networks.values())\n else:\n new_brokers = None\n if metadata.get('manager_addresses'):\n new_managers = list(networks.values())\n else:\n new_managers = None\n\n store_cert_metadata(\n new_networks=list(networks.keys()),\n new_brokers=new_brokers,\n new_managers=new_managers,\n )",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def edit(cls, project_id, resource_type, resource_id, key, value):\n\n try:\n cls.get(\n project_id=project_id,\n resource_type=resource_type,\n resource_id=resource_id,\n key=key\n )\n\n query = DBMetadata.query()\n query = query.filter_by(\n resource_type=resource_type,\n resource_id=resource_id,\n project_id=project_id,\n key=key,\n deleted=False\n )\n return query.update({\"value\": json.dumps(value)})\n\n except exception.NotFound:\n return DBMetadata.create(\n resource_type=resource_type,\n resource_id=resource_id,\n project_id=project_id,\n key=key,\n value=json.dumps(value)\n )",
"def update_metadata(self, new_metadata_path):\n metadata = set()\n metadata.update(self.metadata_fields)\n\n if isinstance(new_metadata_path, str):\n new_metadata_path = Path(new_metadata_path)\n if not isinstance(new_metadata_path, Path):\n raise ValueError(f'new_metadata_path must be str or Path object, not type {type(new_metadata_path)}')\n\n try:\n csv_list = load_csv_to_list(new_metadata_path)\n except FileNotFoundError:\n err = \"Could not find the metadata csv file for the \"\n err += f\"corpus in the expected location ({self.csv_path}).\"\n raise FileNotFoundError(err)\n csv_reader = csv.DictReader(csv_list)\n\n for document_metadata in csv_reader:\n document_metadata = dict(document_metadata)\n metadata.update(list(document_metadata))\n try:\n document = self.get_document('filename', document_metadata['filename'])\n except ValueError:\n raise ValueError(f\"Document {document_metadata['filename']} not found in corpus\")\n\n document.update_metadata(document_metadata)\n\n self.metadata_fields = list(metadata)",
"def dumpprojects (self):\r\n\r\n\r\n\r\n datesuffix=str(datetime.datetime.now()).split(' ')[0]\r\n project = str(transform(self.default_dict['projects'].return_dict()))\r\n\r\n if self.using_shelf:\r\n\r\n file_access.save_file(returntext=project,\r\n filename='PROJ'+notebookname+datesuffix,\r\n folder='/textfiles')\r\n if self.using_database:\r\n value_tuple = (notebookname, project,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO projects \"\r\n +\"(notebook, projectfile) \"\r\n +\"VALUES (?,?);\",\r\n value_tuple)\r\n db_connection.commit()",
"def writedb(path, key, value) -> int:\n if key == \"\" or value == \"\":\n return 1\n if os.path.exists(path):\n pass \n else:\n return 1\n with open(path, \"a\") as db:\n db.write(f\"\\n{key}:{value}\")\n return 0",
"def write_metadata(file_name, metadata_dict, category='',\n datatype=\"inventory\", parameters=None):\n if (datatype == \"inventory\") or (datatype == \"source\"):\n meta = set_stewi_meta(file_name, stewiformat=category)\n if datatype == 'inventory':\n meta.tool_meta = {\"parameters\": parameters,\n \"sources\": metadata_dict}\n else:\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)\n elif datatype == \"validation\":\n file = (paths.local_path / 'validation' /\n f'{file_name}_validationset_metadata.json')\n with file.open('w') as fi:\n fi.write(json.dumps(metadata_dict, indent=4))",
"def _write(self, tkt_id, repo_id, changesets):\n @self.env.with_transaction()\n def do_update(db):\n cursor = db.cursor()\n value = str(changesets)\n if changesets.exists:\n if value:\n cursor.execute('UPDATE ticket_changesets SET value=%s '\n 'WHERE ticket=%s AND repository=%s',\n [value, tkt_id, repo_id])\n else:\n cursor.execute('DELETE FROM ticket_changesets '\n 'WHERE ticket=%s AND repository=%s',\n [tkt_id, repo_id])\n elif value:\n cursor.execute('INSERT INTO ticket_changesets '\n '(ticket,repository,value) VALUES(%s,%s,%s)',\n [tkt_id, repo_id, value])"
] | [
"0.67957145",
"0.66655767",
"0.6512811",
"0.6203893",
"0.56767184",
"0.54653037",
"0.5448667",
"0.52735066",
"0.52370846",
"0.5186844",
"0.50501317",
"0.5009672",
"0.49810913",
"0.49324104",
"0.48789868",
"0.48425463",
"0.48071223",
"0.48050776",
"0.47918886",
"0.47881624",
"0.47632495",
"0.4743214",
"0.47384545",
"0.47349444",
"0.47219765",
"0.4718653",
"0.46975422",
"0.46822327",
"0.467822",
"0.46711695"
] | 0.7622373 | 0 |
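The record above is the list-valued counterpart of the single-entry writers: _writeEntriesToSection (the second negative in that record) validates that keys and values have the same length before writing them all under one section. Below is a stripped-down sketch of that write path, again in Python 3; the lock-file handling and version stamping from the original are omitted, and the literal file name and the 'climate_grid' section string in the usage comment are assumptions standing in for the GenericMetadata constants.

# Minimal sketch of the paired keys/values write path shown above (Python 3).
import configparser
import os

def write_entries_to_section(project_dir, section, keys, values,
                             metadata_filename='metadata.txt'):
    if len(keys) != len(values):
        raise Exception("%d keys specified for %d values" % (len(keys), len(values)))

    metadata_path = os.path.join(project_dir, metadata_filename)
    config = configparser.RawConfigParser()
    config.read(metadata_path)

    if not config.has_section(section):
        config.add_section(section)
    for key, value in zip(keys, values):
        # RawConfigParser stores values as strings in Python 3; cast to be safe.
        config.set(section, key, str(value))

    with open(metadata_path, 'w') as f:
        config.write(f)

# Example call mirroring writeClimateGridEntries for a hypothetical project directory:
# write_entries_to_section('/tmp/myproject', 'climate_grid',
#                          ['climate_grid_res', 'climate_grid_type'],
#                          ['1000', 'daymet'])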
Write a provenance entry to the metadata store for a given project. Will overwrite the value for a key that already exists. context -- Context object containing projectDir, the path of the project whose metadata store is to be written to. key -- The key to be written to the provenance section of the project metadata. value -- The value to be written for key stored in the provenance section of the project metadata. Raises IOError(errno.EACCES) if the metadata store for the project is not writable. | def writeProvenanceEntry(context, key, value):
GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def setProvenance(args, syn):\n \n activity = Activity(name=args.name, description=args.description)\n if args.used:\n for item in args.used:\n activity.used(item)\n if args.executed:\n for item in args.executed:\n activity.used(item, wasExecuted=True)\n activity = syn.setProvenance(args.id, activity)\n\n # Display the activity record, if -o or -output specified\n if args.output:\n if args.output=='STDOUT':\n sys.stdout.write(json.dumps(activity))\n sys.stdout.write('\\n')\n else:\n with open(args.output, 'w') as f:\n f.write(json.dumps(activity))\n f.write('\\n')\n else:\n print 'Set provenance record %s on entity %s\\n' % (str(activity['id']), str(args.id))",
"def testUpdate(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n self.assertTrue(ok)\n #\n ok = provU.update(pD)\n self.assertTrue(ok)\n #\n fD = provU.fetch()\n self.assertTrue(self.__provKeyName in fD)\n self.assertDictEqual(pD, fD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def save_project(self, project_id, project):\n with open('{}/{}'.format(self._storage_location, project_id), 'w') as project_file:\n project_file.write(project.name + '\\n')\n project_file.write(project.description + '\\n')\n project_file.write(\",\".join(project.members) + '\\n')\n project_file.write(\",\".join(project.documents) + '\\n')",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def write_release_id(project, release_id):\n releases_dir = os.path.join(ROOT, '.releases')\n os.makedirs(releases_dir, exist_ok=True)\n\n release_file = os.path.join(releases_dir, project)\n with open(release_file, 'w') as f:\n f.write(release_id)",
"def addProvenance(self, provenance_on=True):\n self.kwargs['additionalInfo'] = provenance_on",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _post_src_install_write_metadata(settings):\n\n\teapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])\n\n\tbuild_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')\n\n\tmetadata_keys = ['IUSE']\n\tif eapi_attrs.iuse_effective:\n\t\tmetadata_keys.append('IUSE_EFFECTIVE')\n\n\tfor k in metadata_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tif v is not None:\n\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\t# The following variables are irrelevant for virtual packages.\n\tif settings.get('CATEGORY') != 'virtual':\n\n\t\tfor k in ('CHOST',):\n\t\t\tv = settings.get(k)\n\t\t\tif v is not None:\n\t\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),\n\t\tmode='w', encoding=_encodings['repo.content'],\n\t\terrors='strict') as f:\n\t\tf.write(_unicode_decode(\"%.0f\\n\" % (time.time(),)))\n\n\tuse = frozenset(settings['PORTAGE_USE'].split())\n\tfor k in _vdb_use_conditional_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tfilename = os.path.join(build_info_dir, k)\n\t\tif v is None:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\n\t\tif k.endswith('DEPEND'):\n\t\t\tif eapi_attrs.slot_operator:\n\t\t\t\tcontinue\n\t\t\ttoken_class = Atom\n\t\telse:\n\t\t\ttoken_class = None\n\n\t\tv = use_reduce(v, uselist=use, token_class=token_class)\n\t\tv = paren_enclose(v)\n\t\tif not v:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\terrors='strict') as f:\n\t\t\tf.write(_unicode_decode(v + '\\n'))\n\n\tif eapi_attrs.slot_operator:\n\t\tdeps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())\n\t\tfor k, v in deps.items():\n\t\t\tfilename = os.path.join(build_info_dir, k)\n\t\t\tif not v:\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\t\terrors='strict') as f:\n\t\t\t\tf.write(_unicode_decode(v + '\\n'))",
"def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value",
"def setProperty(self, path, key, value):\n \n try:\n self._client.propset(key, value, self._workingCopyPath + path)\n self.checkin(path)\n except ClientError, error:\n raise SubversionError(error)",
"def testStore(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n #\n self.assertTrue(ok)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def write(self, path, key):\n raise NotImplementedError",
"def set_provenance_map(self, kwargs: Dict):\n if \"default_provenance\" in kwargs:\n self.default_provenance = kwargs.pop(\"default_provenance\")\n\n ksf_found = []\n for ksf in knowledge_provenance_properties:\n if ksf in kwargs:\n ksf_found.append(ksf)\n ksf_value = kwargs.pop(ksf)\n if isinstance(ksf_value, dict):\n for ksf_pattern in ksf_value.keys():\n log.debug(\"ksf_pattern: \", ksf_pattern)\n if ksf not in self.mapping:\n log.debug(\"not in the mapping\", ksf)\n self.mapping[ksf] = dict()\n log.debug(\"self.mapping[ksf]: \", self.mapping[ksf])\n ir = self.get_mapping(ksf)\n self.mapping[ksf][ksf_pattern] = ir.set_provenance_map_entry(\n ksf_value[ksf_pattern]\n )\n log.debug(\"self.mapping[ksf][ksf_pattern]: \", self.mapping[ksf][ksf_pattern])\n else:\n ir = self.get_mapping(ksf)\n self.mapping[ksf] = ir.set_provenance_map_entry(ksf_value)\n # if none specified, add at least one generic 'knowledge_source'\n if len(ksf_found) == 0:\n ir = self.get_mapping(\"knowledge_source\")\n if \"name\" in kwargs:\n self.mapping[\"knowledge_source\"] = ir.default(kwargs[\"name\"])\n else:\n self.mapping[\"knowledge_source\"] = ir.default(self.default_provenance)\n if \"provided_by\" not in self.mapping:\n ir = self.get_mapping(\"provided_by\")\n self.mapping[\"provided_by\"] = ir.default(self.default_provenance)",
"def update_provenance(self):\n\n try:\n self._save_or_reregister_result(None)\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)",
"def write_pv_put(self, pv_name, new_value, old_value):\n time_now = datetime.datetime.now()\n time_str = time_now.strftime(\"%d-%b-%y %H:%M:%S\")\n message = \"{} {} {} {} {} {}\".format(time_str, LOCALHOST, self._ioc_name, pv_name, new_value, old_value)\n self.logger.write_to_log(message)",
"def _writeVersionToMetadata(config):\n if not config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n config.add_section(GenericMetadata.ECOHYDROLIB_SECION)\n \n if not config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n config.set(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY, GenericMetadata._ecohydrolibVersion)\n return\n \n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def writeManifestEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value)",
"def provenance(self, provenance):\n\n self._provenance = provenance",
"def journal_write(session, k, v):\n entry = models.VppEtcdJournal(k=k, v=v)\n session.add(entry)\n session.flush()",
"def writeToMetadata(self, context):\n pass",
"def propset(self, name, value, *args):\r\n d = py.path.local.mkdtemp() \r\n try: \r\n p = d.join('value') \r\n p.write(value) \r\n self._svn('propset', name, '--file', str(p), *args)\r\n finally: \r\n d.remove()",
"def put(self, key, value):\n self.execute_command('sudo -i bash -c \\'echo -n \"{0}\" > {1}{2}\\''\n .format(value, self._store_path, key))",
"def put_prop(self, obj_type, obj_id, prop_name, value):\n ierr = exolib.py_expp(self.exoid, obj_type, obj_id, prop_name, value)\n if ierr:\n raise ExodusIIWriterError(\"Error putting prop\")"
] | [
"0.702613",
"0.60999066",
"0.5898809",
"0.5638033",
"0.55177164",
"0.5379722",
"0.52836007",
"0.52061015",
"0.51149523",
"0.50688386",
"0.50665015",
"0.5016869",
"0.50116354",
"0.4975159",
"0.4963921",
"0.49586692",
"0.49479687",
"0.49324423",
"0.4919253",
"0.49008313",
"0.48834983",
"0.4873125",
"0.48585013",
"0.4846911",
"0.48347777",
"0.48305932",
"0.4821083",
"0.48101494",
"0.4797219",
"0.47656962"
] | 0.7517834 | 0 |
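The writeProvenanceEntry pair above is a one-line wrapper that forwards to GenericMetadata.writeEntryToSection with the provenance section constant. A minimal usage sketch follows, assuming EcohydroLib-style import paths and a Context that only needs a project directory; every name and value below is an illustrative assumption, not taken from the dataset.

# Hedged sketch: store a single provenance fact for a project.
from ecohydrolib.context import Context            # assumed import path
from ecohydrolib.metadata import GenericMetadata   # assumed import path

context = Context("/data/projects/my_watershed")   # hypothetical project dir
GenericMetadata.writeProvenanceEntry(context, "dem_source",
                                     "http://example.org/dem/tile_42.tif")
# Calling again with the same key overwrites the stored value, matching
# the behaviour described in the query column above.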
Write model run entries to the metadata store for a given project. Will overwrite the values of keys that already exist context Context object containing projectDir, the path of the project whose metadata store is to be written to keys The keys to be written to the model run section of the project metadata values The values to be written for keys stored in the model run section of the project metadata IOError(errno.EACCES) if the metadata store for the project is not writable Exception if len(keys) != len(values) | def writeModelRunEntries(context, keys, values):
GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeToMetadata(self, context):\n if self.modelType not in GenericMetadata.MODEL_TYPES:\n raise Exception(\"Model type %s is not among known model types: %s\" % (self.modelType, str(GenericMetadata.MODEL_TYPES) ) )\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n try:\n runs = modelRunEntries['runs'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n runs = []\n \n # Collected model entry and keys and values into lists so we can write to metadata store in batch\n keys = []\n values = []\n \n # Generate unique identifier for this model run. Unique ID is a combination of model type and a number\n entryNumber = 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n while fqId in runs:\n entryNumber += 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n self.runNumber = entryNumber\n # Add new run to list of runs\n runs.append(fqId)\n runsStr = GenericMetadata.VALUE_DELIM.join(runs)\n keys.append('runs'); values.append(runsStr)\n # Write attributes for run\n keyProto = fqId + GenericMetadata.KEY_SEP\n runDate = keyProto + 'date_utc'\n keys.append(runDate); values.append( self.date.strftime(ModelRun.FMT_DATE) )\n runDesc = keyProto + 'description'\n keys.append(runDesc); values.append(self.description)\n runCmd = keyProto + 'command'\n keys.append(runCmd); values.append(self.command)\n runOutput = keyProto + 'output'\n keys.append(runOutput); values.append(self.output)\n # Write to metadata\n GenericMetadata.writeModelRunEntries(context, keys, values)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def run_metadata(self, run_metadata):\n\n if run_metadata is not None:\n run_metadata = self._validate_run_metadata(run_metadata)\n runs = ListDict()\n runs.append(run_metadata)\n runs.extend(\n self.station_metadata.runs, skip_keys=[run_metadata.id, \"0\"]\n )\n self._survey_metadata.stations[0].runs = runs",
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n 
self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs",
"def _setup_run(cfg: Dict) -> Dict:\n now = datetime.now()\n day = f\"{now.day}\".zfill(2)\n month = f\"{now.month}\".zfill(2)\n hour = f\"{now.hour}\".zfill(2)\n minute = f\"{now.minute}\".zfill(2)\n run_name = f'run_{day}{month}_{hour}{minute}_seed{cfg[\"seed\"]}'\n # cfg[\"run_dir\"] = Path(__file__).absolute().parent / \"runs\" / run_name\n cfg[\"run_dir\"] = cfg[\"run_dir\"] / run_name\n if not cfg[\"run_dir\"].is_dir():\n cfg[\"train_dir\"] = cfg[\"run_dir\"] / \"data\" / \"train\"\n cfg[\"train_dir\"].mkdir(parents=True)\n cfg[\"val_dir\"] = cfg[\"run_dir\"] / \"data\" / \"val\"\n cfg[\"val_dir\"].mkdir(parents=True)\n else:\n raise RuntimeError(f\"There is already a folder at {cfg['run_dir']}\")\n\n # dump a copy of cfg to run directory\n with (cfg[\"run_dir\"] / \"cfg.json\").open(\"w\") as fp:\n temp_cfg = {}\n for key, val in cfg.items():\n if isinstance(val, PosixPath):\n temp_cfg[key] = str(val)\n elif isinstance(val, Dict):\n for k in val:\n if isinstance(val[k], PosixPath):\n val[k] = str(val[k])\n elif isinstance(val, pd.Timestamp):\n temp_cfg[key] = val.strftime(format=\"%d%m%Y\")\n else:\n temp_cfg[key] = val\n json.dump(temp_cfg, fp, sort_keys=True, indent=4)\n\n return cfg",
"def Write(self):\n template_mappings = {\n 'pypi_token': self._project_definition.pypi_token or ''}\n\n file_content = []\n\n template_data = self._GenerateFromTemplate('environment', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'pypi_token', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('matrix', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('install', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name != 'l2tdevtools':\n template_data = self._GenerateFromTemplate(\n 'install_l2tdevtools', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name in self._PROJECTS_WITHOUT_BUILD:\n template_filename = 'build_off'\n else:\n template_filename = 'build'\n\n template_data = self._GenerateFromTemplate(\n template_filename, template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('test_script', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n template_data = self._GenerateFromTemplate('artifacts', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'deploy_script', template_mappings)\n file_content.append(template_data)\n\n file_content = ''.join(file_content)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n file_object.write(file_content)",
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def put(self, cmd_names, section, key, value, env=DEFAULT_ENV):\n\n if not self.document:\n self._read()\n # Empty document prepare the initial structure.\n self.document.update({env: {self._to_key(cmd_names): {section: {key: value}}}})\n # Only update appropriate key value pairs within a section\n self.document[env][self._to_key(cmd_names)][section].update({key: value})",
"def write(self, model, ids, field_values, context={}):\n try:\n res = self.object_facade.execute(self.dbname, self.user_id, self.user_passwd,\n model, 'write', ids, field_values, context)\n return res\n except socket.error, err:\n raise Exception(u'Conexion rechazada: %s!' % err)\n except xmlrpclib.Fault, err:\n raise Exception(u'Error %s en write: %s' % (err.faultCode, err.faultString))",
"def write(nmrCalcRun, targetDir):\n \n intIo.writeDataFiles(nmrCalcRun, targetDir)\n \n jsonDict = intIo.makeJsonDict(nmrCalcRun)\n \n \n # write properties file (must be done at the end\n propFile = uniIo.joinPath(targetDir, intIo.propFileName)\n print 'About to write', propFile\n open(propFile,'w').write(json.dumps(jsonDict, sort_keys=True, \n indent=intIo.propIndent))",
"def write_inputs(self, extraFstDict={}):\n\n if (self.run_dir == self.fst_dir):\n raise ValueError, \"run_dir == fst_dir, you cannot run directly in the template directory\"\n\n self.run_name, ext = os.path.splitext(self.fst_file)\n\n if (not os.path.isdir(self.run_dir)):\n os.mkdir(self.run_dir)\n\n self.fst_dir = os.path.abspath(self.fst_dir)\n\n if (self.exec_count <= 1): # Is 0 when invoked by main()\n # Is 1 when invoked by Assembly ???\n self.read_inputs()\n\n for key in extraFstDict:\n self.fstDict[key] = extraFstDict[key]\n\n curdir = os.getcwd()\n os.chdir (self.run_dir) ###note, change to run_dir\n\n self.writeFST(self.fst_file,self.fstDict) \n self.writeAD()\n self.writeBlade()\n self.writeWnd()\n self.writeNoise()\n self.writePtfm(self.fstDict)\n self.copyTwr()\n self.copyAdams()\n\n os.chdir(curdir) ## restore dir",
"def __setitem__(self, key, value):\n if not isinstance(value, dict):\n raise TypeError(\"value must be a dict\")\n\n # Is this a valid cache entry dictionary?\n try:\n validate(value, ENTRY_SCHEMA)\n except ValidationError as e:\n raise ValueError(\"%s is not a valid entry\" % value) from e\n\n entry_dir = self.cache_key_dir(key)\n\n try:\n entry_dir.mkdir(parents=True, exist_ok=True)\n except FileExistsError as e:\n raise ValueError(\"Already exists\") from e\n\n with open(entry_dir / \"entry.yaml\", \"w\") as f:\n f.write(yaml.safe_dump(value))",
"def fix_project_keys(apps, schema_editor):\n RemoteProject = apps.get_model('projectroles', 'RemoteProject')\n Project = apps.get_model('projectroles', 'Project')\n\n for rp in RemoteProject.objects.all():\n if not rp.project:\n rp.project = Project.objects.filter(\n sodar_uuid=rp.project_uuid\n ).first()\n rp.save()",
"def _save_model(self):\n groups = {cluster: self.model.cluster_metadata.group(cluster)\n for cluster in self.cluster_ids}\n self.model.save(self.model.spike_clusters,\n groups,\n clustering_metadata=self.model.clustering_metadata,\n )\n info(\"Saved {0:s}.\".format(self.model.kwik_path))",
"def writeCheckpoint(self, metadata_key, times):\r\n # Make sure that the directory exists\r\n try:\r\n os.makedirs(self.tmpDir)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n\r\n # Open a temporary file\r\n (file_handle, tmp_filename) = tempfile.mkstemp(dir=self.tmpDir)\r\n wrapped_file = os.fdopen(file_handle, 'w')\r\n wrapped_file.write(json.dumps(times))\r\n wrapped_file.close()\r\n os.rename(tmp_filename, self.tmpDir + metadata_key)",
"def write(self):\n #\n if self.what == 'ecutwfc':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.set_ecutwfc(self.values[i])\n self.pwinput.write()\n #\n elif self.what == 'ecutrho':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.ecutrho = self.values[i]\n self.pwinput.write()\n elif self.what == 'kpoints':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.Nk = self.values[i]\n self.pwinput.write()\n #\n else:\n raise RuntimeError('what = %s is not implemented yet' % (self.what))\n #\n self.inputs_have_been_written = True",
"def write(self):\r\n if self.system.status.background_task:\r\n return\r\n\r\n self.logger.debug(\"Saving data\")\r\n\r\n # Hacs\r\n path = f\"{self.system.config_path}/.storage/{STORES['hacs']}\"\r\n hacs = {\"view\": self.configuration.frontend_mode}\r\n save(self.logger, path, hacs)\r\n\r\n # Installed\r\n path = f\"{self.system.config_path}/.storage/{STORES['installed']}\"\r\n installed = {}\r\n for repository_name in self.common.installed:\r\n repository = self.get_by_name(repository_name)\r\n if repository is None:\r\n self.logger.warning(f\"Did not save information about {repository_name}\")\r\n continue\r\n installed[repository.information.full_name] = {\r\n \"version_type\": repository.display_version_or_commit,\r\n \"version_installed\": repository.display_installed_version,\r\n \"version_available\": repository.display_available_version,\r\n }\r\n save(self.logger, path, installed)\r\n\r\n # Repositories\r\n path = f\"{self.system.config_path}/.storage/{STORES['repositories']}\"\r\n content = {}\r\n for repository in self.repositories:\r\n if repository.repository_manifest is not None:\r\n repository_manifest = repository.repository_manifest.manifest\r\n else:\r\n repository_manifest = None\r\n content[repository.information.uid] = {\r\n \"authors\": repository.information.authors,\r\n \"topics\": repository.information.topics,\r\n \"category\": repository.information.category,\r\n \"description\": repository.information.description,\r\n \"full_name\": repository.information.full_name,\r\n \"hide\": repository.status.hide,\r\n \"installed_commit\": repository.versions.installed_commit,\r\n \"installed\": repository.status.installed,\r\n \"last_commit\": repository.versions.available_commit,\r\n \"last_release_tag\": repository.versions.available,\r\n \"repository_manifest\": repository_manifest,\r\n \"name\": repository.information.name,\r\n \"new\": repository.status.new,\r\n \"selected_tag\": repository.status.selected_tag,\r\n \"show_beta\": repository.status.show_beta,\r\n \"version_installed\": repository.versions.installed,\r\n }\r\n\r\n # Validate installed repositories\r\n count_installed = len(installed) + 1 # For HACS it self\r\n count_installed_restore = 0\r\n for repository in self.repositories:\r\n if repository.status.installed:\r\n count_installed_restore += 1\r\n\r\n if count_installed < count_installed_restore:\r\n self.logger.debug(\"Save failed!\")\r\n self.logger.debug(\r\n f\"Number of installed repositories does not match the number of stored repositories [{count_installed} vs {count_installed_restore}]\"\r\n )\r\n return\r\n save(self.logger, path, content)",
"def _rebuild_key_and_role_db(self):\n \n # Clobbering this means all delegated metadata files are rendered outdated\n # and will need to be reloaded. However, reloading the delegated metadata\n # files is avoided here because fetching target information with methods\n # like all_targets() and target() always cause a refresh of these files.\n # The metadata files for delegated roles are also not loaded when the\n # repository is first instantiated. Due to this setup, reloading delegated\n # roles is not required here.\n tuf.keydb.create_keydb_from_root_metadata(self.metadata['current']['root'])\n tuf.roledb.create_roledb_from_root_metadata(self.metadata['current']['root'])",
"def write(self, model, ids, field_values,context={}):\n try:\n res = self.object_facade.execute(self.dbname, self.user_id, self.user_passwd,\n model, 'write', ids, field_values, context)\n return res\n except socket.error, err:\n raise Exception(u'Conexion rechazada: %s!' % err)\n except xmlrpclib.Fault, err:\n raise Exception(u'Error %s en write: %s' % (err.faultCode, err.faultString))",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def save (self):\n if self.newobj:\n using_sequence = self.sequence ()\n self.keyvals['id'] = using_sequence\n self.seq = using_sequence\n else:\n using_sequence = self.seq\n for key, val in self.keyvals.items ():\n r_key = self.prepare_key (key, using_sequence)\n r.set (r_key, val)\n self.keyvals = {}\n self.newobj = False",
"def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:",
"def save_project(self, project_id, project):\n with open('{}/{}'.format(self._storage_location, project_id), 'w') as project_file:\n project_file.write(project.name + '\\n')\n project_file.write(project.description + '\\n')\n project_file.write(\",\".join(project.members) + '\\n')\n project_file.write(\",\".join(project.documents) + '\\n')",
"def _save_experiment_metadata(self, suppress_errors: bool = True) -> None:\n if not self._service:\n LOG.warning(\n \"Experiment cannot be saved because no experiment service is available. \"\n \"An experiment service is available, for example, \"\n \"when using an IBM Quantum backend.\"\n )\n return\n try:\n handle_metadata_separately = self._metadata_too_large()\n if handle_metadata_separately:\n metadata = self._db_data.metadata\n self._db_data.metadata = {}\n\n result = self.service.create_or_update_experiment(\n self._db_data, json_encoder=self._json_encoder, create=not self._created_in_db\n )\n if isinstance(result, dict):\n created_datetime = result.get(\"created_at\", None)\n updated_datetime = result.get(\"updated_at\", None)\n self._db_data.creation_datetime = parse_utc_datetime(created_datetime)\n self._db_data.updated_datetime = parse_utc_datetime(updated_datetime)\n\n self._created_in_db = True\n\n if handle_metadata_separately:\n self.service.file_upload(\n self._db_data.experiment_id, self._metadata_filename, metadata\n )\n self._db_data.metadata = metadata\n\n except Exception as ex: # pylint: disable=broad-except\n # Don't automatically fail the experiment just because its data cannot be saved.\n LOG.error(\"Unable to save the experiment data: %s\", traceback.format_exc())\n if not suppress_errors:\n raise QiskitError(f\"Experiment data save failed\\nError Message:\\n{str(ex)}\") from ex"
] | [
"0.70324004",
"0.624896",
"0.5617584",
"0.5436556",
"0.5102913",
"0.50380903",
"0.5019441",
"0.5013116",
"0.48381904",
"0.4805784",
"0.4781773",
"0.47577384",
"0.47417828",
"0.47400704",
"0.47357443",
"0.47232473",
"0.4700202",
"0.4680056",
"0.46675187",
"0.4654608",
"0.46476293",
"0.4643456",
"0.46204692",
"0.46176842",
"0.46141252",
"0.4596623",
"0.45950142",
"0.4581375",
"0.45778543",
"0.45733356"
] | 0.8042498 | 0 |
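writeModelRunEntries batch-writes parallel key and value lists into the model-run section and raises an Exception when the list lengths differ, so callers typically build both lists together. A short sketch of that calling pattern is below; the run identifier, key names, and command string are hypothetical, and the underscore separator merely stands in for GenericMetadata.KEY_SEP used by the snippets above.

# Hedged sketch: record one model run's attributes in a single batch write.
from ecohydrolib.context import Context            # assumed import path
from ecohydrolib.metadata import GenericMetadata   # assumed import path

context = Context("/data/projects/my_watershed")   # hypothetical project dir
run_id = "rhessys_1"                                # hypothetical run identifier
keys = ["runs", run_id + "_date_utc", run_id + "_command"]
values = [run_id, "2013-06-01 12:00:00", "rhessys -w worldfile -t tecfile"]
assert len(keys) == len(values)                     # a length mismatch would raise Exception
GenericMetadata.writeModelRunEntries(context, keys, values)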
Write provenance entries to the metadata store for a given project. Will overwrite the values of keys that already exist context Context object containing projectDir, the path of the project whose metadata store is to be written to keys The keys to be written to the provenance section of the project metadata values The values to be written for keys stored in the provenance section of the project metadata IOError(errno.EACCES) if the metadata store for the project is not writable Exception if len(keys) != len(values) | def writeProvenanceEntries(context, keys, values):
GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeProvenanceEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.PROVENANCE_SECTION, key, value)",
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def _write_map_provenance(cfg, cube, plot_path, title, *attrs):\n cube = cube.copy()\n ancestors = []\n for attr in attrs:\n ancestors.extend(attr['filename'].split('|'))\n netcdf_path = mlr.get_new_path(cfg, plot_path)\n io.iris_save(cube, netcdf_path)\n record = {\n 'ancestors': ancestors,\n 'authors': ['schlund_manuel'],\n 'caption': f\"Geographical distribution of {cube.long_name} for \"\n f\"{title}.\",\n 'plot_types': ['geo'],\n 'references': ['schlund20jgr'],\n }\n with ProvenanceLogger(cfg) as provenance_logger:\n provenance_logger.log(netcdf_path, record)\n provenance_logger.log(plot_path, record)",
"def save(cls, context):\n\n data = context.get_stored_dict()\n files = {}\n\n def save_in_file(file, key, value):\n if file in files.keys():\n files[file][key] = value\n else:\n files[file] = {key: value}\n\n for key, val in data.items():\n if context.extends is not None and key in context.key_origins:\n save_in_file(context.key_origins[key], key, val)\n else:\n save_in_file(context.profile, key, val)\n\n for profile, content in files.items():\n metadata.update_metadata(\n context.workspace,\n profile,\n 'config',\n content)",
"def save_project(self, project_id, project):\n with open('{}/{}'.format(self._storage_location, project_id), 'w') as project_file:\n project_file.write(project.name + '\\n')\n project_file.write(project.description + '\\n')\n project_file.write(\",\".join(project.members) + '\\n')\n project_file.write(\",\".join(project.documents) + '\\n')",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def testUpdate(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n self.assertTrue(ok)\n #\n ok = provU.update(pD)\n self.assertTrue(ok)\n #\n fD = provU.fetch()\n self.assertTrue(self.__provKeyName in fD)\n self.assertDictEqual(pD, fD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def update_project_documents(self, manifest_info):\n\n for proj_name, proj_info in manifest_info.projects.items():\n # See if project document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n key_name = f'project:{proj_name}'\n\n try:\n project_data = self.db.get_document(key_name)\n except cbdatabase_db.NotFoundError:\n project_data = dict(\n type='project', key_=key_name, name=proj_name\n )\n\n remote, repo_url = \\\n manifest_info.get_project_remote_info(proj_name)\n\n if 'remotes' in project_data:\n if remote in project_data['remotes']:\n if repo_url not in project_data['remotes'][remote]:\n project_data['remotes'][remote].append(repo_url)\n else:\n project_data['remotes'][remote] = [repo_url]\n else:\n project_data['remotes'] = {remote: [repo_url]}\n\n self.db.upsert_documents({key_name: project_data})",
"def commit(self):\n\t\t## Loops through ALL items\n\t\tfor k in self.data.keys():\n\t\t\tfor item in self[k]:\n\n\t\t\t\t## If the object needs committing, commit it!\n\t\t\t\tif item['meta']['needs_commit']:\n\t\t\t\t\t## Create file contents as an empty string\n\t\t\t\t\tfile_contents = \"\"\n\n\t\t\t\t\t## find any other items that may share this config file\n\t\t\t\t\textra_items = self._get_items_in_file(item['meta']['filename'])\n\t\t\t\t\tif len(extra_items) > 0:\n\t\t\t\t\t\tfor commit_item in extra_items:\n\t\t\t\t\t\t\t## Ignore files that are already set to be deleted:w\n\t\t\t\t\t\t\tif commit_item['meta']['delete_me']:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t## Make sure we aren't adding this thing twice\n\t\t\t\t\t\t\tif item != commit_item:\n\t\t\t\t\t\t\t\tfile_contents += self.print_conf(commit_item)\n\n\t\t\t\t\t## This is the actual item that needs commiting\n\t\t\t\t\tif not item['meta']['delete_me']:\n\t\t\t\t\t\tfile_contents += self.print_conf(item)\n\n\t\t\t\t\t## Write the file\n\t\t\t\t\tf = open(item['meta']['filename'], 'w')\n\t\t\t\t\tf.write(file_contents)\n\t\t\t\t\tf.close()\n\n\t\t\t\t\t## Recreate the item entry without the commit flag\n\t\t\t\t\tself.data[k].remove(item)\n\t\t\t\t\titem['meta']['needs_commit'] = None\n\t\t\t\t\tself.data[k].append(item)",
"def dumpprojects (self):\r\n\r\n\r\n\r\n datesuffix=str(datetime.datetime.now()).split(' ')[0]\r\n project = str(transform(self.default_dict['projects'].return_dict()))\r\n\r\n if self.using_shelf:\r\n\r\n file_access.save_file(returntext=project,\r\n filename='PROJ'+notebookname+datesuffix,\r\n folder='/textfiles')\r\n if self.using_database:\r\n value_tuple = (notebookname, project,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO projects \"\r\n +\"(notebook, projectfile) \"\r\n +\"VALUES (?,?);\",\r\n value_tuple)\r\n db_connection.commit()",
"def set_provenance_map(self, kwargs: Dict):\n if \"default_provenance\" in kwargs:\n self.default_provenance = kwargs.pop(\"default_provenance\")\n\n ksf_found = []\n for ksf in knowledge_provenance_properties:\n if ksf in kwargs:\n ksf_found.append(ksf)\n ksf_value = kwargs.pop(ksf)\n if isinstance(ksf_value, dict):\n for ksf_pattern in ksf_value.keys():\n log.debug(\"ksf_pattern: \", ksf_pattern)\n if ksf not in self.mapping:\n log.debug(\"not in the mapping\", ksf)\n self.mapping[ksf] = dict()\n log.debug(\"self.mapping[ksf]: \", self.mapping[ksf])\n ir = self.get_mapping(ksf)\n self.mapping[ksf][ksf_pattern] = ir.set_provenance_map_entry(\n ksf_value[ksf_pattern]\n )\n log.debug(\"self.mapping[ksf][ksf_pattern]: \", self.mapping[ksf][ksf_pattern])\n else:\n ir = self.get_mapping(ksf)\n self.mapping[ksf] = ir.set_provenance_map_entry(ksf_value)\n # if none specified, add at least one generic 'knowledge_source'\n if len(ksf_found) == 0:\n ir = self.get_mapping(\"knowledge_source\")\n if \"name\" in kwargs:\n self.mapping[\"knowledge_source\"] = ir.default(kwargs[\"name\"])\n else:\n self.mapping[\"knowledge_source\"] = ir.default(self.default_provenance)\n if \"provided_by\" not in self.mapping:\n ir = self.get_mapping(\"provided_by\")\n self.mapping[\"provided_by\"] = ir.default(self.default_provenance)",
"def SetProjectMetadata(self, new_metadata):\n compute = self.compute\n\n errors = []\n list(request_helper.MakeRequests(\n requests=[\n (compute.projects,\n 'SetCommonInstanceMetadata',\n self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(\n metadata=new_metadata,\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not add SSH key to project metadata:')",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def _post_src_install_write_metadata(settings):\n\n\teapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])\n\n\tbuild_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')\n\n\tmetadata_keys = ['IUSE']\n\tif eapi_attrs.iuse_effective:\n\t\tmetadata_keys.append('IUSE_EFFECTIVE')\n\n\tfor k in metadata_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tif v is not None:\n\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\t# The following variables are irrelevant for virtual packages.\n\tif settings.get('CATEGORY') != 'virtual':\n\n\t\tfor k in ('CHOST',):\n\t\t\tv = settings.get(k)\n\t\t\tif v is not None:\n\t\t\t\twrite_atomic(os.path.join(build_info_dir, k), v + '\\n')\n\n\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),\n\t\tmode='w', encoding=_encodings['repo.content'],\n\t\terrors='strict') as f:\n\t\tf.write(_unicode_decode(\"%.0f\\n\" % (time.time(),)))\n\n\tuse = frozenset(settings['PORTAGE_USE'].split())\n\tfor k in _vdb_use_conditional_keys:\n\t\tv = settings.configdict['pkg'].get(k)\n\t\tfilename = os.path.join(build_info_dir, k)\n\t\tif v is None:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\n\t\tif k.endswith('DEPEND'):\n\t\t\tif eapi_attrs.slot_operator:\n\t\t\t\tcontinue\n\t\t\ttoken_class = Atom\n\t\telse:\n\t\t\ttoken_class = None\n\n\t\tv = use_reduce(v, uselist=use, token_class=token_class)\n\t\tv = paren_enclose(v)\n\t\tif not v:\n\t\t\ttry:\n\t\t\t\tos.unlink(filename)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\terrors='strict') as f:\n\t\t\tf.write(_unicode_decode(v + '\\n'))\n\n\tif eapi_attrs.slot_operator:\n\t\tdeps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())\n\t\tfor k, v in deps.items():\n\t\t\tfilename = os.path.join(build_info_dir, k)\n\t\t\tif not v:\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(filename)\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tcontinue\n\t\t\twith io.open(_unicode_encode(os.path.join(build_info_dir,\n\t\t\t\tk), encoding=_encodings['fs'], errors='strict'),\n\t\t\t\tmode='w', encoding=_encodings['repo.content'],\n\t\t\t\terrors='strict') as f:\n\t\t\t\tf.write(_unicode_decode(v + '\\n'))",
"def writeProteinAccessions( self ):\n\n self.logger.info( 'writeProteinAccessions: START' )\n\n self.logger.info( 'writeProteinAccessions: insert file will be proteinAccessionsInsert.psql' )\n\n proteinAccessionFile = self.openInsertFile( 'proteinAccessionsInsert.psql')\n\n for proteinIdentification, proteinIdRelationalDatabase in self.proteinsInserted.iteritems():\n accessionId = self.accessionsInserted[ proteinIdentification ]\n self.writeFile( proteinAccessionFile, 'protein_accessions', [ str(proteinIdRelationalDatabase), str(accessionId) ] )\n\n\n self.logger.info( 'writeProteinAccessions: DONE' )",
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def writeToMetadata(self, context):\n pass",
"def fix_project_keys(apps, schema_editor):\n RemoteProject = apps.get_model('projectroles', 'RemoteProject')\n Project = apps.get_model('projectroles', 'Project')\n\n for rp in RemoteProject.objects.all():\n if not rp.project:\n rp.project = Project.objects.filter(\n sodar_uuid=rp.project_uuid\n ).first()\n rp.save()",
"def testStore(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n #\n self.assertTrue(ok)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def checkMetadataVersion(projectDir):\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n if config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def write(self, path, key):\n raise NotImplementedError",
"def save(projects, path):\n with open(path,'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', lineterminator='\\n')\n # string for help MS Excel\n writer.writerow([\"sep=,\"])\n # Table name_columm\n writer.writerow(recode_for_write(NAME_COLUMM))\n\n with open(path,'a') as csvfile:\n headers = ['title', 'category', 'price', 'applications']\n writer = csv.DictWriter(csvfile, lineterminator='\\n',\n fieldnames=headers)\n for i in projects:\n i = recode_value_dict(i)\n writer.writerow(i)",
"def test_upload_existing_file(self):\n ps = PersistenceStore(s3_client=S3ExistingUpload())\n\n try:\n new_data = {\n 'maven': {\n 'pck1, pck2, pck3': 7,\n 'pck30, pck6': 20,\n 'pck2, pck4, pck7': 12\n },\n 'npm': {\n 'pck1, pck2, pck3': 45,\n 'pck77': 23,\n 'pck2, pck4, pck7': 99\n },\n 'pypi': {\n 'pck3, pck56': 65,\n 'pck2, pck4, pck7': 110\n }\n }\n ps.update(new_data, 'filename.json')\n except Exception:\n assert False, 'Exception raised'",
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def propset(self, name, value, *args):\r\n d = py.path.local.mkdtemp() \r\n try: \r\n p = d.join('value') \r\n p.write(value) \r\n self._svn('propset', name, '--file', str(p), *args)\r\n finally: \r\n d.remove()",
"def writeProteins( self ):\n\n self.logger.info( 'writeProteins: START' )\n\n proteinsDestination = self.openInsertFile( 'proteinsInsert.psql' )\n accessionsDestination = self.openInsertFile( 'accessionsInsert.psql' )\n\n proteins = {}\n\n totalOfSequences = self.reader.getTotalOfSequences()\n\n self.logger.info( 'writeProteins: total of sequences: ' + str(totalOfSequences) + '.' )\n\n files = self.reader.getPepFiles()\n\n self.logger.info( 'writeProteins: total of sequence files: ' + str(len(files)) + '.' )\n\n # For log purposes only!\n counter = 0\n\n for pepFile in files:\n f = self.reader.openPepFile( pepFile )\n\n positions = self.reader.getPepEntriesPositions()\n\n # Just for the log system.\n fileName = self.afs.getFileName( pepFile ) \n self.logger.info( 'writeProteins: writing file: ' + str(fileName) + '.' )\n self.logger.info( 'writeProteins: file: ' + str(fileName) + ' have : ' + str(len(positions)) + ' entries.' )\n # END of log stuff.\n\n for position in positions:\n\n # Only log how long it's taking to run.\n # By thousands.\n counter += 1\n if ( counter % 100000 ) == 0:\n self.logger.info( 'writeProtein: step: ' + str(counter) + '.')\n # END log step.\n\n\n entry = self.reader.getPepParsedEntry( position )\n\n # Sometimes there's 'pep' files without related organism. It happens in KEGG database.\n # We skip completely sequences without related organism.\n if not entry.organism.code in self.importerOrganism.organismsInserted:\n self.logger.info( 'writeProteins: ORGANISM NOT FOUND: ' + entry.organism.code )\n\n # Skip the 'pep' file completely.\n break\n\n else:\n organismId = self.importerOrganism.organismsInserted[ entry.organism.code ]\n\n self.logger.info( 'writeProteins: writing entry : ' + str(entry.identification) + '.' )\n\n #self.writeProteinsFile( proteinsDestination, entry.identification, entry.fullFastaHeader, entry.description, organismId, entry.sequence )\n proteinInserted = self.writeFile( proteinsDestination, 'proteins', [ str(entry.identification), str(entry.fullFastaHeader), str(entry.description), str(organismId), str(entry.sequence) ] )\n self.proteinsInserted[ entry.identification ] = proteinInserted\n\n accessionInserted = self.writeFile( accessionsDestination, 'accessions', [ str(entry.identification) ] )\n self.accessionsInserted[ entry.identification ] = accessionInserted \n #self.writeAccessionsFile( accessionsDestination, entry.identification )\n\n\n self.logger.info( 'writeProteins: DONE' )",
"def test_save_project_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n inventory_[\"key\"] = \"value\"\n inventory_[\"key2\"] = \"value2\"\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n project = io.find_one({\"type\": \"project\", \"name\": PROJECT_NAME})\n assert_equals(project[\"data\"][\"key\"], \"value\")\n assert_equals(project[\"data\"][\"key2\"], \"value2\")"
] | [
"0.6461782",
"0.61798054",
"0.57821673",
"0.5684524",
"0.553569",
"0.5147128",
"0.51318043",
"0.5116621",
"0.5025456",
"0.49987754",
"0.4958191",
"0.49401796",
"0.4920296",
"0.49061567",
"0.49038157",
"0.4880907",
"0.484493",
"0.48360598",
"0.48308468",
"0.48263413",
"0.47989592",
"0.47954983",
"0.47131574",
"0.47128034",
"0.4703555",
"0.4698285",
"0.4688452",
"0.46831912",
"0.46737993",
"0.4645674"
] | 0.7994784 | 0 |
Read all entries for the given section from the metadata store for a given project projectDir Absolute path of the project whose metadata are to be read section The section to be read A dictionary of key/value pairs from the given section of the project metadata | def _readEntriesForSection(projectDir, section):
sectionDict = dict()
metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)
if os.path.exists(metadataFilepath):
if not os.access(metadataFilepath, os.R_OK):
raise IOError(errno.EACCES, "Unable to read metadata store for project %s" % \
(projectDir,))
# Read metadata store
config = ConfigParser.RawConfigParser()
config.read(metadataFilepath)
if config.has_section(section):
items = config.items(section)
for item in items:
sectionDict[item[0]] = item[1]
return sectionDict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _writeEntriesToSection(projectDir, section, keys, values, callback=None):\n numKeys = len(keys)\n if numKeys != len(values):\n raise Exception( \"%d keys specified for %d values\" % (numKeys, len(values)) )\n \n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entries\n if not config.has_section(section):\n config.add_section(section)\n for i in xrange(numKeys):\n config.set(section, keys[i], values[i])\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def _prep_metadata(md_sect, path):\n if not set(md_sect).issuperset(metadata_required_fields):\n missing = metadata_required_fields - set(md_sect)\n raise ConfigError(\"Required fields missing: \" + '\\n'.join(missing))\n\n module = md_sect.get('module')\n if not module.isidentifier():\n raise ConfigError(\"Module name %r is not a valid identifier\" % module)\n\n md_dict = {}\n\n # Description file\n if 'description-file' in md_sect:\n description_file = path.parent / md_sect.get('description-file')\n try:\n with description_file.open(encoding='utf-8') as f:\n raw_desc = f.read()\n except FileNotFoundError:\n raise ConfigError(\n \"Description file {} does not exist\".format(description_file)\n )\n ext = description_file.suffix\n try:\n mimetype = readme_ext_to_content_type[ext]\n except KeyError:\n log.warning(\"Unknown extension %r for description file.\", ext)\n log.warning(\" Recognised extensions: %s\",\n \" \".join(readme_ext_to_content_type))\n mimetype = None\n\n if mimetype == 'text/x-rst':\n # rst check\n stream = io.StringIO()\n res = render(raw_desc, stream)\n if not res:\n log.warning(\"The file description seems not to be valid rst for PyPI;\"\n \" it will be interpreted as plain text\")\n log.warning(stream.getvalue())\n\n md_dict['description'] = raw_desc\n md_dict['description_content_type'] = mimetype\n\n if 'urls' in md_sect:\n project_urls = md_dict['project_urls'] = []\n for label, url in sorted(md_sect.pop('urls').items()):\n project_urls.append(\"{}, {}\".format(label, url))\n\n for key, value in md_sect.items():\n if key in {'description-file', 'module'}:\n continue\n if key not in metadata_allowed_fields:\n closest = difflib.get_close_matches(key, metadata_allowed_fields,\n n=1, cutoff=0.7)\n msg = \"Unrecognised metadata key: {!r}\".format(key)\n if closest:\n msg += \" (did you mean {!r}?)\".format(closest[0])\n raise ConfigError(msg)\n\n k2 = key.replace('-', '_')\n md_dict[k2] = value\n if key in metadata_list_fields:\n if not isinstance(value, list):\n raise ConfigError('Expected a list for {} field, found {!r}'\n .format(key, value))\n if not all(isinstance(a, str) for a in value):\n raise ConfigError('Expected a list of strings for {} field'\n .format(key))\n elif key == 'requires-extra':\n if not isinstance(value, dict):\n raise ConfigError('Expected a dict for requires-extra field, found {!r}'\n .format(value))\n if not all(isinstance(e, list) for e in value.values()):\n raise ConfigError('Expected a dict of lists for requires-extra field')\n for e, reqs in value.items():\n if not all(isinstance(a, str) for a in reqs):\n raise ConfigError('Expected a string list for requires-extra. (extra {})'\n .format(e))\n else:\n if not isinstance(value, str):\n raise ConfigError('Expected a string for {} field, found {!r}'\n .format(key, value))\n\n # What we call requires in the ini file is technically requires_dist in\n # the metadata.\n if 'requires' in md_dict:\n md_dict['requires_dist'] = md_dict.pop('requires')\n\n # And what we call dist-name is name in the metadata\n if 'dist_name' in md_dict:\n md_dict['name'] = md_dict.pop('dist_name')\n\n # Move dev-requires into requires-extra\n reqs_noextra = md_dict.pop('requires_dist', [])\n reqs_by_extra = md_dict.pop('requires_extra', {})\n dev_requires = md_dict.pop('dev_requires', None)\n if dev_requires is not None:\n if 'dev' in reqs_by_extra:\n raise ConfigError(\n 'dev-requires occurs together with its replacement requires-extra.dev.')\n else:\n log.warning(\n '“dev-requires = ...” is obsolete. 
Use “requires-extra = {\"dev\" = ...}” instead.')\n reqs_by_extra['dev'] = dev_requires\n\n # Add requires-extra requirements into requires_dist\n md_dict['requires_dist'] = \\\n reqs_noextra + list(_expand_requires_extra(reqs_by_extra))\n\n md_dict['provides_extra'] = sorted(reqs_by_extra.keys())\n\n # For internal use, record the main requirements as a '.none' extra.\n reqs_by_extra['.none'] = reqs_noextra\n\n return md_dict, module, reqs_by_extra",
"def writeEntryToSection(context, section, key, value, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Write new entry\n if not config.has_section(section):\n config.add_section(section)\n config.set(section, key, value)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def ReadConfigFileSection( config, section ):\n dict1 = {}\n dict1['config'] = section\n options = config.options(section)\n for option in options:\n try:\n dict1[option] = config.get(section, option)\n except:\n print >> sys.stderr, (\"Exception on %s!\" % option)\n dict1[option] = None\n return dict1",
"def read_db_config(config_path, section='database'):\n\n parser = ConfigParser()\n parser.read(config_path)\n db = {}\n\n if parser.has_section(section):\n items = parser.items(section)\n for item in items:\n db[item[0]] = item[1]\n else:\n raise FileNotFoundError('{} not found in the {} file'.format(section, config_path))\n\n return db",
"def readConfig(file, section):\n config = ConfigParser.ConfigParser()\n config.read(file)\n keyval = dict()\n\n items = config.items('%s' % section)\n for entry in items:\n keyval[entry[0]] = entry[1]\n return keyval",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def get_section_config_ini(self, cfile, section, dict_format=False):\r\n\r\n config = self.get_config_ini(cfile)\r\n if dict_format:\r\n # Retorno um dicionario\r\n return dict(config.items(section.upper()))\r\n else:\r\n # Retorna um objeto config\r\n return config.items(section.upper())",
"def config_section_map(section):\n try:\n section_dict = {}\n # Parse the config file's sections into options\n options = CONFIG_PARSER.options(section)\n # Loop through each option\n for option in options:\n # Get the section and option and add it to the dictionary\n section_dict[option] = CONFIG_PARSER.get(section,option)\n if section_dict[option] == -1:\n click.secho(\"[*] Skipping: {}\".format(option),fg=\"yellow\")\n # Return the dictionary of settings and values\n return section_dict\n except configparser.Error as error:\n click.secho(\"[!] There was an error with: {}\".format(section),fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")",
"def read_config(filename, section):\n # create parser and read ini configuration file\n parser = ConfigParser()\n parser.read(filename)\n\n # get section\n db = {}\n if parser.has_section(section):\n items = parser.items(section)\n for item in items:\n db[item[0]] = item[1]\n else:\n raise Exception('{0} not found in the {1} file'.format(section, filename))\n\n return db",
"def deleteEntryFromSection(context, section, key, callback=None):\n projectDir = context.projectDir\n if section not in GenericMetadata.SECTIONS:\n raise Exception( \"%s is an unknown section\" % (section,) )\n lockFilepath = os.path.join(projectDir, GenericMetadata.METADATA_LOCKFILE)\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n else:\n if not os.access(projectDir, os.W_OK):\n raise IOError(errno.EACCES, \"Unable to write to metadata store for project %s\" % \\\n (projectDir,))\n # Create metadata file as it does not exist\n metadataFD = open(metadataFilepath, 'w')\n metadataFD.close()\n \n # Wait for lockfile to be relinquished\n while os.path.exists(lockFilepath):\n time.sleep(5)\n # Write lock file\n open(lockFilepath, 'w').close()\n \n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n GenericMetadata._writeVersionToMetadata(config)\n \n if callback:\n callback(config)\n \n # Delete entry\n if config.has_section(section):\n config.remove_option(section, key)\n # Write metadata store\n config.write(open(metadataFilepath, 'w'))\n \n # Remove lock file\n os.unlink(lockFilepath)",
"def config_section_map(section):\n try:\n section_dict = {}\n # Parse the config file's sections into options\n options = CONFIG_PARSER.options(section)\n # Loop through each option\n for option in options:\n # Get the section and option and add it to the dictionary\n section_dict[option] = CONFIG_PARSER.get(section, option)\n if section_dict[option] == -1:\n print(\"[-] Skipping: {}\".format(option))\n\n # Return the dictionary of settings and values\n return section_dict\n except configparser.Error as error:\n print(red(\"[!] There was an error with: {}\".format(section)))\n print(red(\"L.. Details: {}\".format(error)))",
"def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))",
"def read (path, elf_info):\n ehdr, phdrs, shdrs, syms, core_info = elf_info\n info = abbrev = strings = None\n for shdr in shdrs:\n if shdr['name'] == '.debug_info':\n info = shdr['offset'], shdr['size']\n if shdr['name'] == '.debug_abbrev':\n abbrev = shdr['offset'], shdr['size']\n if shdr['name'] == '.debug_str':\n strings = shdr['offset'], shdr['size']\n if not info:\n return []\n else:\n abbrevs = abbrev_section (path, abbrev[0], abbrev[1])\n if strings:\n strings = string_section (path, strings[0], strings[1])\n info = info_section (path, info[0], info[1])\n return info.read_all (abbrevs, strings)",
"def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def config_section_map(config_file, section):\n\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n dict1 = {}\n\n if section not in config.sections():\n return dict1\n\n options = config.options(section)\n for option in options:\n try:\n dict1[option] = config.get(section, option)\n except:\n dict1[option] = None\n return dict1",
"def config_section_data():\n config_data = u\"\"\"[feeds]\n# comma separated section names. ex. sqlserver_feed,file_feed\nfeed_names=<your feeds>\nreload=true\n# use reload_types to limit the types of objects when reload=true.\n# Ex: incident,task,note,artifact,attachment,<data_table_api_name>\nreload_types=\n# set to true if ElasticSearch errors occur during reload=true\nreload_query_api_method=false\n\n# feed_data is the default message destination that will be listened to\nqueue=feed_data\n\n# set to true if attachment data should be part of payload send to plugins\ninclude_attachment_data=false\n# if necessary, specify the supported workspace (by label, case sensitive) and the list of feeds associated with it\n# ex: 'Default Workspace': ['sqlserver_feed'], 'workspace A': ['kafka_feed', 'resilient_feed']\nworkspaces=\n\"\"\"\n return config_data",
"def read_json_contents(self, body: dict):\n for section in body:\n new_section = Section(section['id'], section['name'], self.__download)\n new_section.read_json_contents(section['modules'])\n self.__sections.append(new_section)",
"def _get_page_sections(self, sectionNum=None, sectionName=None):\n self.section = {}\n self.sections = [] # list maintains order\n content = self.page.content\n lines = content.split(\"\\n\")\n currentSection = None\n for line in lines:\n if \"==\" in line:\n line = line.replace(\"Edit =\",\"\")\n line = line.replace(\"=\",\"\").lstrip().rstrip()\n self.section[line] = []\n currentSection = line\n self.sections.append(currentSection)\n elif currentSection is not None:\n line = line.lstrip().rstrip()\n self.section[currentSection].append(line)\n else:\n pass\n logger.info(\"Sections in page: \"+str(self.sections))\n # and return some section:\n if sectionNum is not None:\n if sectionNum > len(self.sections) or sectionNum < 0:\n sectionNum = 0\n return self.section[self.sections[sectionNum]]\n elif sectionName is not None:\n pass",
"def read_section(self, configuration_file=\"./conf.txt\", section=\"\"):\n parser = ConfigParser.ConfigParser()\n parser.read(configuration_file)\n\n sec = {}\n if parser.has_section(section):\n items = parser.items(section)\n for item in items:\n sec[item[0]] = item[1]\n else:\n raise ConfException(\"{0} not found in the {1} file\".format(section, configuration_file))\n return sec",
"def parse_section(section):\n data = {}\n for line in section.splitlines(False):\n if not line:\n continue\n if not line.startswith(' '):\n # new key/value\n key, _, value = line.partition(': ')\n data[key] = value\n else:\n # continuation of the previous value\n data[key] += line[1:]\n return data",
"def section_items(self, section: str):\n return self._config_parser.items(section)",
"def _write_section_values(section_data, fobj):\n\n # Order is significant.\n section_dict = OrderedDict()\n section_dict['Armor'] = section_data.get('armor')\n section_dict['Internals'] = section_data.get('internals')\n section_dict['Rear'] = section_data.get('rear')\n section_dict['Config'] = section_data.get('config')\n\n for name, value in section_dict.items():\n if not value:\n continue\n val_str = \" {name:<14} {{ {value} }}\\n\".format(\n name=name, value=value)\n fobj.write(val_str)",
"def handle_section_import(section):\n for prop in section.properties:\n handle_property_import(prop)\n\n # Make sure properties down the rabbit hole are also treated.\n for sec in section.sections:\n handle_section_import(sec)",
"def read_os_release_file(dut, fname=default_os_release_file, key=None):\n cur_dir, f = os.path.split(__file__)\n path = os.path.join(cur_dir, os_release_files_dir, fname)\n d = {}\n with open(path) as f:\n for line in f:\n k, v = line.rstrip().split(\"=\")\n d[k] = v\n if key:\n return d[key]\n else:\n return d",
"def section_tag_cleanup(debug_dir: str, tag_dict: dict, line: str):\n tag_registry = os.path.join(debug_dir, \"../../debugdir/tag_registry.json\")\n with open(tag_registry) as tag_registry_pre:\n tag_registry = json.load(tag_registry_pre)\n\n tag_closed = \"0\"\n\n if tag_registry[\"section\"] == tag_closed:\n content_update = tag_dict[\"section-beg\"]\n output_file_update.content_append(debug_dir=debug_dir,\n content_update=content_update)\n try:\n if logger_debug.isEnabledFor(logging.DEBUG):\n msg = str(tag_dict[\"section-beg\"] + f\"{line}\")\n logger_debug.error(msg)\n except AttributeError:\n logging.exception(\"Check setLevel for logger_debug.\")\n\n else:\n # If a section tag is open, close it and open a new section.\n content_update = tag_dict[\"section-end\"] + tag_dict[\"section-beg\"]\n output_file_update.content_append(debug_dir=debug_dir,\n content_update=content_update)\n try:\n if logger_debug.isEnabledFor(logging.DEBUG):\n msg = str(tag_dict[\"section-end\"] +\n tag_dict[\"section-beg\"] + f\"{line}\")\n logger_debug.error(msg)\n except AttributeError:\n logging.exception(\"Check setLevel for logger_debug.\")",
"def __getitem__(self, section):\n #first translate from CFN into HOT terminology if necessary\n if section not in self.SECTIONS:\n section = HOTemplate20130523._translate(\n section, self._CFN_TO_HOT_SECTIONS,\n _('\"%s\" is not a valid template section'))\n\n if section not in self.SECTIONS:\n raise KeyError(_('\"%s\" is not a valid template section') % section)\n if section in self.SECTIONS_NO_DIRECT_ACCESS:\n raise KeyError(\n _('Section %s can not be accessed directly.') % section)\n\n if section == self.MAPPINGS:\n return {}\n\n if section == self.DESCRIPTION:\n default = 'No description'\n else:\n default = {}\n\n # if a section is None (empty yaml section) return {}\n # to be consistent with an empty json section.\n the_section = self.t.get(section) or default\n\n # In some cases (e.g. parameters), also translate each entry of\n # a section into CFN format (case, naming, etc) so the rest of the\n # engine can cope with it.\n # This is a shortcut for now and might be changed in the future.\n if section == self.RESOURCES:\n return self._translate_resources(the_section)\n\n if section == self.OUTPUTS:\n return self._translate_outputs(the_section)\n\n return the_section",
"def _mv_to_root(map):\n if METADATA_KEY in map:\n for mk in list(map[METADATA_KEY].keys()):\n if mk not in map:\n map[mk] = map[METADATA_KEY][mk]\n del map[METADATA_KEY][mk]\n _LOGGER.debug(\"Section {m}.{k} moved to {k}\".\n format(m=METADATA_KEY, k=mk))\n del self[CONFIG_KEY][METADATA_KEY]",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]"
] | [
"0.6082751",
"0.59797823",
"0.5723508",
"0.57017773",
"0.5615351",
"0.54072076",
"0.5341342",
"0.5338873",
"0.5323583",
"0.52857554",
"0.5253614",
"0.52374613",
"0.52353126",
"0.50894105",
"0.5071754",
"0.5071219",
"0.5067303",
"0.50422764",
"0.50369376",
"0.5034055",
"0.5032706",
"0.50047153",
"0.50007474",
"0.49782717",
"0.49753767",
"0.49731472",
"0.49718726",
"0.4954216",
"0.49483606",
"0.49475172"
] | 0.8505662 | 0 |
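The `_readEntriesForSection` row above (and the `writeEntryToSection` / `deleteEntryFromSection` negatives) all rely on the same idea: the project metadata store is an INI-style file parsed with `ConfigParser.RawConfigParser`, and one section is returned as a plain dict. Below is a minimal, self-contained sketch of that read path, not the original RHESSys code: it uses the Python 3 `configparser` module instead of the Python 2 `ConfigParser` shown in the row, and the file name, section name, and keys are illustrative assumptions.

```python
# Minimal sketch of the section-reading pattern shown in the row above.
# Assumes Python 3 (configparser) rather than the Python 2 ConfigParser
# used in the original snippet; file, section, and key names are made up.
import configparser
import errno
import os
import tempfile

METADATA_FILENAME = "metadata.txt"  # assumed stand-in for GenericMetadata.METADATA_FILENAME


def read_entries_for_section(project_dir, section):
    """Return the key/value pairs of one section as a dict (empty if the section is absent)."""
    section_dict = {}
    metadata_path = os.path.join(project_dir, METADATA_FILENAME)
    if os.path.exists(metadata_path):
        if not os.access(metadata_path, os.R_OK):
            raise IOError(errno.EACCES,
                          "Unable to read metadata store for project %s" % project_dir)
        config = configparser.RawConfigParser()
        config.read(metadata_path)
        if config.has_section(section):
            # items(section) yields (key, value) pairs, which dict.update accepts directly
            section_dict.update(config.items(section))
    return section_dict


if __name__ == "__main__":
    # Round-trip demo: write a tiny metadata store, then read one section back.
    with tempfile.TemporaryDirectory() as project_dir:
        config = configparser.RawConfigParser()
        config.add_section("study_area")
        config.set("study_area", "dem_res_x", "30.0")
        with open(os.path.join(project_dir, METADATA_FILENAME), "w") as f:
            config.write(f)
        print(read_entries_for_section(project_dir, "study_area"))
        # -> {'dem_res_x': '30.0'}
```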
Read all manifest entries from the metadata store for a given project context Context object containing projectDir, the path of the project whose metadata store is to be read from A dictionary of key/value pairs from the manifest section of the project metadata | def readManifestEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def read_manifest(self): # -> None:\n ...",
"def manifest(self):\n yield self._meta\n for dir_key, meta in self._walk_dir_meta():\n yield {'logical_key': dir_key, 'meta': meta}\n for logical_key, entry in self.walk():\n yield {'logical_key': logical_key, **entry.as_dict()}",
"def parse_manifest_xml(manifest_path):\n dir_project_dict = {}\n parsed_xml = xml.dom.minidom.parse(manifest_path)\n projects = parsed_xml.getElementsByTagName('project')\n for project in projects:\n name = project.getAttribute('name')\n path = project.getAttribute('path')\n if path:\n dir_project_dict[path] = name\n else:\n dir_project_dict[name] = name\n return dir_project_dict",
"def fact():\n manifests = [x for x in os.walk(manifests_dir)]\n\n return { 'manifests': manifests }",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def read_manifest(manifest_fn):\n with open(manifest_fn, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=';')\n dicts = list(reader)\n return dicts",
"def read_metadata():\n subdirs = next(os.walk(os.getcwd()))[1]\n\n for subdir in subdirs:\n if '__init__.py' in os.listdir(subdir):\n print('Found package:', subdir)\n break\n else:\n raise SetupError('No package found! Did you forget an __init__.py?')\n\n metadata = {'name': subdir, 'packages': [subdir]}\n relevant_keys = {'__version__': 'version',\n '__author__': 'author',\n '__email__': 'author_email',\n '__license__': 'license'}\n\n m = open(os.path.join(subdir), '__init__.py')\n first_line = next(m)\n metadata['description'] = first_line.strip(). strip('\\n \"')\n for line in m:\n if len(relevant_keys) == 0:\n break\n for key in relevant_keys:\n if line.startswith(key):\n break\n else:\n continue\n\n metadatum_name = relevant_keys.pop(key)\n metadata[metadatum_name] = line.split('=', 1)[1].strip('\\n\\'\\\" ')\n\n if relevant_keys:\n print('FYI; You didn\\'t put the following info in your __init__.py:')\n print(' ', ', '.join(relevant_keys))\n return metadata",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def _read_manifest_json(self):\n with open(os.path.join(self._crx_dir, \"manifest.json\")) as manifest:\n return json.load(manifest)",
"def read_file_manifest(in_stream):\n count = struct.unpack(COUNT_FMT, checked_read(in_stream, COUNT_LEN))[0]\n name_map = {}\n for dummy in range(0, count):\n length, file_sha, history_sha = \\\n struct.unpack(MANIFEST_ENTRY_HDR_FMT,\n checked_read(in_stream,\n MANIFEST_ENTRY_HDR_LEN))\n\n length -= MANIFEST_ENTRY_HDR_LEN\n name = checked_read(in_stream, length)\n\n assert not name in name_map\n name_map[name] = (file_sha, history_sha)\n return name_map",
"def metadata(self) -> Dict:\n # Lazy load the metadata\n if self._metadata is not None:\n return self._metadata\n\n # Initialize metadata\n self._metadata = {}\n # Find wich bucket the package belong to\n bucket_dir = os.path.join(self.scoop_root, \"buckets\")\n buckets = os.listdir(bucket_dir)\n metadata_json = None\n for bucket in buckets:\n metadata_file = os.path.join(\n bucket_dir, bucket, \"bucket\", f\"{self.name}.json\"\n )\n if os.path.isfile(metadata_file):\n with open(metadata_file) as file:\n metadata_json = json.load(file)\n break\n\n if metadata_json is None:\n logger.error(\"Could not find package metadata\")\n return self._metadata\n\n self._metadata = metadata_json\n return self._metadata",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def _store_package_metadata(self):\n\n context = self._config.context\n log.debug('processing chef_json file {0} for package metadata'.format(self._get_chef_json_full_path()))\n with open(self._get_chef_json_full_path()) as chef_json_file:\n chef_json = json.load(chef_json_file)\n log.debug(chef_json.dump)\n\n context.package.attributes = {}\n for x in self._config.pkg_attributes:\n context.package.attributes[x] = chef_json.get(x, None)",
"def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata",
"def manifest_dict(self):\n return self._parsed",
"def manifest_dict(self):\n return self._parsed",
"def get_manifests(arcroot):\n manifests = []\n for root, dirs, files in os.walk(arcroot):\n if 'manifest.json' in files:\n manifests.append(os.path.join(root, 'manifest.json'))\n \n return manifests",
"def load_app_manifests(self):\n self.app_manifests = []\n apps_lib_path = os.path.join(self.apps_dir_path, \"lib\")\n for app_dir in os.listdir(apps_lib_path):\n if app_dir not in (\"__init__.py\", \"__init__.pyc\"):\n if app_dir.find(\"_v\") > 1:\n app_name = app_dir[:app_dir.find(\"_v\")]\n self.app_manifests.append(json.load(file(os.path.join(self.apps_dir_path, 'lib', app_dir, \"manifest.json\"))))\n log.info(\"Manifest for %s app was loaded\" % (app_dir))\n else:\n log.info(\"Directory %s will be skipped from app loader . Doesn't match naming convention .\" % app_dir)",
"def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))",
"def get_manifest_contents(jar_file_path):\n _is_valid_jar_file(jar_file_path)\n manifest_file_contents = _get_manifest_file_contents(jar_file_path)\n return _format_attributes(manifest_file_contents)",
"def read_extras():\n extras = dict()\n extra_requirements_dir = 'packaging/requirements'\n for extra_requirements_filename in os.listdir(extra_requirements_dir):\n filename_match = re.search(r'^requirements-(\\w*).txt$', extra_requirements_filename)\n if not filename_match:\n continue\n extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename)\n try:\n extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines()\n except RuntimeError:\n pass\n return extras",
"def parse_manifest(manifest_contents):\n manifest = {}\n for line in manifest_contents.split('\\n'):\n line_unpacked = line.split()\n try:\n # Check that the line isn't empty or a comment\n if not line_unpacked or line.strip().startswith('#'):\n continue\n\n target, repo_hash, url, sha256_hash = line_unpacked\n manifest[target] = {\"repo_hash\": repo_hash,\n \"url\": url,\n \"sha256_hash\": sha256_hash,\n }\n except ValueError:\n log(\"WARN\", \"Warning: Invalid line in manifest file:\\n\"\n \" {}\".format(line))\n continue\n return manifest",
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def load_model_manifest(rel_path=\"model_manifest.json\"):\n manifest = []\n manifest_path = \"{}/{}\".format(Path(__file__).parents[1], rel_path)\n if path.exists(manifest_path):\n with open(manifest_path) as json_file:\n manifest = json.load(json_file)\n return manifest",
"def get_metadata(names, attributes, module_basepath):\n metadata = {}\n\n for name in names:\n module = importlib.import_module(f'{module_basepath}.{name}')\n\n if hasattr(module, '_metadata'):\n for subname in getattr(module, '_metadata'):\n submodule_dict = getattr(module, '_metadata')[subname]\n assert_attributes_exist(f'{name}/{subname}', submodule_dict, attributes)\n\n metadata[subname] = {a: submodule_dict[a] for a in attributes}\n\n # check for attributes with empty strings\n for attribute in attributes:\n if not metadata[subname][attribute]:\n print(f'WARNING: {subname} has empty metadata {attribute}')\n else:\n module_dict = module.__dict__\n\n assert_attributes_exist(name, module_dict, attributes)\n\n metadata[name] = {a: module_dict[a] for a in attributes}\n\n # check for attributes with empty strings\n for attribute in attributes:\n if not metadata[name][attribute]:\n print(f'WARNING: {name} has empty metadata {attribute}')\n return metadata",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def test_unpacker_read_manifest_metadata_v3(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n read_data = \"\"\"{}\n {\"some\": \"object\"}\"\"\"\n result = {\n \"files\": [\n {\"some\": \"object\"}\n ]\n }\n with patch(\"builtins.open\", mock_open(read_data=read_data)) as metadata_mock:\n assert p._read_manifest_metadata_v3(\"0869ea50-e437-443f-8cdb-31a350f88e57\") == result\n metadata_mock.assert_called_with(mocker.ANY)",
"def read_manifest_xml(cls, document):\n manifest = []\n with zipfile.ZipFile(document, 'a') as open_document:\n for line in open_document.open(DOCUMENT_MANIFEST_PATH):\n manifest.append(line.decode('utf-8'))\n return manifest",
"def _find_file_meta(metadata, bucket_name, saltenv, path):\n env_meta = metadata[saltenv] if saltenv in metadata else {}\n bucket_meta = {}\n for bucket in env_meta:\n if bucket_name in bucket:\n bucket_meta = bucket[bucket_name]\n files_meta = list(list(filter((lambda k: \"Key\" in k), bucket_meta)))\n\n for item_meta in files_meta:\n if \"Key\" in item_meta and item_meta[\"Key\"] == path:\n try:\n # Get rid of quotes surrounding md5\n item_meta[\"ETag\"] = item_meta[\"ETag\"].strip('\"')\n except KeyError:\n pass\n return item_meta"
] | [
"0.6394254",
"0.6376437",
"0.62439424",
"0.6131808",
"0.60059315",
"0.5966567",
"0.5847774",
"0.5758215",
"0.57482386",
"0.568868",
"0.56461066",
"0.5581038",
"0.55642676",
"0.5561107",
"0.55594665",
"0.5524265",
"0.5524265",
"0.55144745",
"0.54978037",
"0.5488108",
"0.54834497",
"0.54748803",
"0.54382986",
"0.5424492",
"0.5389901",
"0.53856474",
"0.5378383",
"0.5356551",
"0.53158706",
"0.53127867"
] | 0.7806468 | 0 |
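The `readManifestEntries` row, together with the study-area and GRASS rows that follow, shows the same one-line wrapper pattern: a public `readXEntries(context)` helper forwards `context.projectDir` plus a section constant to the shared section reader. The sketch below illustrates that delegation only; the `Context` class, the section constants, and the no-op reader are stand-ins for the real RHESSysWorkflows types, which are not reproduced in full here.

```python
# Sketch of the thin-wrapper pattern used by readManifestEntries /
# readStudyAreaEntries / readGRASSEntries in the rows above.
# Context, the section constants, and the stub reader are illustrative.
from dataclasses import dataclass


@dataclass
class Context:
    projectDir: str  # absolute path of the project whose metadata store is read


MANIFEST_SECTION = "manifest"
STUDY_AREA_SECTION = "study_area"
GRASS_SECTION = "grass"


def _read_entries_for_section(project_dir, section):
    # Placeholder for the real reader sketched earlier; returns an empty dict here.
    return {}


def read_manifest_entries(context):
    return _read_entries_for_section(context.projectDir, MANIFEST_SECTION)


def read_study_area_entries(context):
    return _read_entries_for_section(context.projectDir, STUDY_AREA_SECTION)


def read_grass_entries(context):
    return _read_entries_for_section(context.projectDir, GRASS_SECTION)


if __name__ == "__main__":
    ctx = Context(projectDir="/tmp/my_project")
    print(read_manifest_entries(ctx))  # -> {}
```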
Read all study area entries from the metadata store for a given project context Context object containing projectDir, the path of the project whose metadata store is to be read from A dictionary of key/value pairs from the study area section of the project metadata | def readStudyAreaEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.STUDY_AREA_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def getStudyInfo(self, study_id, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_study_info', [study_id, web_app_user_id, results])\n study_info = {}\n for row in results:\n study_info['submit_to_insdc'] = row[0]\n study_info['investigation_type'] = row[1]\n study_info['project_name'] = row[2]\n study_info['experimental_factor'] = row[3]\n study_info['study_alias'] = row[4]\n study_info['study_title'] = row[5]\n study_info['study_type'] = row[6]\n study_info['study_abstract'] = row[7]\n study_info['study_description'] = row[8]\n study_info['center_name'] = row[9]\n study_info['center_project_name'] = row[10]\n study_info['project_id'] = row[11]\n study_info['pmid'] = row[12]\n study_info['metadata_complete'] = row[13]\n study_info['sff_complete'] = row[14]\n study_info['mapping_file_complete'] = row[15]\n study_info['miens_compliant'] = row[16]\n study_info['can_delete'] = row[17]\n study_info['avg_emp_score'] = row[18]\n study_info['user_emp_score'] = row[19]\n study_info['number_samples_promised'] = row[20]\n study_info['number_samples_collected'] = row[21]\n study_info['principal_investigator'] = row[22]\n study_info['sample_count'] = row[23] \n study_info['lab_person'] = row[24] \n study_info['lab_person_contact'] = row[25]\n study_info['emp_person'] = row[26]\n study_info['first_contact'] = row[27]\n study_info['most_recent_contact'] = row[28]\n study_info['sample_type'] = row[29]\n study_info['has_physical_specimen'] = row[30]\n study_info['has_extracted_data'] = row[31]\n study_info['timeseries'] = row[32]\n study_info['spatial_series'] = row[33]\n study_info['principal_investigator'] = row[34]\n study_info['principal_investigator_contact'] = row[35]\n study_info['default_emp_status'] = row[36]\n study_info['funding'] = row[37]\n study_info['includes_timeseries'] = row[38]\n study_info['sample_count'] = row[39]\n study_info['ebi_study_accession'] = row[40]\n study_info['locked'] = row[41]\n study_info['vamps_id'] = row[42]\n return study_info",
"def read_data(path: str):\n documents = {}\n queries = {}\n relevance = {}\n for doc in json.load(open(path + 'cranfield_data.json')):\n title = re.sub(r'\\s+', ' ', doc['title'])\n body = re.sub(r'\\s+', ' ', doc['body'][len(doc['title']):])\n documents[doc['id']] = Article(title=title, body=body)\n \n for query in json.load(open(path + 'cran.qry.json')):\n queries[query['query number']] = query['query']\n for rel in json.load(open(path + 'cranqrel.json')):\n query_id = int(rel['query_num'])\n doc_id = int(rel['id'])\n if query_id in relevance:\n relevance[query_id].append((doc_id, rel['position']))\n else:\n relevance[query_id] = [(doc_id, rel['position'])]\n return documents, queries, relevance",
"def read_locations(db, openfile):\n pass",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def find_records():\r\n\r\n print(\"begin find records\")\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n # sensor_unit_list = retrieve_ref('sensor_unit_list')\r\n\r\n for study in study_list:\r\n # print('study = ' + str(study))\r\n source_path = os.path.join(study, 'source')\r\n # print('source_path = ' + str(source_path))\r\n\r\n source_folders = os.listdir(source_path)\r\n # print(str(study) + ' source_folders = ')\r\n # print(source_folders)\r\n\r\n df_meta = pd.DataFrame()\r\n df_meta['source_path'] = source_folders\r\n save_meta(study, df_meta)\r\n record_to_summary(study, 'Records found', str(len(source_folders)))\r\n\r\n print(\"completed find records\")",
"def get_apartment_info(experiment_config_path, output_dir, keys):\n if isinstance(keys, str):\n keys = [keys]\n\n store = cytometry.get_readonly_datastore(output_dir)\n config = experiment_config.ExperimentConfig(celldom.read_config(experiment_config_path))\n\n df = store.get('apartment').reset_index(drop=True)\n raw_files_map = store.get('acquisition').set_index('acq_id')['raw_image_path']\n\n key_fields = config.experimental_condition_fields + ['apt_num', 'st_num']\n df['key'] = df[key_fields].apply(lambda r: ':'.join(r.values.astype(str)), axis=1)\n df['raw_image_path'] = df['acq_id'].map(raw_files_map)\n\n return df[df['key'].isin(keys)].sort_values(['key', 'acq_datetime'])",
"def readGRASSEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.GRASS_SECTION)",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def getSampleList(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sample_list', [study_id, results])\n sample_list = {}\n for sample_name, sample_id in results:\n sample_list[sample_id] = sample_name\n return sample_list\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def read_locations(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n key_firms = db['nif']\n year = db['year']\n locs = db['locations']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, key_firms, year, locs, methodvalues",
"def get_content(self):\r\n content = []\r\n for regiongroup in self.region_groups:\r\n for region in regiongroup.get_content():\r\n # Add date, unique_name and project to the metadata\r\n region[0]['date'] = self.extracted_date\r\n region[0]['unique_name'] = self.unique_name\r\n try:\r\n project = os.path.split(\r\n os.path.split(self.unique_name)[0]\r\n )[1]\r\n except IndexError:\r\n project = ''\r\n region[0]['project'] = project\r\n content.append(region)\r\n return content",
"def writeStudyAreaEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.STUDY_AREA_SECTION, key, value)",
"def _collect_scene_data(self, config):\n\n self._config = config\n self.scenes_root_path = config['scenes_root_path']\n assert(os.path.isdir(self.scenes_root_path))\n\n self._scene_dict = dict()\n # each one is a list of scenes\n self._all_image_paths = {\"train\": [], \"test\": []}\n\n for key, val in self._all_image_paths.items():\n for scene_collection_name in config[key]:\n scene_collection_dir = os.path.join(self.scenes_root_path, scene_collection_name)\n assert os.path.isdir(scene_collection_dir), scene_collection_dir\n # Scan all scenes in this scene dir\n for scene_name in os.listdir(scene_collection_dir):\n full = os.path.join(scene_collection_dir, scene_name)\n if os.path.isdir(full):\n val += self._get_all_rgb_image_paths_in_scene_dir(full)",
"def summarize(path: str) -> dict:\n results = parse_bactopia_directory(path)",
"def read_archive(self):\n print '------------------------------------'\n print ' Archiving the simulation results'\n print '----------------------------------- '\n\n if self.Simulation_area.lower() == 'barrow':\n os.chdir(self.control['Run_dir']+self.Input_directory+'/Barrow/')\n elif self.Simulation_area.lower() == 'tanana':\n os.chdir(self.control['Run_dir']+self.Input_directory+'/Tanana/')\n elif self.Simulation_area.lower() == 'yukon':\n os.chdir(self.control['Run_dir']+self.Input_directory+'/Yukon/')\n \n self.archive = {}\n with open(self.archive_data, 'r') as f:\n for line in f:\n if line.startswith('#'):\n continue\n else:\n (key, val) = line.split()\n self.archive[(key)] = val",
"def read_analysis(self,id):\n raw=requests.get(cfg_dict['analysis'] + id, auth=HttpNtlmAuth(cfg_dict['user'], cfg_dict['pass']))\n self.analysis=json.loads(raw.text)\n\n raw = self.analysis['directory'] + cfg_dict['format_loc']\n norm = os.path.normpath(raw)\n if os.path.exists(norm):\n self.path=norm",
"def get_one_study(study_dir):\n df_columns = ['item_type', 'item_urn', 'content']\n df = pd.DataFrame(columns=df_columns)\n\n # category urn and label\n list_files = os.listdir(study_dir)\n\n instrument_dict = {}\n if 'Instrument.txt' in list_files:\n L = json.load(open(os.path.join(study_dir, 'Instrument.txt')))\n item = item_to_dict(L[0])\n instrument_dict['instrument_urn'] = item['InstrumentURN']\n instrument_dict['instrument_name'] = item['InstrumentName']\n\n if 'Category.txt' in list_files:\n category_dict = generate_category_dict(os.path.join(study_dir, 'Category.txt'))\n else:\n category_dict = []\n\n for input_file in list_files:\n\n filename = os.path.splitext(input_file)[0]\n\n L = json.load(open(os.path.join(study_dir, input_file)))\n\n if filename == 'Question':\n for dict_item in L:\n item = item_to_dict(dict_item)\n if not item['QuestionLiteral'] is None:\n literal = item['QuestionLiteral'].replace('\\n', '')\n else:\n literal = None\n df.loc[len(df)] = ['question', item['QuestionURN'], literal]\n df.loc[len(df)] = ['question name', None, item['QuestionItemName']]\n\n if item['Response'] != {} and item['Response']['response_type'] != 'CodeList':\n df.loc[len(df)] = [item['Response']['response_type'], None, item['Response']['response_label']]\n\n elif filename == 'Interviewer Instruction':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['instruction', item['InstructionURN'], item['InstructionText']]\n\n elif filename == 'Code Set' and category_dict != []:\n for dict_item in L:\n item = item_to_dict(dict_item)\n for code in item['Code']:\n if code['Value'] is None:\n code['Value'] = ''\n cat_urn = 'urn:ddi:' + code['CategoryReference']['Agency'] + ':' + code['CategoryReference']['ID'] + ':' + code['CategoryReference']['Version']\n df.loc[len(df)] = ['codelist', code['URN'], code['Value'] + ', ' + category_dict[cat_urn]]\n\n elif filename == 'Statement':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['statement', item['StatementURN'], item['Literal']]\n\n elif filename == 'Conditional':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['conditional', item['URN'], item['IfCondition']['Description'] + item['IfCondition']['CommandContent'] if not item['IfCondition']['CommandContent'] is None else item['IfCondition']['Description']]\n\n elif filename == 'Loop':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['loop', item['URN'], item['LoopWhile']['CommandContent']]\n\n else:\n # TODO\n print(filename)\n\n df = df.drop_duplicates(keep='first')\n df['instrument_name'] = instrument_dict['instrument_name']\n df['instrument_urn'] = instrument_dict['instrument_urn']\n return df",
"def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict",
"def readHydroShareEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.HYDROSHARE_SECTION)",
"def get_osm_tracks(area_path: str) -> dict:\n with open(area_path, 'r') as f:\n osm_tracks_dict = json.load(f)\n return osm_tracks_dict['tracks']",
"def load_dataset(path):\n with open(path) as f:\n data = json.load(f)['data']\n output = {'qids': [], 'questions': [], 'answers': [],\n 'contexts': [], 'qid2cid': []}\n for article in data:\n for paragraph in article['paragraphs']:\n output['contexts'].append(paragraph['context'])\n for qa in paragraph['qas']:\n output['qids'].append(qa['id'])\n output['questions'].append(qa['question'])\n output['qid2cid'].append(len(output['contexts']) - 1)\n if 'answers' in qa:\n output['answers'].append(qa['answers'])\n return output",
"def _ReadSessionConfiguration(self, path, knowledge_base_object):\n storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(\n path)\n\n for session in storage_reader.GetSessions():\n if not session.source_configurations:\n storage_reader.ReadSystemConfiguration(knowledge_base_object)\n else:\n for source_configuration in session.source_configurations:\n knowledge_base_object.ReadSystemConfigurationArtifact(\n source_configuration.system_configuration,\n session_identifier=session.identifier)",
"def get_data(folder_name):\n train_data = {}\n for study_id, study_path in sorted(get_studies(folder_name)):\n train_data[study_id] = get_slices(study_id, study_path)\n return train_data",
"def getSampleIDsFromStudy(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sample_ids_from_study', [study_id, results])\n metadata_fields = []\n for row in results:\n metadata_fields.append(row[0])\n return metadata_fields\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def get_study_system_attrs(self, study_id: int) -> Dict[str, Any]:\n raise NotImplementedError",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def getMappingFiles(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n items = []\n con.cursor().callproc('qiime_assets.get_mapping_files', [study_id, results])\n for row in results:\n items.append(row[0])\n return items\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def read_project(path: str):\n textfilecontent = {}\n\n # Discover .txt files and add them to the dictionary\n for filepath in iglob(os.path.join(path, '**/*.txt'), recursive=True):\n add_path_dict(input_dict=textfilecontent, start_path=path,\n file_path=filepath)\n\n return textfilecontent"
] | [
"0.5813684",
"0.5588683",
"0.53330386",
"0.5267538",
"0.52265596",
"0.5189714",
"0.51646703",
"0.51445687",
"0.5102872",
"0.5077357",
"0.50310695",
"0.5018051",
"0.49929735",
"0.4955393",
"0.49027565",
"0.48494425",
"0.48395818",
"0.48371568",
"0.48293978",
"0.48228657",
"0.48134696",
"0.481227",
"0.47949648",
"0.47768673",
"0.47702214",
"0.4767299",
"0.47664088",
"0.47574544",
"0.47564375",
"0.47446096"
] | 0.78608793 | 0 |
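Several of the retrieved negatives (for example the `checkMetadata` snippet under the GRASS row below) show the typical consumer side of these section dicts: look up required keys and raise when any are missing. A hedged sketch of that validation step follows; the exception class, key names, and message wording are assumptions for the demo, not the original implementation.

```python
# Consumer-side sketch: validate that a section dict returned by one of the
# read*Entries helpers contains the keys a downstream step needs.
# MetadataException, the key names, and the message text are illustrative.
class MetadataException(Exception):
    pass


def require_keys(section_dict, required, section_name, project_dir):
    """Raise MetadataException listing any required keys absent from section_dict."""
    missing = [key for key in required if key not in section_dict]
    if missing:
        raise MetadataException(
            "Metadata in project directory %s is missing %s entries: %s"
            % (project_dir, section_name, ", ".join(missing)))


if __name__ == "__main__":
    study_area = {"dem_rast": "DEM", "dem_res_x": "30.0"}
    require_keys(study_area, ["dem_rast", "dem_res_x"], "study area", "/tmp/my_project")
    try:
        require_keys(study_area, ["soil_rast"], "study area", "/tmp/my_project")
    except MetadataException as exc:
        print(exc)
```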
Read all GRASS entries from the metadata store for a given project context Context object containing projectDir, the path of the project whose metadata store is to be read from A dictionary of key/value pairs from the GRASS section of the project metadata | def readGRASSEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.GRASS_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def readClimateGridEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION)",
"def _project_files(w):\n return {k: v for k, v in w['attributes'].items() if isinstance(v, str) and v.startswith('gs://')}",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data",
"def checkMetadata(self):\n super(WorldfileMultiple, self).checkMetadata()\n \n # Check for necessary information in metadata\n if not 'basin_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a basin raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'subbasins_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a sub-basin raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'dem_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a DEM raster in a GRASS mapset\" % (self.context.projectDir,)) \n if not 'soil_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a soil raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'patch_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a patch raster in a GRASS mapset\" % (self.context.projectDir,))\n \n if not 'rhessys_dir' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a RHESSys directory\" % (self.context.projectDir,))\n if not 'g2w_bin' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a grass2world executable\" % (self.context.projectDir,))\n if not 'rat_bin' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain an AverageTables executable\" % (self.context.projectDir,))\n if not 'template' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a world template\" % (self.context.projectDir,))\n if not 'rhessys_dir' in self.metadata:\n raise MetadataException(\"Metadata in project directory {0} does not contain a RHESSys directory\".format(self.context.projectDir))",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def load_dcos_conf_templete(fpath):\n p_key = re.compile(r' *- path: (?P<g1>.*)')\n c_key = re.compile(r' *content: [|].*')\n h_key = re.compile(r' *#.*$')\n\n with fpath.open() as fp:\n\n aggregator = {'package': []}\n path = ''\n content = []\n\n for line in fp:\n pk_match = p_key.match(line)\n ck_match = c_key.match(line)\n hk_match = h_key.match(line)\n\n if pk_match:\n\n if path:\n item = {'path': path, 'content': ''.join(content)}\n aggregator['package'].append(item)\n path = pk_match.group('g1')\n content = []\n else:\n path = pk_match.group('g1')\n elif ck_match:\n continue\n elif hk_match:\n continue\n else:\n if not path:\n continue\n else:\n content.append(line.strip(' '))\n\n item = {'path': path, 'content': ''.join(content)}\n aggregator['package'].append(item)\n\n return aggregator",
"def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)",
"def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def read_project(path: str):\n textfilecontent = {}\n\n # Discover .txt files and add them to the dictionary\n for filepath in iglob(os.path.join(path, '**/*.txt'), recursive=True):\n add_path_dict(input_dict=textfilecontent, start_path=path,\n file_path=filepath)\n\n return textfilecontent",
"def project_files_attributes(self):\n _files = {}\n for k, v in self.attributes.workspace.items():\n if isinstance(v, str) and v.startswith('gs://'):\n _files[k] = v\n return _files",
"def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata",
"def ReadProjectInfo() -> Dict[str, str]:\n project_info_file = os.environ.get('PROJECT_INFO')\n if project_info_file is None:\n raise OSError('Error: please make sure that you defined the '\n '\"PROJECT_INFO\" environment variable pointing '\n 'to your project settings.')\n try:\n json_file = open(project_info_file)\n try:\n project_info = json.load(json_file)\n except ValueError as exception:\n raise RuntimeError(\n f'Error: cannot parse JSON file. {str(exception)}') from ValueError\n json_file.close()\n except OSError as exception:\n raise OSError(\n f'Error: could not open/close file {project_info}: {str(exception)}'\n ) from OSError\n\n if not all(key in project_info for key in INFO_REQUIRED_KEYS):\n raise ValueError(\n 'Error: please make sure that your JSON file has the required entries. '\n 'The file should contain at least the following: '\n f'{\", \".join(INFO_REQUIRED_KEYS)}')\n\n return project_info",
"def get_data() -> dict:\n project_dir = Path(__file__).parent.parent\n metadata = toml.load(project_dir / \"pyproject.toml\")[\"tool\"][\"poetry\"]\n lock_data = toml.load(project_dir / \"poetry.lock\")\n project_name = metadata[\"name\"]\n\n poetry_dependencies = chain(metadata[\"dependencies\"].keys(), metadata[\"dev-dependencies\"].keys())\n direct_dependencies = sorted(dep.lower() for dep in poetry_dependencies)\n direct_dependencies.remove(\"python\")\n\n indirect_dependencies = sorted(\n pkg[\"name\"] for pkg in lock_data[\"package\"] if pkg[\"name\"] not in direct_dependencies\n )\n\n dependencies = direct_dependencies + indirect_dependencies\n packages = {pkg[\"name\"]: clean_info(pkg) for pkg in search_packages_info(dependencies)}\n # poetry.lock seems to always use lowercase for packages names\n packages.update({name.lower(): pkg for name, pkg in packages.items()}) # noqa: WPS221 (not that complex)\n\n for dependency in dependencies:\n if dependency not in packages:\n pkg_data = httpx.get(f\"https://pypi.python.org/pypi/{dependency}/json\").json()[\"info\"]\n home_page = pkg_data[\"home_page\"] or pkg_data[\"project_url\"] or pkg_data[\"package_url\"]\n pkg_name = pkg_data[\"name\"]\n pkg = {\"name\": pkg_name, \"home-page\": home_page}\n packages.update({pkg_name: pkg, pkg_name.lower(): pkg})\n\n return {\n \"project_name\": project_name,\n \"direct_dependencies\": direct_dependencies,\n \"indirect_dependencies\": indirect_dependencies,\n \"package_info\": packages,\n }",
"def GatherBaseData(self, mr, nonce):\n project = mr.project\n\n project_summary = ''\n project_alert = None\n project_read_only = False\n project_home_page = ''\n project_thumbnail_url = ''\n if project:\n project_summary = project.summary\n project_alert = _CalcProjectAlert(project)\n project_read_only = project.read_only_reason\n project_home_page = project.home_page\n project_thumbnail_url = tracker_views.LogoView(project).thumbnail_url\n\n with work_env.WorkEnv(mr, self.services) as we:\n is_project_starred = False\n project_view = None\n if mr.project:\n if permissions.UserCanViewProject(\n mr.auth.user_pb, mr.auth.effective_ids, mr.project):\n is_project_starred = we.IsProjectStarred(mr.project_id)\n # TODO(jrobbins): should this be a ProjectView?\n project_view = template_helpers.PBProxy(mr.project)\n\n grid_x_attr = None\n grid_y_attr = None\n hotlist_view = None\n if mr.hotlist:\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user,\n features_bizobj.UsersInvolvedInHotlists([mr.hotlist]))\n hotlist_view = hotlist_views.HotlistView(\n mr.hotlist, mr.perms, mr.auth, mr.viewed_user_auth.user_id,\n users_by_id, self.services.hotlist_star.IsItemStarredBy(\n mr.cnxn, mr.hotlist.hotlist_id, mr.auth.user_id))\n grid_x_attr = mr.x.lower()\n grid_y_attr = mr.y.lower()\n\n app_version = os.environ.get('CURRENT_VERSION_ID')\n\n viewed_username = None\n if mr.viewed_user_auth.user_view:\n viewed_username = mr.viewed_user_auth.user_view.username\n\n issue_entry_url = 'entry'\n config = None\n if mr.project_id and self.services.config:\n with mr.profiler.Phase('getting config'):\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n grid_x_attr = (mr.x or config.default_x_attr).lower()\n grid_y_attr = (mr.y or config.default_y_attr).lower()\n issue_entry_url = _LoginOrIssueEntryURL(mr, config)\n\n viewing_self = mr.auth.user_id == mr.viewed_user_auth.user_id\n offer_saved_queries_subtab = (\n viewing_self or mr.auth.user_pb and mr.auth.user_pb.is_site_admin)\n\n login_url = _SafeCreateLoginURL(mr)\n logout_url = _SafeCreateLogoutURL(mr)\n logout_url_goto_home = users.create_logout_url('/')\n version_base = _VersionBaseURL(mr.request)\n\n base_data = {\n # EZT does not have constants for True and False, so we pass them in.\n 'True': ezt.boolean(True),\n 'False': ezt.boolean(False),\n\n 'local_mode': ezt.boolean(settings.local_mode),\n\n 'site_name': settings.site_name,\n 'show_search_metadata': ezt.boolean(False),\n 'page_template': self._PAGE_TEMPLATE,\n 'main_tab_mode': self._MAIN_TAB_MODE,\n 'project_summary': project_summary,\n 'project_home_page': project_home_page,\n 'project_thumbnail_url': project_thumbnail_url,\n\n 'hotlist_id': mr.hotlist_id,\n 'hotlist': hotlist_view,\n\n 'hostport': mr.request.host,\n 'absolute_base_url': '%s://%s' % (mr.request.scheme, mr.request.host),\n 'project_home_url': None,\n 'link_rel_canonical': None, # For specifying <link rel=\"canonical\">\n 'projectname': mr.project_name,\n 'project': project_view,\n 'project_is_restricted': ezt.boolean(_ProjectIsRestricted(mr)),\n 'offer_contributor_list': ezt.boolean(\n permissions.CanViewContributorList(mr, mr.project)),\n 'logged_in_user': mr.auth.user_view,\n 'form_token': None, # Set to a value below iff the user is logged in.\n 'form_token_path': None,\n 'token_expires_sec': None,\n 'xhr_token': None, # Set to a value below iff the user is logged in.\n 'flag_spam_token': None,\n 'nonce': nonce,\n 'perms': mr.perms,\n 'warnings': mr.warnings,\n 'errors': mr.errors,\n\n 
'viewed_username': viewed_username,\n 'viewed_user': mr.viewed_user_auth.user_view,\n 'viewed_user_pb': template_helpers.PBProxy(\n mr.viewed_user_auth.user_pb),\n 'viewing_self': ezt.boolean(viewing_self),\n 'viewed_user_id': mr.viewed_user_auth.user_id,\n 'offer_saved_queries_subtab': ezt.boolean(offer_saved_queries_subtab),\n\n 'currentPageURL': mr.current_page_url,\n 'currentPageURLEncoded': mr.current_page_url_encoded,\n 'login_url': login_url,\n 'logout_url': logout_url,\n 'logout_url_goto_home': logout_url_goto_home,\n 'continue_issue_id': mr.continue_issue_id,\n 'feedback_email': settings.feedback_email,\n 'category_css': None, # Used to specify a category of stylesheet\n 'category2_css': None, # specify a 2nd category of stylesheet if needed.\n 'page_css': None, # Used to add a stylesheet to a specific page.\n\n 'can': mr.can,\n 'query': mr.query,\n 'colspec': None,\n 'sortspec': mr.sort_spec,\n\n # Options for issuelist display\n 'grid_x_attr': grid_x_attr,\n 'grid_y_attr': grid_y_attr,\n 'grid_cell_mode': mr.cells,\n 'grid_mode': None,\n 'list_mode': None,\n 'chart_mode': None,\n\n 'issue_entry_url': issue_entry_url,\n 'is_cross_project': ezt.boolean(False),\n\n # for project search (some also used in issue search)\n 'start': mr.start,\n 'num': mr.num,\n 'groupby': mr.group_by_spec,\n 'q_field_size': (\n min(framework_constants.MAX_ARTIFACT_SEARCH_FIELD_SIZE,\n max(framework_constants.MIN_ARTIFACT_SEARCH_FIELD_SIZE,\n len(mr.query) + framework_constants.AUTOSIZE_STEP))),\n 'mode': None, # Display mode, e.g., grid mode.\n 'ajah': mr.ajah,\n 'table_title': mr.table_title,\n\n 'alerts': alerts.AlertsView(mr), # For alert.ezt\n 'project_alert': project_alert,\n\n 'title': None, # First part of page title\n 'title_summary': None, # Appended to title on artifact detail pages\n\n # TODO(jrobbins): make sure that the templates use\n # project_read_only for project-mutative actions and if any\n # uses of read_only remain.\n 'project_read_only': ezt.boolean(project_read_only),\n 'site_read_only': ezt.boolean(settings.read_only),\n 'banner_time': servlet_helpers.GetBannerTime(settings.banner_time),\n 'read_only': ezt.boolean(settings.read_only or project_read_only),\n 'site_banner_message': settings.banner_message,\n 'robots_no_index': None,\n 'analytics_id': settings.analytics_id,\n\n 'is_project_starred': ezt.boolean(is_project_starred),\n\n 'version_base': version_base,\n 'app_version': app_version,\n 'gapi_client_id': settings.gapi_client_id,\n 'viewing_user_page': ezt.boolean(False),\n 'old_ui_url': None,\n\n 'is_member': ezt.boolean(False),\n }\n\n if mr.project:\n base_data['project_home_url'] = '/p/%s' % mr.project_name\n\n # Always add xhr-xsrf token because even anon users need some\n # pRPC methods, e.g., autocomplete, flipper, and charts.\n base_data['token_expires_sec'] = xsrf.TokenExpiresSec()\n base_data['xhr_token'] = xsrf.GenerateToken(\n mr.auth.user_id, xsrf.XHR_SERVLET_PATH)\n # Always add other anti-xsrf tokens when the user is logged in.\n if mr.auth.user_id:\n form_token_path = self._FormHandlerURL(mr.request.path)\n base_data['form_token'] = xsrf.GenerateToken(\n mr.auth.user_id, form_token_path)\n base_data['form_token_path'] = form_token_path\n\n return base_data",
"def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)",
"def global_metadata(paths):\n\n # Weakly group images to partition image set size- crucial optimization step\n if os.path.exists(paths.image_preprocess):\n clumped_paths = json.loads(open(paths.image_preprocess).read())\n else:\n clumped_paths = network.alpha_categorize(paths)\n print(\"Hashed source images\")\n\n with open(paths.image_preprocess, 'w') as json_file:\n json.dump(clumped_paths, json_file)\n\n # Combinatorial image grouping to graph\n image_graph = network.load_graph(paths.image_network_path)\n\n total = len(list(chain(*clumped_paths.values())))\n counter = 0.\n\n for image_paths in clumped_paths.values():\n counter += len(image_paths)\n print(str(int(counter / float(total) * 100)) + \"% complete\")\n\n if len(image_paths) > 1:\n image_grouping = images.load_paths(paths.default_patches, image_paths)\n image_graph = metadata.network.network_images(\n image_grouping, threshold=0, network=image_graph)\n else:\n image_graph.add_node(image_paths[0])\n\n metadata.network.save_graph(paths.image_network_path, image_graph)\n print(\"Updated image graph.\")\n\n # Create informational json files for templates and files\n templates.build(paths, image_graph)\n mappings.build(paths, image_graph)\n print(\"Created JSON metadata files.\")",
"def forges():\n\n forges = {}\n\n for forge_path in sorted(glob.glob(\"/opt/service/forge/*.yaml\")):\n if forge_path.split(\"/\")[-1] not in [\"fields.yaml\", \"values.yaml\"]:\n with open(forge_path, \"r\") as forge_file:\n forges[forge_path.split(\"/\")[-1].split(\".\")[0]] = yaml.safe_load(forge_file)[\"description\"]\n\n return forges",
"def read_files(project_ID):\n \n # Define the link and the metadata key name\n API_downloads_link = 'http://194.4.103.57:5000/project/downloads/'\n metadata_key_name = 'experimentDesignLink'\n filtered_key_name = 'filteredTPMLink'\n normalised_key_name = 'normalisedCountsLink'\n \n # Define variables\n metadata = None\n matrix = None\n gene_names = None\n cell_names = None\n \n # Get the download links of the project\n links = requests.get(API_downloads_link + project_ID).json()\n if not links: # If project doesn't exists\n raise Exception(f'Project with ID {project_ID} not found')\n links = links[0]\n \n # Return the metadata if it exists\n if metadata_key_name in links:\n metadata_link = links[metadata_key_name]\n metadata = pd.read_csv(metadata_link, sep='\\t', low_memory=False)\n \n if filtered_key_name in links:\n matrix_link = links[filtered_key_name]\n matrix, cell_names, gene_names = download_matrix(matrix_link, matrix_type='filtered')\n elif normalised_key_name in links:\n matrix_link = links[normalised_key_name]\n matrix, cell_names, gene_names = download_matrix(matrix_link, matrix_type='normalised')\n \n # If project does not have metadata link, return none\n return metadata, matrix, gene_names, cell_names",
"def read_data(path: str):\n documents = {}\n queries = {}\n relevance = {}\n for doc in json.load(open(path + 'cranfield_data.json')):\n title = re.sub(r'\\s+', ' ', doc['title'])\n body = re.sub(r'\\s+', ' ', doc['body'][len(doc['title']):])\n documents[doc['id']] = Article(title=title, body=body)\n \n for query in json.load(open(path + 'cran.qry.json')):\n queries[query['query number']] = query['query']\n for rel in json.load(open(path + 'cranqrel.json')):\n query_id = int(rel['query_num'])\n doc_id = int(rel['id'])\n if query_id in relevance:\n relevance[query_id].append((doc_id, rel['position']))\n else:\n relevance[query_id] = [(doc_id, rel['position'])]\n return documents, queries, relevance",
"def get_materials_properties(dbpath): #<un-named>nook\n odb = openOdb(path=dbpath)\n data = []\n for _name,_mat in odb.materials.items():\n _elastic_mod = _mat.elastic.table[0][0]\n _poisson = _mat.elastic.table[0][1]\n if hasattr(_mat,\"plastic\"):\n _plastic = _mat.plastic.table\n else:\n _plastic = []\n data.append((_name,_elastic_mod,_poisson,_plastic))\n odb.close()\n return data",
"def get_entries(self):\n prefixes = self.spot_mappings\n with open(self.path, 'r') as f:\n prefix_key = self.seek_through_comments(f).rsplit(\"/\", 1)[-1]\n prefix = prefixes[prefix_key]\n\n for ln in self.split_log_lines(f, \"|\", prefix):\n yield LogItem(*ln).get_properties()",
"def read_locations(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n key_firms = db['nif']\n year = db['year']\n locs = db['locations']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, key_firms, year, locs, methodvalues",
"def read_redmapper():\n redfile = os.path.join(os.sep, 'global', 'work', 'projects', \n 'redmapper', 'redmapper_isedfit_v5.10_centrals.fits.gz')\n print('Reading {}'.format(redfile))\n cat = fitsio.read(redfile, ext=1)\n return cat",
"def read_biocontext(name: str) -> PREFIX_MAP:\n path_to_jsonld = HERE / \"registry\" / f\"{name}.jsonld\"\n with open(path_to_jsonld) as file:\n return extract_prefixmap(json.load(file))\n # return read_remote_jsonld_context(\"https://raw.githubusercontent.com/prefixcommons/biocontext/master/registry/\"+name+\".jsonld\")",
"def settings_db_read_settings(db_name=SETTINGS_DB_NAME):\n paths = local = None\n if os.path.isfile(db_name):\n with lite.connect(db_name) as con:\n cur = con.cursor()\n result = cur.execute(\"SELECT LOGISIM_HOME, GRADING_PATH, IMPORT_PATH, GRADES_DB\\\n FROM PATHS\")\n paths = result.fetchone()\n result = cur.execute(\"SELECT GRADER_NAME, YEAR, SEMESTER, USE_STYLE, SYNC_COMMAND\\\n FROM LOCAL\")\n local = result.fetchone()\n\n return paths, local"
] | [
"0.6236531",
"0.55502063",
"0.54002386",
"0.5369209",
"0.5348187",
"0.5330783",
"0.528488",
"0.5277649",
"0.5223101",
"0.521148",
"0.52064604",
"0.5200986",
"0.5190385",
"0.5173091",
"0.5053825",
"0.504017",
"0.5016032",
"0.49565023",
"0.49422446",
"0.49338242",
"0.491178",
"0.4897096",
"0.48843643",
"0.48682275",
"0.4864491",
"0.48529974",
"0.48390555",
"0.48366395",
"0.48305607",
"0.48123106"
] | 0.7260052 | 0 |
Read all model run entries from the metadata store for a given project. context -- Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns a dictionary of key/value pairs from the model run section of the project metadata | def readModelRunEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readModelRuns(context):\n modelRunObjects = []\n modelRuns = GenericMetadata.readModelRunEntries(context)\n try:\n runs = modelRuns['runs'].split(GenericMetadata.VALUE_DELIM)\n for run in runs:\n modelRunObjects.append(ModelRun.readFromMetadata(context, run))\n except KeyError:\n pass\n return modelRunObjects",
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def writeToMetadata(self, context):\n if self.modelType not in GenericMetadata.MODEL_TYPES:\n raise Exception(\"Model type %s is not among known model types: %s\" % (self.modelType, str(GenericMetadata.MODEL_TYPES) ) )\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n try:\n runs = modelRunEntries['runs'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n runs = []\n \n # Collected model entry and keys and values into lists so we can write to metadata store in batch\n keys = []\n values = []\n \n # Generate unique identifier for this model run. Unique ID is a combination of model type and a number\n entryNumber = 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n while fqId in runs:\n entryNumber += 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n self.runNumber = entryNumber\n # Add new run to list of runs\n runs.append(fqId)\n runsStr = GenericMetadata.VALUE_DELIM.join(runs)\n keys.append('runs'); values.append(runsStr)\n # Write attributes for run\n keyProto = fqId + GenericMetadata.KEY_SEP\n runDate = keyProto + 'date_utc'\n keys.append(runDate); values.append( self.date.strftime(ModelRun.FMT_DATE) )\n runDesc = keyProto + 'description'\n keys.append(runDesc); values.append(self.description)\n runCmd = keyProto + 'command'\n keys.append(runCmd); values.append(self.command)\n runOutput = keyProto + 'output'\n keys.append(runOutput); values.append(self.output)\n # Write to metadata\n GenericMetadata.writeModelRunEntries(context, keys, values)",
"def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def readFromMetadata(cls, context, fqId): \n newInstance = ModelRun()\n (newInstance.modelType, newInstance.runNumber) = fqId.split(GenericMetadata.KEY_SEP)\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n keyProto = fqId + GenericMetadata.KEY_SEP\n \n runDate = keyProto + 'date_utc'\n newInstance.date = datetime.strptime(modelRunEntries[runDate], ModelRun.FMT_DATE)\n runDesc = keyProto + 'description'\n newInstance.description = modelRunEntries[runDesc]\n runCmd = keyProto + 'command'\n newInstance.command = modelRunEntries[runCmd]\n runOutput = keyProto + 'output'\n newInstance.output = modelRunEntries[runOutput]\n \n return newInstance",
"def _read_latest_config_files(self, run_path_pairs):\n configs = {}\n config_fpaths = {}\n for run_name, logdir in run_path_pairs:\n config = ProjectorConfig()\n config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)\n if file_io.file_exists(config_fpath):\n file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')\n text_format.Merge(file_content, config)\n\n has_tensor_files = False\n for embedding in config.embeddings:\n if embedding.tensor_path:\n has_tensor_files = True\n break\n\n if not config.model_checkpoint_path:\n # See if you can find a checkpoint file in the logdir.\n ckpt_path = latest_checkpoint(logdir)\n if not ckpt_path:\n # Or in the parent of logdir.\n ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))\n if not ckpt_path and not has_tensor_files:\n continue\n if ckpt_path:\n config.model_checkpoint_path = ckpt_path\n\n # Sanity check for the checkpoint file.\n if (config.model_checkpoint_path and\n not checkpoint_exists(config.model_checkpoint_path)):\n logging.warning('Checkpoint file %s not found',\n config.model_checkpoint_path)\n continue\n configs[run_name] = config\n config_fpaths[run_name] = config_fpath\n return configs, config_fpaths",
"def GatherBaseData(self, mr, nonce):\n project = mr.project\n\n project_summary = ''\n project_alert = None\n project_read_only = False\n project_home_page = ''\n project_thumbnail_url = ''\n if project:\n project_summary = project.summary\n project_alert = _CalcProjectAlert(project)\n project_read_only = project.read_only_reason\n project_home_page = project.home_page\n project_thumbnail_url = tracker_views.LogoView(project).thumbnail_url\n\n with work_env.WorkEnv(mr, self.services) as we:\n is_project_starred = False\n project_view = None\n if mr.project:\n if permissions.UserCanViewProject(\n mr.auth.user_pb, mr.auth.effective_ids, mr.project):\n is_project_starred = we.IsProjectStarred(mr.project_id)\n # TODO(jrobbins): should this be a ProjectView?\n project_view = template_helpers.PBProxy(mr.project)\n\n grid_x_attr = None\n grid_y_attr = None\n hotlist_view = None\n if mr.hotlist:\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user,\n features_bizobj.UsersInvolvedInHotlists([mr.hotlist]))\n hotlist_view = hotlist_views.HotlistView(\n mr.hotlist, mr.perms, mr.auth, mr.viewed_user_auth.user_id,\n users_by_id, self.services.hotlist_star.IsItemStarredBy(\n mr.cnxn, mr.hotlist.hotlist_id, mr.auth.user_id))\n grid_x_attr = mr.x.lower()\n grid_y_attr = mr.y.lower()\n\n app_version = os.environ.get('CURRENT_VERSION_ID')\n\n viewed_username = None\n if mr.viewed_user_auth.user_view:\n viewed_username = mr.viewed_user_auth.user_view.username\n\n issue_entry_url = 'entry'\n config = None\n if mr.project_id and self.services.config:\n with mr.profiler.Phase('getting config'):\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n grid_x_attr = (mr.x or config.default_x_attr).lower()\n grid_y_attr = (mr.y or config.default_y_attr).lower()\n issue_entry_url = _LoginOrIssueEntryURL(mr, config)\n\n viewing_self = mr.auth.user_id == mr.viewed_user_auth.user_id\n offer_saved_queries_subtab = (\n viewing_self or mr.auth.user_pb and mr.auth.user_pb.is_site_admin)\n\n login_url = _SafeCreateLoginURL(mr)\n logout_url = _SafeCreateLogoutURL(mr)\n logout_url_goto_home = users.create_logout_url('/')\n version_base = _VersionBaseURL(mr.request)\n\n base_data = {\n # EZT does not have constants for True and False, so we pass them in.\n 'True': ezt.boolean(True),\n 'False': ezt.boolean(False),\n\n 'local_mode': ezt.boolean(settings.local_mode),\n\n 'site_name': settings.site_name,\n 'show_search_metadata': ezt.boolean(False),\n 'page_template': self._PAGE_TEMPLATE,\n 'main_tab_mode': self._MAIN_TAB_MODE,\n 'project_summary': project_summary,\n 'project_home_page': project_home_page,\n 'project_thumbnail_url': project_thumbnail_url,\n\n 'hotlist_id': mr.hotlist_id,\n 'hotlist': hotlist_view,\n\n 'hostport': mr.request.host,\n 'absolute_base_url': '%s://%s' % (mr.request.scheme, mr.request.host),\n 'project_home_url': None,\n 'link_rel_canonical': None, # For specifying <link rel=\"canonical\">\n 'projectname': mr.project_name,\n 'project': project_view,\n 'project_is_restricted': ezt.boolean(_ProjectIsRestricted(mr)),\n 'offer_contributor_list': ezt.boolean(\n permissions.CanViewContributorList(mr, mr.project)),\n 'logged_in_user': mr.auth.user_view,\n 'form_token': None, # Set to a value below iff the user is logged in.\n 'form_token_path': None,\n 'token_expires_sec': None,\n 'xhr_token': None, # Set to a value below iff the user is logged in.\n 'flag_spam_token': None,\n 'nonce': nonce,\n 'perms': mr.perms,\n 'warnings': mr.warnings,\n 'errors': mr.errors,\n\n 
'viewed_username': viewed_username,\n 'viewed_user': mr.viewed_user_auth.user_view,\n 'viewed_user_pb': template_helpers.PBProxy(\n mr.viewed_user_auth.user_pb),\n 'viewing_self': ezt.boolean(viewing_self),\n 'viewed_user_id': mr.viewed_user_auth.user_id,\n 'offer_saved_queries_subtab': ezt.boolean(offer_saved_queries_subtab),\n\n 'currentPageURL': mr.current_page_url,\n 'currentPageURLEncoded': mr.current_page_url_encoded,\n 'login_url': login_url,\n 'logout_url': logout_url,\n 'logout_url_goto_home': logout_url_goto_home,\n 'continue_issue_id': mr.continue_issue_id,\n 'feedback_email': settings.feedback_email,\n 'category_css': None, # Used to specify a category of stylesheet\n 'category2_css': None, # specify a 2nd category of stylesheet if needed.\n 'page_css': None, # Used to add a stylesheet to a specific page.\n\n 'can': mr.can,\n 'query': mr.query,\n 'colspec': None,\n 'sortspec': mr.sort_spec,\n\n # Options for issuelist display\n 'grid_x_attr': grid_x_attr,\n 'grid_y_attr': grid_y_attr,\n 'grid_cell_mode': mr.cells,\n 'grid_mode': None,\n 'list_mode': None,\n 'chart_mode': None,\n\n 'issue_entry_url': issue_entry_url,\n 'is_cross_project': ezt.boolean(False),\n\n # for project search (some also used in issue search)\n 'start': mr.start,\n 'num': mr.num,\n 'groupby': mr.group_by_spec,\n 'q_field_size': (\n min(framework_constants.MAX_ARTIFACT_SEARCH_FIELD_SIZE,\n max(framework_constants.MIN_ARTIFACT_SEARCH_FIELD_SIZE,\n len(mr.query) + framework_constants.AUTOSIZE_STEP))),\n 'mode': None, # Display mode, e.g., grid mode.\n 'ajah': mr.ajah,\n 'table_title': mr.table_title,\n\n 'alerts': alerts.AlertsView(mr), # For alert.ezt\n 'project_alert': project_alert,\n\n 'title': None, # First part of page title\n 'title_summary': None, # Appended to title on artifact detail pages\n\n # TODO(jrobbins): make sure that the templates use\n # project_read_only for project-mutative actions and if any\n # uses of read_only remain.\n 'project_read_only': ezt.boolean(project_read_only),\n 'site_read_only': ezt.boolean(settings.read_only),\n 'banner_time': servlet_helpers.GetBannerTime(settings.banner_time),\n 'read_only': ezt.boolean(settings.read_only or project_read_only),\n 'site_banner_message': settings.banner_message,\n 'robots_no_index': None,\n 'analytics_id': settings.analytics_id,\n\n 'is_project_starred': ezt.boolean(is_project_starred),\n\n 'version_base': version_base,\n 'app_version': app_version,\n 'gapi_client_id': settings.gapi_client_id,\n 'viewing_user_page': ezt.boolean(False),\n 'old_ui_url': None,\n\n 'is_member': ezt.boolean(False),\n }\n\n if mr.project:\n base_data['project_home_url'] = '/p/%s' % mr.project_name\n\n # Always add xhr-xsrf token because even anon users need some\n # pRPC methods, e.g., autocomplete, flipper, and charts.\n base_data['token_expires_sec'] = xsrf.TokenExpiresSec()\n base_data['xhr_token'] = xsrf.GenerateToken(\n mr.auth.user_id, xsrf.XHR_SERVLET_PATH)\n # Always add other anti-xsrf tokens when the user is logged in.\n if mr.auth.user_id:\n form_token_path = self._FormHandlerURL(mr.request.path)\n base_data['form_token'] = xsrf.GenerateToken(\n mr.auth.user_id, form_token_path)\n base_data['form_token_path'] = form_token_path\n\n return base_data",
"def runModel(quickLogger,\n\t base,\n modelFile=\"\",\n\t irfs=\"P7SOURCE_V6\",\n run=True):\n \n if(modelFile):\n model = modelFile\n else:\n model = base+\"_likeMinuit.xml\"\n\n\n try:\n checkForFiles(quickLogger,\n [base+\"_srcMaps.fits\",\n model,\n base+\"_ltcube.fits\",\n base+\"_BinnedExpMap.fits\"])\n except(FileNotFound):\n quickLogger.critical(\"One or more needed files do not exist.\")\n return\n\n model_map['srcmaps'] = base+\"_srcMaps.fits\"\n model_map['srcmdl'] = model\n model_map['outfile'] = base+\"_modelMap.fits\"\n model_map['expcube'] = base+\"_ltcube.fits\"\n model_map['irfs'] = irfs\n model_map['bexpmap'] = base+\"_BinnedExpMap.fits\"\n \n runCommand(model_map,quickLogger,run)",
"def run_parse(self):\n # Data set already has source file names from load_inputs\n parsedset = {}\n parsedset['data_set'] = []\n for log in self.input_files:\n parsemodule = self.parse_modules[self.args.parser]\n try:\n if self.args.tzone:\n parsemodule.tzone = self.args.tzone\n except NameError: pass\n parsedset['data_set'].append(parsemodule.parse_file(log))\n self.data_set = parsedset\n del(parsedset)",
"def run_attributes (ins, exp, run) :\n t0_sec = time()\n list_of_dicts = experiment_info.run_attributes(ins, exp, run)\n #print 'run_attributes for %s %s run:%d, t(sec) = %f' % (ins, exp, run, time()-t0_sec)\n return list_of_dicts",
"def _fetch_model_attributes(self, run_id, backend=None, results_dir=None, attributes_dump_name=None):\n run_dir = self._fetch_run_dir(run_id, backend, results_dir)\n if attributes_dump_name is None:\n attributes_dump_name = Config.default_log_name('model_attributes', run_id=run_id)\n model_attributes_path = os.path.join(run_dir, attributes_dump_name)\n if not os.path.isfile(model_attributes_path):\n raise ResultsNotFoundError('Model attributes not found in \"{}\"!'.format(model_attributes_path))\n with open(model_attributes_path) as stream:\n model_attributes = yaml.load(stream=stream, Loader=yaml.FullLoader)\n return model_attributes",
"def readRunDict(fileName):\n result = {}\n with FileWrapper(fileName) as f:\n for ln, line in enumerate(tqdm(f, desc='loading run (by line)', leave=False)):\n line = line.strip()\n if not line:\n continue\n fld = line.split()\n if len(fld) != 6:\n ln += 1\n raise Exception(\n f'Invalid line {ln} in run file {fileName} expected 6 white-space separated fields by got: {line}')\n\n qid, _, docid, rank, score, _ = fld\n result.setdefault(qid, {})[docid] = float(score)\n\n return result",
"def configs(self):\n run_path_pairs = list(self.run_paths.items())\n # If there are no summary event files, the projector should still work,\n # treating the `logdir` as the model checkpoint directory.\n if not run_path_pairs:\n run_path_pairs.append(('.', self.logdir))\n if (self._run_paths_changed() or\n _latest_checkpoints_changed(self._configs, run_path_pairs)):\n self.readers = {}\n self._configs, self.config_fpaths = self._read_latest_config_files(\n run_path_pairs)\n self._augment_configs_with_checkpoint_info()\n return self._configs",
"def fetch_logged_data(run_id: str) -> dict:\n client = mlflow.tracking.MlflowClient()\n data = client.get_run(run_id).data\n # Exclude system tags: https://www.mlflow.org/docs/latest/tracking.html#system-tags\n tags = {k: v for k, v in data.tags.items() if not k.startswith(\"mlflow.\")}\n artifacts = list(yield_artifacts(run_id))\n return {\n \"params\": data.params,\n \"metrics\": data.metrics,\n \"tags\": tags,\n \"artifacts\": artifacts,\n }",
"def interop_parse(self):\n # Parse the files and load the data\n try:\n run_metrics = py_interop_run_metrics.run_metrics()\n valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)\n py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load)\n run_metrics.read(self.path, valid_to_load)\n summary = py_interop_summary.run_summary()\n py_interop_summary.summarize_run_metrics(run_metrics, summary)\n # PhiX error rate for run over all \"usable cycles\"\n errorrate = summary.total_summary().error_rate()\n # Percent aligned PhiX\n pctaligned = summary.total_summary().percent_aligned()\n # Add the error rate and the percent of reads that align to PhiX to the metadata object\n for sample in self.metadata:\n sample.run.error_rate = '{:.2f}'.format(errorrate)\n sample.run.phix_aligned = '{:.2f}'.format(pctaligned)\n except:\n for sample in self.metadata:\n sample.run.error_rate = 'ND'\n sample.run.phix_aligned = 'ND'",
"def _get_all_run_infos(self):\r\n info_dir = self._settings.info_dir\r\n if not os.path.isdir(info_dir):\r\n return []\r\n paths = [os.path.join(info_dir, x) for x in os.listdir(info_dir)]\r\n\r\n # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.\r\n # We filter only those that have a timestamp, to avoid a race condition with writing\r\n # that field.\r\n return filter(lambda d: 'timestamp' in d, [RunInfo(os.path.join(p, 'info')).get_as_dict()\r\n for p in paths if os.path.isdir(p) and not os.path.islink(p)])",
"def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)",
"def run_metadata(self, run_metadata):\n\n if run_metadata is not None:\n run_metadata = self._validate_run_metadata(run_metadata)\n runs = ListDict()\n runs.append(run_metadata)\n runs.extend(\n self.station_metadata.runs, skip_keys=[run_metadata.id, \"0\"]\n )\n self._survey_metadata.stations[0].runs = runs",
"def get_run_info_miseq( instrument_model, application_version, tree ):\n run_stats = {}\n\n setup_node = tree.getroot().find(\"Setup\")\n if setup_node is None:\n setup_node = tree.getroot()\n\n # Get required tree nodes.\n flowcell_node = tree.getroot().find(\"FlowcellRFIDTag\")\n reads_node = tree.getroot().find('Reads')\n\n # Now actually populate various stats\n run_stats['flow_cell_id'] = flowcell_node.find('SerialNumber').text\n run_stats['date'] = tree.getroot().find('RunStartDate').text\n run_stats['instrument'] = tree.getroot().find('ScannerID').text\n run_stats['lanes'] = int(setup_node.find('NumLanes').text)\n run_stats['run_id'] = tree.getroot().find('RunID').text\n\n read_len = []\n index_len = []\n for read_info in reads_node.findall('RunInfoRead'):\n attrib = read_info.attrib\n if( attrib['IsIndexedRead'] == 'Y' ):\n index_len.append( int( attrib['NumCycles'] ) )\n else:\n read_len.append( int( attrib['NumCycles'] ) )\n\n run_stats['r1_length'] = read_len[0]\n run_stats['p7_index_length'] = index_len[0]\n\n run_stats['paired_end'] = False\n if( len( read_len ) == 2 ):\n run_stats['r1_length'] = read_len[1]\n run_stats['p5_index_length'] = index_len[1]\n run_stats['paired_end'] = True\n\n run_stats['instrument_type'] = instrument_model\n run_stats['reverse_complement_i5'] = False\n\n return run_stats",
"def extract(self):\n self.logger.info(f'Opening {self.file}')\n try:\n f = open(self.file, 'r')\n except FileNotFoundError:\n self.logger.info('Could not open the .vrt file')\n return\n\n result = {}\n self.logger.info('Extracting')\n for line in f:\n # Only attempt to extract if line is correct\n if self.is_target(line):\n # Add the closing text tag to make the line have proper XML\n xml_attributes = XML.fromstring(line.rstrip()+'</text>').attrib\n # Extract wanted parameters\n month = xml_attributes['date'][5:7] # Extract month\n # Add month to the keys\n if month not in result.keys():\n result[month] = []\n result[month].append({\n 'title': xml_attributes['title'], # Extract thread title\n 'thread_id': xml_attributes['thread_id'], # Extract thread id\n 'datetime': xml_attributes['datetime'] # Extract datetime\n })\n self.logger.info('Extraction complete')\n self.db = result\n # self.db[self.year] = result # Assign results to year\n self.save_result() # Save the results",
"def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")",
"def get_details():\r\n return run_operations.get_run_details(experiment_name, job_name).as_dict(key_transformer=camel_case_transformer)",
"def _get_run_info_dict(self, run_id):\r\n run_info_path = os.path.join(self._settings.info_dir, run_id, 'info')\r\n if os.path.exists(run_info_path):\r\n # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.\r\n return RunInfo(run_info_path).get_as_dict()\r\n else:\r\n return None",
"def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data",
"def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)",
"def _setup_run(cfg: Dict) -> Dict:\n now = datetime.now()\n day = f\"{now.day}\".zfill(2)\n month = f\"{now.month}\".zfill(2)\n hour = f\"{now.hour}\".zfill(2)\n minute = f\"{now.minute}\".zfill(2)\n run_name = f'run_{day}{month}_{hour}{minute}_seed{cfg[\"seed\"]}'\n # cfg[\"run_dir\"] = Path(__file__).absolute().parent / \"runs\" / run_name\n cfg[\"run_dir\"] = cfg[\"run_dir\"] / run_name\n if not cfg[\"run_dir\"].is_dir():\n cfg[\"train_dir\"] = cfg[\"run_dir\"] / \"data\" / \"train\"\n cfg[\"train_dir\"].mkdir(parents=True)\n cfg[\"val_dir\"] = cfg[\"run_dir\"] / \"data\" / \"val\"\n cfg[\"val_dir\"].mkdir(parents=True)\n else:\n raise RuntimeError(f\"There is already a folder at {cfg['run_dir']}\")\n\n # dump a copy of cfg to run directory\n with (cfg[\"run_dir\"] / \"cfg.json\").open(\"w\") as fp:\n temp_cfg = {}\n for key, val in cfg.items():\n if isinstance(val, PosixPath):\n temp_cfg[key] = str(val)\n elif isinstance(val, Dict):\n for k in val:\n if isinstance(val[k], PosixPath):\n val[k] = str(val[k])\n elif isinstance(val, pd.Timestamp):\n temp_cfg[key] = val.strftime(format=\"%d%m%Y\")\n else:\n temp_cfg[key] = val\n json.dump(temp_cfg, fp, sort_keys=True, indent=4)\n\n return cfg",
"def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title = \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)"
] | [
"0.59670794",
"0.5889647",
"0.5816284",
"0.5717778",
"0.5466364",
"0.52447045",
"0.52259123",
"0.51424",
"0.5129271",
"0.50799537",
"0.49862528",
"0.4979997",
"0.49188805",
"0.49156836",
"0.49127826",
"0.49025348",
"0.48849607",
"0.48842287",
"0.4880458",
"0.48771808",
"0.48716104",
"0.48246482",
"0.48207992",
"0.4812863",
"0.4799461",
"0.47970065",
"0.4793687",
"0.47868702",
"0.4783983",
"0.4725058"
] | 0.7738944 | 0 |
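The record above pairs the `readModelRunEntries(context)` helper with negatives such as `_readEntriesForSection` and `ModelRun.readFromMetadata`. The following minimal sketch is not part of the dataset; it only illustrates the read pattern those snippets share, using a tiny stand-in `Context` class and assumed names (`metadata.txt`, `modelrun`, `read_model_run_entries`) in place of the project's real `GenericMetadata`/`Context` API.

```python
# Hedged sketch of the INI-backed metadata read pattern shown in the snippets above.
# METADATA_FILENAME, MODEL_RUN_SECTION, Context and read_model_run_entries are all
# illustrative assumptions, not the dataset project's actual identifiers.
import configparser
import os

METADATA_FILENAME = 'metadata.txt'   # assumed store file name
MODEL_RUN_SECTION = 'modelrun'       # assumed section name


class Context:
    """Minimal stand-in for the Context object the query describes."""
    def __init__(self, projectDir):
        self.projectDir = projectDir


def read_model_run_entries(context):
    """Return key/value pairs from the model run section of the project's
    metadata store, mirroring the _readEntriesForSection pattern above."""
    section_dict = {}
    metadata_filepath = os.path.join(context.projectDir, METADATA_FILENAME)
    if os.path.exists(metadata_filepath):
        config = configparser.RawConfigParser()
        config.read(metadata_filepath)
        if config.has_section(MODEL_RUN_SECTION):
            section_dict = dict(config.items(MODEL_RUN_SECTION))
    return section_dict


# Usage: entries = read_model_run_entries(Context('/path/to/project'))
```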
Read all model runs from metadata and store in ModelRun instances. context -- Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns a list of ModelRun objects | def readModelRuns(context):
modelRunObjects = []
modelRuns = GenericMetadata.readModelRunEntries(context)
try:
runs = modelRuns['runs'].split(GenericMetadata.VALUE_DELIM)
for run in runs:
modelRunObjects.append(ModelRun.readFromMetadata(context, run))
except KeyError:
pass
return modelRunObjects | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def writeToMetadata(self, context):\n if self.modelType not in GenericMetadata.MODEL_TYPES:\n raise Exception(\"Model type %s is not among known model types: %s\" % (self.modelType, str(GenericMetadata.MODEL_TYPES) ) )\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n try:\n runs = modelRunEntries['runs'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n runs = []\n \n # Collected model entry and keys and values into lists so we can write to metadata store in batch\n keys = []\n values = []\n \n # Generate unique identifier for this model run. Unique ID is a combination of model type and a number\n entryNumber = 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n while fqId in runs:\n entryNumber += 1\n fqId = self.modelType + GenericMetadata.KEY_SEP + str(entryNumber)\n self.runNumber = entryNumber\n # Add new run to list of runs\n runs.append(fqId)\n runsStr = GenericMetadata.VALUE_DELIM.join(runs)\n keys.append('runs'); values.append(runsStr)\n # Write attributes for run\n keyProto = fqId + GenericMetadata.KEY_SEP\n runDate = keyProto + 'date_utc'\n keys.append(runDate); values.append( self.date.strftime(ModelRun.FMT_DATE) )\n runDesc = keyProto + 'description'\n keys.append(runDesc); values.append(self.description)\n runCmd = keyProto + 'command'\n keys.append(runCmd); values.append(self.command)\n runOutput = keyProto + 'output'\n keys.append(runOutput); values.append(self.output)\n # Write to metadata\n GenericMetadata.writeModelRunEntries(context, keys, values)",
"def run(self, *args, **kwargs) -> None:\n loop = tqdm(self.configs, desc='Configurations')\n for cfg in loop:\n loop.set_postfix_str(cfg.experiment_cfg['name'])\n for i in range(cfg.num_models):\n filename = None\n run_id = None\n if cfg.filenames is not None:\n if isinstance(cfg.filenames, str):\n filename = cfg.filenames\n else:\n filename = cfg.filenames[i]\n elif cfg.run_ids is not None:\n run_id = cfg.run_ids[i]\n\n run_cfg = modelgen_cfg_to_runner_cfg(cfg, run_id=run_id, filename=filename)\n runner = Runner(run_cfg, persist_metadata=cfg.experiment_cfg)\n runner.run()\n\n # clear up memory between runs\n torch.cuda.empty_cache()",
"def writeModelRunEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION, keys, values)",
"def readFromMetadata(cls, context, fqId): \n newInstance = ModelRun()\n (newInstance.modelType, newInstance.runNumber) = fqId.split(GenericMetadata.KEY_SEP)\n \n modelRunEntries = GenericMetadata.readModelRunEntries(context)\n keyProto = fqId + GenericMetadata.KEY_SEP\n \n runDate = keyProto + 'date_utc'\n newInstance.date = datetime.strptime(modelRunEntries[runDate], ModelRun.FMT_DATE)\n runDesc = keyProto + 'description'\n newInstance.description = modelRunEntries[runDesc]\n runCmd = keyProto + 'command'\n newInstance.command = modelRunEntries[runCmd]\n runOutput = keyProto + 'output'\n newInstance.output = modelRunEntries[runOutput]\n \n return newInstance",
"def load(self):\n if self.verbosity:\n self.header(\"Loading data files\")\n\n model_list = [\n x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())\n ]\n\n if self.resume_mode:\n # get finished load command logs of last update\n prev_loaded = [\n x.file_name\n for x in self.log_record.called.filter(\n command='loadcalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} models already loaded.\".format(len(prev_loaded)))\n # remove these from model_list\n model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]\n\n if self.verbosity:\n model_list = progress.bar(model_list)\n for model in model_list:\n call_command(\n \"loadcalaccessrawfile\",\n model.__name__,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n app_name=self.app_name,\n )",
"def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")",
"def Run(self):\n # get the runs object, which is an index for every tag.\n runs = self.GetRouteAndSave('runs')\n\n # collect sampled data.\n self.GetRouteAndSave('scalars')\n\n # now let's just download everything!\n for run, tag_type_to_tags in six.iteritems(runs):\n for tag_type, tags in six.iteritems(tag_type_to_tags):\n try:\n if tag_type == 'graph':\n # in this case, tags is a bool which specifies if graph is present.\n if tags:\n url = Url('graph', {'run': run})\n self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)\n elif tag_type == 'images':\n for t in tags:\n images = self.GetRouteAndSave('images', {'run': run, 'tag': t})\n for im in images:\n url = 'individualImage?' + im['query']\n # pull down the images themselves.\n self.GetAndSave(url, IMAGE_SUFFIX)\n elif tag_type == 'audio':\n for t in tags:\n audio = self.GetRouteAndSave('audio', {'run': run, 'tag': t})\n for snd in audio:\n url = 'individualAudio?' + snd['query']\n # pull down the audio clips themselves\n self.GetAndSave(url, AUDIO_SUFFIX)\n elif tag_type == 'run_metadata':\n for t in tags:\n url = Url('run_metadata', {'run': run, 'tag': t})\n self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)\n elif tag_type == 'firstEventTimestamp':\n pass\n else:\n for t in tags:\n # Save this, whatever it is :)\n self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})\n except IOError as e:\n PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),\n tf.logging.WARN)\n PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)\n PrintAndLog('continuing...', tf.logging.WARN)\n continue",
"def _get_models_from_metafile(dir: str):\n meta_indexes = load(osp.join(dir, 'model-index.yml'))\n for meta_path in meta_indexes['Import']:\n # meta_path example: mmcls/.mim/configs/conformer/metafile.yml\n meta_path = osp.join(dir, meta_path)\n metainfo = load(meta_path)\n yield from metainfo['Models']",
"def runs(self):\n if experiment_info.name2id(self.exp):\n runs_list = experiment_info.experiment_runs(self.instrument.upper(),self.exp)\n for item in runs_list:\n runnum = item['num']\n item['xtc_files'] = glob('{:}/*-r{:04d}*.xtc'.format(\n self.xtc_dir,runnum)) \n item['h5_files'] = glob('{:}/*-r{:04d}*.h5'.format(\n self.h5_dir,runnum)) \n else:\n runs_list = []\n\n return runs_list",
"def list_model_runs(self):\n return sorted([x[\"name\"] for x in self._store.read_model_runs()])",
"def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models",
"def load_and_cache_examples(args, tokenizer, split, task_name, model_type, predictions=None):\n processor = MoralStoriesProcessor()\n if task_name != 'consequence|action+context_genref':\n args.data_dir = os.path.join(args.original_data_dir, task_name, args.split_name)\n else:\n args.data_dir = os.path.join(args.original_data_dir, 'consequence|action+context_gen', args.split_name)\n\n # Get features\n logger.info('Creating features from dataset file at %s', args.data_dir)\n label_list = processor.get_labels()\n if split == 'train':\n examples = processor.get_train_examples(args.data_dir)\n elif split == 'dev':\n examples = processor.get_dev_examples(args.data_dir)\n elif split == 'test':\n examples = processor.get_test_examples(args.data_dir)\n else:\n raise Exception('split value should be in [train, dev, test]')\n\n # Replace gold sequences with model predictions\n if predictions is not None:\n if type(predictions[0]) != tuple:\n all_predictions = [tuple(predictions)]\n else:\n all_predictions = predictions\n extended_examples = list()\n\n for predictions in all_predictions:\n if predictions[0] == 'consequences':\n if len(all_predictions) == 1:\n # Remove negative examples\n positive_examples = list()\n for ex in examples:\n if ex.label == '1':\n positive_examples.append(ex)\n examples = positive_examples\n\n for pr_id, pr in enumerate(predictions[1]):\n ex = examples[pr_id]\n if ex.moral_consequence is not None:\n if len(all_predictions) == 1:\n ex.moral_consequence = pr\n else:\n ex.moral_consequence_draft = pr\n else:\n if len(all_predictions) == 1:\n ex.immoral_consequence = pr\n else:\n ex.immoral_consequence_draft = pr\n extended_examples.append(ex)\n examples = extended_examples\n extended_examples = list()\n\n if predictions[0] == 'consequence_labels':\n for pr_id, pr in enumerate(predictions[1]):\n ex = examples[pr_id]\n if ex.moral_consequence_draft is not None:\n if pr == 1:\n ex.moral_consequence_draft = ex.moral_consequence_draft + ' ' + '<|CSQ_TRUE|>'\n else:\n ex.moral_consequence_draft = ex.moral_consequence_draft + ' ' + '<|CSQ_FALSE|>'\n else:\n if pr == 0:\n ex.immoral_consequence_draft = ex.immoral_consequence_draft + ' ' + '<|CSQ_TRUE|>'\n else:\n ex.immoral_consequence_draft = ex.immoral_consequence_draft + ' ' + '<|CSQ_FALSE|>'\n extended_examples.append(ex)\n examples = extended_examples\n extended_examples = list()\n\n # Generate features; target task is classification\n pad_token_id = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]\n if pad_token_id is None:\n pad_token_id = tokenizer.convert_tokens_to_ids([tokenizer.eos_token])[0]\n features = convert_examples_to_features(examples,\n label_list,\n args.max_seq_length,\n args.max_gen_length,\n tokenizer,\n task_name,\n model_type,\n TASK_DICT[task_name],\n cls_token_at_end=False,\n cls_token=tokenizer.cls_token,\n sep_token=tokenizer.sep_token,\n sep_token_extra=bool(model_type in ['roberta']),\n cls_token_segment_id=0,\n pad_on_left=False,\n pad_token=pad_token_id,\n pad_token_segment_id=0,\n is_eval=split == 'test',\n fit_to_max_corpus_len=True)\n\n # Make feature tensors\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n if 'gen' in task_name:\n all_label_masks = torch.tensor([f.label_mask for f in features], 
dtype=torch.long)\n all_gen_prompts = torch.tensor([f.gen_prompt_id for f in features], dtype=torch.long)\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,\n all_label_ids, all_label_masks, all_gen_prompts)\n else:\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n return dataset",
"def run_models(\n self,\n normal=True,\n interrupt=True,\n run_start=None,\n state_builder=\"acis\",\n hrc=False,\n ):\n if hrc:\n loads = hrc_loads\n else:\n loads = test_loads\n if normal and \"normal\" in loads:\n for load in loads[\"normal\"]:\n self.run_model(\n load_week=load,\n run_start=run_start,\n state_builder=state_builder,\n )\n if interrupt and \"interrupt\" in loads:\n for load in loads[\"interrupt\"]:\n self.run_model(\n load_week=load,\n interrupt=True,\n run_start=run_start,\n state_builder=state_builder,\n )",
"def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up 
summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()",
"def run(self, input_files, input_metadata, output_files):\n try:\n # Set and check execution directory. If not exists the directory will be created.\n execution_path = os.path.abspath(self.configuration.get('execution', '.'))\n execution_parent_dir = os.path.dirname(execution_path)\n if not os.path.isdir(execution_parent_dir):\n os.makedirs(execution_parent_dir)\n\n # Update working directory to execution path\n os.chdir(execution_path)\n logger.debug(\"Execution path: {}\".format(execution_path))\n\n # Set file names for output files (with random name if not predefined)\n for key in output_files.keys():\n if output_files[key] is not None:\n pop_output_path = os.path.abspath(output_files[key])\n self.populable_outputs[key] = pop_output_path\n output_files[key] = pop_output_path\n else:\n errstr = \"The output_file[{}] can not be located. Please specify its expected path.\".format(key)\n logger.error(errstr)\n raise Exception(errstr)\n\n logger.debug(\"Init execution of the Machine Learning Model generation\")\n # Prepare file paths\n for key in input_files.keys():\n if key == 'radiomic_features':\n dataset = input_files[key]\n elif key == 'ML_technique':\n ml = input_files[key]\n else:\n logger.debug('Unrecognized input file key {}'.format(key))\n continue\n\n\n\n output_metadata = {}\n for key in output_files.keys():\n \n logger.info('VRE_ML: Iterating over Key {}'.format(key))\n\n \n if os.path.isfile(output_files[key]):\n meta = Metadata()\n meta.file_path = output_files[key] # Set file_path for output files\n \n logger.info('VRE_ML: Update metadata with key {} and value {}'.format(key, meta.file_path))\n\n meta.data_type = 'tool_statistics'\n meta.file_type = 'PDF'\n\n # Set sources for output files\n meta.sources = [output_files[key]+'.pdf']\n # Generate model\n generate_model.run(dataset=dataset,output_files[key]+'.pdf')\n\n # Append new element in output metadata\n logger.info('VRE_ML: Update metadata with key {} and value {}'.format(key, meta.file_path))\n output_metadata.update({key: meta})\n\n else:\n logger.warning(\"Output {} not found. Path {} not exists\".format(key, output_files[key]))\n\n logger.debug(\"Output metadata created\")\n\n return output_files, output_metadata\n\n except Exception:\n errstr = \"VRE ML RUNNER pipeline failed. See logs\"\n logger.fatal(errstr)\n raise Exception(errstr)",
"def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on datset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on datset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from {checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... 
done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = 
label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)",
"def run_metadata(self, run_metadata):\n\n if run_metadata is not None:\n run_metadata = self._validate_run_metadata(run_metadata)\n runs = ListDict()\n runs.append(run_metadata)\n runs.extend(\n self.station_metadata.runs, skip_keys=[run_metadata.id, \"0\"]\n )\n self._survey_metadata.stations[0].runs = runs",
"def import_all():\n\n # count the number of files loaded\n count = 0\n\n # get model name\n model_name_list = [model for data_models in settings.OBJECT_DATA_MODELS\n for model in data_models]\n\n model_name_list += [model for model in settings.OTHER_DATA_MODELS]\n\n # import models one by one\n for model_name in model_name_list:\n import_model(model_name)\n\n # import localized strings\n import_localized_strings(settings.LANGUAGE_CODE)",
"def create_child_run_obj(self, models, runs, proj):\n run_objs = []\n for run in runs:\n for model in models:\n try:\n run_obj = aospy.to_run(run, model, proj, self.projs)\n if isinstance(run, ObjectsForCalc):\n run_objs.append(ObjectsForCalc(run_obj))\n else:\n run_objs.append(run_obj)\n except AttributeError as ae:\n logging.info(str(ae))\n # Retain the original type (e.g. list v. ObjectsForCalc).\n run_objs = type(runs)(run_objs)\n if 'cmip5' in [p.name for p in proj]:\n if isinstance(run_objs[0], list):\n return run_objs[0]\n return [run_objs[0]]\n # If flat list, return the list. If nested, then flatten it.\n if all([isinstance(r, aospy.Run) for r in run_objs]):\n return run_objs\n return list(itertools.chain.from_iterable(run_objs))",
"async def get_model_runs(model_run_timestamp: datetime = None,\n response_format: ObjectTypeEnum = ObjectTypeEnum.GEOJSON):\n try:\n logger.info('/c-haines/model-runs')\n if model_run_timestamp is None:\n model_run_timestamp = await _get_most_recent_model_run(ModelEnum.GDPS, response_format)\n return await fetch_model_runs(model_run_timestamp)\n except Exception as exception:\n logger.critical(exception, exc_info=True)\n raise",
"def _run_model(self, samples: Union[NumpyFloatArray, NumpyIntArray]):\n\n self.runmodel_object.run(samples=samples, append_samples=False)\n model_evals = copy.deepcopy(np.array(self.runmodel_object.qoi_list))\n\n return model_evals",
"def run_parse(self):\n # Data set already has source file names from load_inputs\n parsedset = {}\n parsedset['data_set'] = []\n for log in self.input_files:\n parsemodule = self.parse_modules[self.args.parser]\n try:\n if self.args.tzone:\n parsemodule.tzone = self.args.tzone\n except NameError: pass\n parsedset['data_set'].append(parsemodule.parse_file(log))\n self.data_set = parsedset\n del(parsedset)",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")",
"def runs_loader(path):\n files = sorted(glob.glob(f\"{path}/*_runs.csv\"))\n df_lis = list(range(len(files)))\n for i, f in enumerate(files):\n try:\n df_lis[i] = pd.read_csv(f, sep=\",\", header=0)\n print('Read runs.csv\\n', f, df_lis[i].shape,\n df_lis[i]['dataset__id'][0], df_lis[i]['pipeline__id'][0])\n except Exception as e:\n print(e)\n continue\n df = pd.concat(df_lis, axis=0, sort=False).reset_index()\n # with pd.option_context('display.max_rows', None,\n # 'display.max_columns', None):\n # msg = tabulate.tabulate(df, headers='keys', tablefmt='psql')\n # print(msg)\n return df",
"def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models",
"def bulk_train(self):\n logger.info(\"collecting subfolders - relations\")\n relations = self.collect_subfolders(self.input_dir)\n logger.info(\"relations - {}\".format(relations))\n\n execution_times = []\n\n for rel, rel_path in tqdm(relations.items(), desc=\"relations\"):\n logger.info(\"collecting training files from {}\".format(rel_path))\n tr_files = self.collect_files(rel_path, self.regexp_train)\n hyper_params = self.get_hyperparams()\n hyper_params['graph'] = tr_files\n\n output_folder = os.path.join(self.output_dir, rel)\n if not os.path.exists(output_folder):\n logger.info(\"creating {} (did not exist)\".format(output_folder))\n os.makedirs(output_folder)\n\n for params in tqdm(ParameterGrid(hyper_params), desc=\"training embedding\"):\n logger.info(\"hyperparams: {}\".format(params))\n train_file = params['graph']\n model_name = self.compute_model_name(params, output_folder)\n logger.info('training starspace model \"{}\" from file \"{}\"'.format(\n model_name, train_file))\n external_output, delta = self.call_starspace(params, train_file, model_name)\n logger.info(\"executed in {:0.2f}s\".format(delta))\n\n logger.info(\"external command output logged in {}\".format(self.external_log))\n if not os.path.exists(self.output_dir):\n logger.info(\"creating {} (did not exist)\".format(self.output_dir))\n os.makedirs(self.output_dir)\n\n with open(self.external_log, 'a') as f:\n f.write(external_output)\n\n execution_times.append(dict({ 'time': delta }, **params))\n \n return execution_times",
"def _get_all_run_infos(self):\r\n info_dir = self._settings.info_dir\r\n if not os.path.isdir(info_dir):\r\n return []\r\n paths = [os.path.join(info_dir, x) for x in os.listdir(info_dir)]\r\n\r\n # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.\r\n # We filter only those that have a timestamp, to avoid a race condition with writing\r\n # that field.\r\n return filter(lambda d: 'timestamp' in d, [RunInfo(os.path.join(p, 'info')).get_as_dict()\r\n for p in paths if os.path.isdir(p) and not os.path.islink(p)])",
"def executions(self, context: Any) -> list[Any]:\n pass"
] | [
"0.7975116",
"0.66243535",
"0.5843691",
"0.5701001",
"0.56369174",
"0.55518204",
"0.5430202",
"0.5383377",
"0.53384364",
"0.5260111",
"0.5252604",
"0.52399814",
"0.5229123",
"0.52053994",
"0.5201298",
"0.51903266",
"0.5172383",
"0.51055664",
"0.50917447",
"0.50577384",
"0.5052161",
"0.5036825",
"0.50264466",
"0.50254315",
"0.5024633",
"0.4986918",
"0.49706757",
"0.49571094",
"0.49517202",
"0.49454448"
] | 0.75045604 | 1 |
Read all point climate entries from the metadata store for a given project. Arguments: context -- Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns: a dictionary of key/value pairs from the point climate section of the project metadata | def readClimatePointEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readClimateGridEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION)",
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def readClimatePointStations(context):\n stationObjects = []\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n for station in stations:\n stationObjects.append(ClimatePointStation.readFromMetadata(context, station))\n except KeyError:\n pass\n return stationObjects",
"def readFromMetadata(cls, context, fqId):\n newInstance = ClimatePointStation()\n (newInstance.type, newInstance.id) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n\n climate = GenericMetadata.readClimatePointEntries(context)\n \n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP\n longitude = keyProto + 'longitude'\n newInstance.longitude = float(climate[longitude])\n latitude = keyProto + 'latitude'\n newInstance.latitude = float(climate[latitude])\n elevation = keyProto + 'elevation'\n newInstance.elevation = float(climate[elevation])\n name = keyProto + 'name'\n newInstance.name = climate[name] \n startDate = keyProto + 'startdate'\n try:\n newInstance.startDate = datetime.strptime(climate[startDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass \n endDate = keyProto + 'enddate'\n try:\n newInstance.endDate = datetime.strptime(climate[endDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass\n variablesKey = keyProto + 'variables'\n try:\n newInstance.variables = climate[variablesKey].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n pass\n data = keyProto + 'data'\n try:\n newInstance.data = climate[data]\n except KeyError:\n pass\n try:\n for var in newInstance.variables:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n newInstance.variablesData[var] = climate[varKey]\n except KeyError:\n pass\n \n return newInstance",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def get_dicom_info(paths, index_col=None, verbose=False):\n meta_info = []\n paths = tqdm_notebook(paths, leave=False) if verbose else paths\n for path in paths:\n first_slice = dicom.read_file(os.path.join(path, os.listdir(path)[0]))\n\n if hasattr(first_slice, 'PatientAge'):\n patient_age = str(first_slice.PatientAge)\n else:\n patient_age = ''\n\n if hasattr(first_slice, 'PatientSex'):\n patient_sex = str(first_slice.PatientSex)\n else:\n patient_sex = ''\n\n locations = []\n for name in os.listdir(path):\n slice_path = os.path.join(path, name)\n dicom_slice = dicom.read_file(slice_path, stop_before_pixels=True)\n locations.append(float(dicom_slice.SliceLocation))\n\n steps_z = np.diff(np.sort(np.array(locations)))\n spacing_z = np.min(steps_z)\n info_dict = {\n \"UniformSpacing\": np.allclose(steps_z, spacing_z),\n 'MinSpacingZ': np.min(steps_z),\n 'MaxSpacingZ': np.max(steps_z),\n 'SliceThickness': float(first_slice.SliceThickness),\n 'SpacingZ': spacing_z,\n 'SpacingY': float(first_slice.PixelSpacing[0]),\n 'SpacingX': float(first_slice.PixelSpacing[1]),\n 'StudyID': str(first_slice.StudyID),\n 'ConvolutionKernel': str(first_slice.ConvolutionKernel),\n 'FilterType': str(first_slice.FilterType),\n 'WindowWidth': str(first_slice.WindowWidth),\n 'WindowCenter': str(first_slice.WindowCenter),\n 'PatientAge': patient_age,\n 'PatientSex': patient_sex,\n 'AccessionNumber': str(first_slice.AccessionNumber),\n 'PatientID': str(first_slice.PatientID),\n 'Rows': int(first_slice.Rows),\n 'Columns': int(first_slice.Columns),\n 'NumSlices': len(os.listdir(path)),\n 'ScanID': os.path.basename(path),\n 'Index': str(first_slice.AccessionNumber) + '_' + os.path.basename(path),\n 'ScanPath': path\n }\n meta_info.append(info_dict)\n return pd.DataFrame(meta_info) if index_col is None else pd.DataFrame(meta_info).set_index(index_col)",
"def load_cdo_results():\n # Location of data files\n cdo_dir = os.path.dirname(__file__)+'/data/cdo_results/'\n # Dictionary in which to store data\n cdo_dict = {}\n # Load gridcell area data\n cdo_dict['gridarea'] = xr.open_dataset(cdo_dir+'data01_gridarea.nc',\n decode_times=False, autoclose=True)\n # Load data for regions\n for region in load_region_bounds_dict().keys():\n for suffix in ['', '_area', '_fldmean']:\n key = region + suffix\n if key == 'Glb': # for globe is data01.nc\n cdo_dict[key] = xr.open_dataset(cdo_dir+'../data01.nc',\n decode_times=False, autoclose=True)\n else:\n cdo_dict[key] = xr.open_dataset(cdo_dir+'data01_'+key+'.nc',\n decode_times=False, autoclose=True)\n # Return dictioary of data\n return cdo_dict",
"def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data",
"def cfdReadPointsFile(self):\r\n\r\n with open(self.pointsFile,\"r\") as fpid:\r\n \r\n print('Reading points file ...')\r\n points_x=[]\r\n points_y=[]\r\n points_z=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n self.numberOfNodes = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\"\")\r\n tline=tline.replace(\")\",\"\")\r\n tline=tline.split()\r\n \r\n points_x.append(float(tline[0]))\r\n points_y.append(float(tline[1]))\r\n points_z.append(float(tline[2]))\r\n \r\n ## (array) with the mesh point coordinates \r\n self.nodeCentroids = np.array((points_x, points_y, points_z), dtype=float).transpose()",
"def read_kml():\n global kmldata\n global CONFIG\n if type(kmldata) == type(None):\n if not os.path.exists(CONFIG[\"kmlfile\"]):\n fiona.drvsupport.supported_drivers['KML'] = 'rw'\n kmldata = geopandas.read_file(CONFIG[\"kmlrepo\"], driver=\"KML\")\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n with open(CONFIG[\"kmlfile\"], \"wb\") as fh:\n pickle.dump(kmldata,fh)\n else:\n with open(CONFIG[\"kmlfile\"], \"rb\") as fh:\n kmldata = pickle.load(fh)\n return kmldata",
"def get_entries(self):\n prefixes = self.spot_mappings\n with open(self.path, 'r') as f:\n prefix_key = self.seek_through_comments(f).rsplit(\"/\", 1)[-1]\n prefix = prefixes[prefix_key]\n\n for ln in self.split_log_lines(f, \"|\", prefix):\n yield LogItem(*ln).get_properties()",
"def read_locations(db, openfile):\n pass",
"def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.",
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def GatherBaseData(self, mr, nonce):\n project = mr.project\n\n project_summary = ''\n project_alert = None\n project_read_only = False\n project_home_page = ''\n project_thumbnail_url = ''\n if project:\n project_summary = project.summary\n project_alert = _CalcProjectAlert(project)\n project_read_only = project.read_only_reason\n project_home_page = project.home_page\n project_thumbnail_url = tracker_views.LogoView(project).thumbnail_url\n\n with work_env.WorkEnv(mr, self.services) as we:\n is_project_starred = False\n project_view = None\n if mr.project:\n if permissions.UserCanViewProject(\n mr.auth.user_pb, mr.auth.effective_ids, mr.project):\n is_project_starred = we.IsProjectStarred(mr.project_id)\n # TODO(jrobbins): should this be a ProjectView?\n project_view = template_helpers.PBProxy(mr.project)\n\n grid_x_attr = None\n grid_y_attr = None\n hotlist_view = None\n if mr.hotlist:\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user,\n features_bizobj.UsersInvolvedInHotlists([mr.hotlist]))\n hotlist_view = hotlist_views.HotlistView(\n mr.hotlist, mr.perms, mr.auth, mr.viewed_user_auth.user_id,\n users_by_id, self.services.hotlist_star.IsItemStarredBy(\n mr.cnxn, mr.hotlist.hotlist_id, mr.auth.user_id))\n grid_x_attr = mr.x.lower()\n grid_y_attr = mr.y.lower()\n\n app_version = os.environ.get('CURRENT_VERSION_ID')\n\n viewed_username = None\n if mr.viewed_user_auth.user_view:\n viewed_username = mr.viewed_user_auth.user_view.username\n\n issue_entry_url = 'entry'\n config = None\n if mr.project_id and self.services.config:\n with mr.profiler.Phase('getting config'):\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n grid_x_attr = (mr.x or config.default_x_attr).lower()\n grid_y_attr = (mr.y or config.default_y_attr).lower()\n issue_entry_url = _LoginOrIssueEntryURL(mr, config)\n\n viewing_self = mr.auth.user_id == mr.viewed_user_auth.user_id\n offer_saved_queries_subtab = (\n viewing_self or mr.auth.user_pb and mr.auth.user_pb.is_site_admin)\n\n login_url = _SafeCreateLoginURL(mr)\n logout_url = _SafeCreateLogoutURL(mr)\n logout_url_goto_home = users.create_logout_url('/')\n version_base = _VersionBaseURL(mr.request)\n\n base_data = {\n # EZT does not have constants for True and False, so we pass them in.\n 'True': ezt.boolean(True),\n 'False': ezt.boolean(False),\n\n 'local_mode': ezt.boolean(settings.local_mode),\n\n 'site_name': settings.site_name,\n 'show_search_metadata': ezt.boolean(False),\n 'page_template': self._PAGE_TEMPLATE,\n 'main_tab_mode': self._MAIN_TAB_MODE,\n 'project_summary': project_summary,\n 'project_home_page': project_home_page,\n 'project_thumbnail_url': project_thumbnail_url,\n\n 'hotlist_id': mr.hotlist_id,\n 'hotlist': hotlist_view,\n\n 'hostport': mr.request.host,\n 'absolute_base_url': '%s://%s' % (mr.request.scheme, mr.request.host),\n 'project_home_url': None,\n 'link_rel_canonical': None, # For specifying <link rel=\"canonical\">\n 'projectname': mr.project_name,\n 'project': project_view,\n 'project_is_restricted': ezt.boolean(_ProjectIsRestricted(mr)),\n 'offer_contributor_list': ezt.boolean(\n permissions.CanViewContributorList(mr, mr.project)),\n 'logged_in_user': mr.auth.user_view,\n 'form_token': None, # Set to a value below iff the user is logged in.\n 'form_token_path': None,\n 'token_expires_sec': None,\n 'xhr_token': None, # Set to a value below iff the user is logged in.\n 'flag_spam_token': None,\n 'nonce': nonce,\n 'perms': mr.perms,\n 'warnings': mr.warnings,\n 'errors': mr.errors,\n\n 
'viewed_username': viewed_username,\n 'viewed_user': mr.viewed_user_auth.user_view,\n 'viewed_user_pb': template_helpers.PBProxy(\n mr.viewed_user_auth.user_pb),\n 'viewing_self': ezt.boolean(viewing_self),\n 'viewed_user_id': mr.viewed_user_auth.user_id,\n 'offer_saved_queries_subtab': ezt.boolean(offer_saved_queries_subtab),\n\n 'currentPageURL': mr.current_page_url,\n 'currentPageURLEncoded': mr.current_page_url_encoded,\n 'login_url': login_url,\n 'logout_url': logout_url,\n 'logout_url_goto_home': logout_url_goto_home,\n 'continue_issue_id': mr.continue_issue_id,\n 'feedback_email': settings.feedback_email,\n 'category_css': None, # Used to specify a category of stylesheet\n 'category2_css': None, # specify a 2nd category of stylesheet if needed.\n 'page_css': None, # Used to add a stylesheet to a specific page.\n\n 'can': mr.can,\n 'query': mr.query,\n 'colspec': None,\n 'sortspec': mr.sort_spec,\n\n # Options for issuelist display\n 'grid_x_attr': grid_x_attr,\n 'grid_y_attr': grid_y_attr,\n 'grid_cell_mode': mr.cells,\n 'grid_mode': None,\n 'list_mode': None,\n 'chart_mode': None,\n\n 'issue_entry_url': issue_entry_url,\n 'is_cross_project': ezt.boolean(False),\n\n # for project search (some also used in issue search)\n 'start': mr.start,\n 'num': mr.num,\n 'groupby': mr.group_by_spec,\n 'q_field_size': (\n min(framework_constants.MAX_ARTIFACT_SEARCH_FIELD_SIZE,\n max(framework_constants.MIN_ARTIFACT_SEARCH_FIELD_SIZE,\n len(mr.query) + framework_constants.AUTOSIZE_STEP))),\n 'mode': None, # Display mode, e.g., grid mode.\n 'ajah': mr.ajah,\n 'table_title': mr.table_title,\n\n 'alerts': alerts.AlertsView(mr), # For alert.ezt\n 'project_alert': project_alert,\n\n 'title': None, # First part of page title\n 'title_summary': None, # Appended to title on artifact detail pages\n\n # TODO(jrobbins): make sure that the templates use\n # project_read_only for project-mutative actions and if any\n # uses of read_only remain.\n 'project_read_only': ezt.boolean(project_read_only),\n 'site_read_only': ezt.boolean(settings.read_only),\n 'banner_time': servlet_helpers.GetBannerTime(settings.banner_time),\n 'read_only': ezt.boolean(settings.read_only or project_read_only),\n 'site_banner_message': settings.banner_message,\n 'robots_no_index': None,\n 'analytics_id': settings.analytics_id,\n\n 'is_project_starred': ezt.boolean(is_project_starred),\n\n 'version_base': version_base,\n 'app_version': app_version,\n 'gapi_client_id': settings.gapi_client_id,\n 'viewing_user_page': ezt.boolean(False),\n 'old_ui_url': None,\n\n 'is_member': ezt.boolean(False),\n }\n\n if mr.project:\n base_data['project_home_url'] = '/p/%s' % mr.project_name\n\n # Always add xhr-xsrf token because even anon users need some\n # pRPC methods, e.g., autocomplete, flipper, and charts.\n base_data['token_expires_sec'] = xsrf.TokenExpiresSec()\n base_data['xhr_token'] = xsrf.GenerateToken(\n mr.auth.user_id, xsrf.XHR_SERVLET_PATH)\n # Always add other anti-xsrf tokens when the user is logged in.\n if mr.auth.user_id:\n form_token_path = self._FormHandlerURL(mr.request.path)\n base_data['form_token'] = xsrf.GenerateToken(\n mr.auth.user_id, form_token_path)\n base_data['form_token_path'] = form_token_path\n\n return base_data",
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def _compute_context(self):\n # get PDBID\n pdbid = get_id(self.pdb_path)\n\n # initialize potential map\n self.potential_map = {}\n\n with open(self.pdb_path, 'r') as f:\n struc = PDBParser().get_structure(pdbid, f)\n\n for bond in self._compute_hydrogen_bonds(get_amino_acids(struc)):\n self.potential_map[bond[:2]] = bond[2:]\n\n self._compute_plain_from_map()",
"def PointCloudData(pdbid, chainid):\n pc = []\n bf = []\n resnames = []\n hets = []\n\n if not os.path.exists(os.getcwd() + '/' + filename):\n pdbl = PDB.PDBList()\n pdbl.retrieve_pdb_file(pdbid, False, os.getcwd(), 'pdb', True)\n parser = PDB.PDBParser(PERMISSIVE=1)\n structure = parser.get_structure(pdbid, 'pdb'+pdbid+'.ent')\n model = structure[0]\n chain = model[chainid]\n for residue in chain:\n for atom in residue:\n if atom.get_id() == \"CA\":\n resnames.append(residue.get_resname())\n bf.append(atom.get_bfactor())\n pc.append(atom.get_coord())\n pointcloud = np.asarray(pc)\n return pointcloud, bf, resnames",
"def loadDCPos(self):\n with open(gv.DC_POS_PATH, 'r') as fh: \n for line in fh:\n dcID, _, dcPos = line.rstrip().split(';')\n self.centerDict[dcID] = [float(i) for i in dcPos.split(',')]",
"def writeClimatePointEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_POINT_SECTION, key, value)",
"def read_config_file() -> typing.MutableMapping[str, typing.Any]:\n return _read_file()",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata",
"def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata"
] | [
"0.65940845",
"0.60609496",
"0.58755904",
"0.57020724",
"0.5674292",
"0.5553107",
"0.5467524",
"0.54135525",
"0.5330131",
"0.5210221",
"0.5012895",
"0.501239",
"0.50113815",
"0.50112027",
"0.49934754",
"0.49894994",
"0.49829355",
"0.4965743",
"0.4952534",
"0.49039957",
"0.48897815",
"0.4889665",
"0.48888108",
"0.4861901",
"0.4851275",
"0.48480266",
"0.47860467",
"0.47792357",
"0.47789076",
"0.47692978"
] | 0.78822577 | 0 |
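The readClimatePointEntries document above (and the readClimateGridEntries variant among its negatives) returns a plain dictionary of flattened metadata keys for the project. A minimal usage sketch, assuming the GenericMetadata class quoted above is importable (the module path below is a guess) and that the context object only needs a projectDir attribute for these reads, so a local stand-in is used:

# Usage sketch -- hypothetical paths and import location, not taken from the row above.
from ecohydrolib.metadata import GenericMetadata  # import path is an assumption

class Context(object):
    # Minimal stand-in: the readers above only touch context.projectDir.
    def __init__(self, projectDir):
        self.projectDir = projectDir

context = Context('/path/to/my/project')  # hypothetical project directory
pointEntries = GenericMetadata.readClimatePointEntries(context)
gridEntries = GenericMetadata.readClimateGridEntries(context)
for key, value in pointEntries.items():
    print('%s = %s' % (key, value))  # e.g. the 'stations' key holds a delimited list of station IDs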
Read all climate point stations from metadata and store in ClimatePointStation instances. Arguments: context -- Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns: a list of ClimatePointStation objects | def readClimatePointStations(context):
stationObjects = []
climatePoints = GenericMetadata.readClimatePointEntries(context)
try:
stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)
for station in stations:
stationObjects.append(ClimatePointStation.readFromMetadata(context, station))
except KeyError:
pass
return stationObjects | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)",
"def readFromMetadata(cls, context, fqId):\n newInstance = ClimatePointStation()\n (newInstance.type, newInstance.id) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n\n climate = GenericMetadata.readClimatePointEntries(context)\n \n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP\n longitude = keyProto + 'longitude'\n newInstance.longitude = float(climate[longitude])\n latitude = keyProto + 'latitude'\n newInstance.latitude = float(climate[latitude])\n elevation = keyProto + 'elevation'\n newInstance.elevation = float(climate[elevation])\n name = keyProto + 'name'\n newInstance.name = climate[name] \n startDate = keyProto + 'startdate'\n try:\n newInstance.startDate = datetime.strptime(climate[startDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass \n endDate = keyProto + 'enddate'\n try:\n newInstance.endDate = datetime.strptime(climate[endDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass\n variablesKey = keyProto + 'variables'\n try:\n newInstance.variables = climate[variablesKey].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n pass\n data = keyProto + 'data'\n try:\n newInstance.data = climate[data]\n except KeyError:\n pass\n try:\n for var in newInstance.variables:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n newInstance.variablesData[var] = climate[varKey]\n except KeyError:\n pass\n \n return newInstance",
"def writeToMetadata(self, context):\n fqId = self.type + GenericMetadata.COMPOUND_KEY_SEP + self.id\n fqId = fqId.lower()\n\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n stations = []\n # Write station metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in stations:\n stations.append(fqId)\n stationsStr = GenericMetadata.VALUE_DELIM.join(stations)\n keys.append('stations'); values.append(stationsStr)\n # Write attributes for station\n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP \n longitude = keyProto + 'longitude'\n keys.append(longitude); values.append(self.longitude)\n latitude = keyProto + 'latitude'\n keys.append(latitude); values.append(self.latitude)\n elevation = keyProto + 'elevation'\n keys.append(elevation); values.append(self.elevation)\n name = keyProto + 'name'\n keys.append(name); values.append(self.name)\n if self.startDate:\n startDate = keyProto + 'startdate'\n keys.append(startDate); values.append(self.startDate.strftime(ClimatePointStation.FMT_DATE))\n if self.endDate:\n endDate = keyProto + 'enddate'\n keys.append(endDate); values.append(self.endDate.strftime(ClimatePointStation.FMT_DATE))\n if self.variables:\n variablesKey = keyProto + 'variables'\n variablesValue = GenericMetadata.VALUE_DELIM.join(self.variables)\n keys.append(variablesKey); values.append(variablesValue)\n if self.data != None:\n data = keyProto + 'data'\n keys.append(data); values.append(self.data)\n elif self.variablesData:\n # Try to write data entries for each variable separately\n vars = self.variablesData.keys()\n for var in vars:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n keys.append(varKey); values.append(self.variablesData[var])\n GenericMetadata.writeClimatePointEntries(context, keys, values)",
"def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)",
"def readClimateGridEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION)",
"def _get_stations_local() -> List[dict]:\n LOGGER.info('Using pre-generated json to retrieve station list')\n with open(weather_stations_file_path) as weather_stations_file:\n json_data = json.load(weather_stations_file)\n return json_data['weather_stations']",
"def __save_all():\n \n # Use directory listing from stilt-web data. Ignore stations that\n # may be in the queue but are not finished yet.\n allStations = [s for s in os.listdir(CPC.STILTPATH) if os.path.exists(CPC.STILTPATH + s)]\n\n \n # read lis of ICOS stations\n icosStations = cpstation.getIdList()\n icosStations = list(icosStations['id'][icosStations.theme=='AS'])\n \n # dictionary to return\n stations = {}\n\n # fill dictionary with ICOS station id, latitude, longitude and altitude\n for ist in tqdm(sorted(allStations)):\n \n stations[ist] = {}\n # get filename of link (original stiltweb directory structure) and extract location information\n \n loc_ident = os.readlink(CPC.STILTPATH+ist)\n clon = loc_ident[-13:-6]\n lon = float(clon[:-1])\n if clon[-1:] == 'W':\n lon = -lon\n clat = loc_ident[-20:-14]\n lat = float(clat[:-1])\n if clat[-1:] == 'S':\n lat = -lat\n alt = int(loc_ident[-5:])\n\n stations[ist]['lat']=lat\n stations[ist]['lon']=lon\n stations[ist]['alt']=alt\n stations[ist]['locIdent']=os.path.split(loc_ident)[-1]\n \n # set the name and id\n stations[ist]['id'] = ist\n \n # set a flag if it is an ICOS station\n stn = ist[0:3].upper()\n if stn in icosStations:\n stations[ist]['icos'] = cpstation.get(stn).info()\n lat = stations[ist]['icos']['lat']\n lon = stations[ist]['icos']['lon']\n else:\n stations[ist]['icos'] = False \n lat = stations[ist]['lat']\n lon = stations[ist]['lon']\n \n stations[ist]['geoinfo'] = country.get(latlon=[lat,lon])\n \n return stations",
"def get_all_stations(engine): \n # Query db\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.latitude, \"\n \" d.longitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\")\n df = pd.read_sql(sql, engine)\n\n return df",
"async def get_stations() -> List[WeatherStation]:\n # Check if we're really using the api, or loading from pre-generated files.\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_remote()\n return _get_stations_local()",
"def all(self, skip_cache=False):\n now = _time_ms(datetime.datetime.utcnow())\n if skip_cache or now - self._last_updated > CACHE_LIMIT:\n self._process_stations()\n return self._stations_lst",
"def writeClimatePointEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION, keys, values)",
"def get_all_station_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n num = demand_data[:, 0, -2, np.newaxis] # todo check meaning here, get quick and slow feature\n\n raw_data = np.concatenate((num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=GENERAL_HEADER)\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass",
"def processStationInfo(obs_loc_df, source, st_list=None):\n if not st_list:\n st_list = dict()\n st_data = obs_loc_df['station_id']\n lat_data = obs_loc_df['latitude (degree)']\n lon_data = obs_loc_df['longitude (degree)']\n\n for k, station_name in enumerate(st_data):\n if station_name in st_list:\n pass\n else:\n st_list[station_name] = dict()\n st_list[station_name][\"lat\"] = lat_data[k]\n st_list[station_name][\"source\"] = source\n st_list[station_name][\"lon\"] = lon_data[k]\n print(station_name)\n\n print(\"Number of stations in bbox {}\".format(len(st_list.keys())))\n return st_list",
"def read_xy_file(self, city = \"\"):\n\t\tcenter = []\n\t\ttemp_list = []\n\t\tif 1 > len( city ):\n\t\t\treturn center\n\t\ttoday = datetime.datetime.now().strftime(\"%Y%m%d\")\n\t\ttry:\n\t\t\tinput_filename = f\"{city}{self.second_part_of_xy_filename}\"\n\t\t\twith open( os.path.join( self.input_dir, input_filename ), 'r', encoding='utf-8') as f:\n\t\t\t\tfor item in f.readlines()[1:]:\n\t\t\t\t\tcenter.append(tuple(item.strip().split(\",\")[-5:])) # lng, lat, ok0, max_value, max_timestamp\n\t\texcept Exception as ex:\n\t\t\tcenter = []\n\t\t\tself.logger.error( f\"Inside Method {sys._getframe().f_code.co_name} of Class {self.__class__.__name__}, cannot read xy_list file ({input_filename}) or requested xy points file ({input_filename}). Exception = {ex}\" )\n\t\treturn center",
"def read_weatherstations(path_to_data):\n namedict = read_weatherstationnames(path_to_data)\n stations = {}\n for i in namedict:\n filename = namedict[i].replace(' ', '_') + '.csv'\n print(\"Reading\", filename)\n ws = read_station_csv(os.path.join(path_to_data, filename))\n stations[i] = ws\n return stations",
"def get_locations(db_path: str) -> List[Location]:\n locations: List[Location] = []\n conn: Connection = sqlite3.connect(path.join(db_path, 'company_data.db'))\n cur: Cursor = conn.cursor()\n for row in cur.execute('SELECT name, area, climate FROM locations'):\n locations.append(Location(row[0], row[1], Climate(row[2])))\n\n cur.close()\n conn.close()\n return locations",
"def stations():\n\n return station_list",
"def read_table_stations(self):\n if not os.path.exists(self.station_table_filename):\n LOGGER.warning('could not find station.table file \"%s\"', self.station_table_filename)\n return self.known_stations\n count = 0\n with open(self.station_table_filename, 'r') as textfile:\n lines = textfile.read().split(LF)\n for line in lines:\n station_id, data = read_table_station_from_line(line)\n if station_id is not None:\n self.known_stations[station_id] = data\n count += 1\n self.station_file_age = os.path.getmtime(self.station_table_filename)\n LOGGER.info(' Loaded %i station records from \"%s\"', count, self.station_table_filename)\n return self.known_stations",
"def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)",
"def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data",
"def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.",
"def _getDataSetForFCSFileSample(self):\n\n # Get the dataset for current FCS file sample\n dataSets = searchService.getDataSet(self._entityId)\n if dataSets is None:\n self._message = \"Could not retrieve datasets for \" \\\n \"FCS file with identifier \" + self._entityId + \"!\"\n self._logger.error(self._message)\n else:\n dataSets = [dataSets]\n\n # Return\n return dataSets",
"def createStations (config):\n trace (\"createStations()\")\n for section in config.sections():\n if section.capitalize().startswith(\"Station\"):\n myPressureProbes = myHumidityProbes = []\n myTemperatureProbes= []\n name = section\n for option in config.options (section):\n value = config.get (section, option)\n opt = option.capitalize()\n if opt == \"Name\":\n name = value\n elif opt == \"Temperature\":\n myTemperatureProbes = getProbeList (value,\n temperatureProbes)\n elif opt == \"Pressure\":\n myPressureProbes = getProbeList (value,\n pressureProbes)\n elif opt == \"Humidity\":\n myHumidityProbes = getProbeList (value,\n humidityProbes)\n stations [name] = Station.Station(myTemperatureProbes,\n myPressureProbes, myHumidityProbes, name)",
"def get_project_stations(proj_df, engine, drop_dups=False): \n # Get proj IDs\n assert len(proj_df) > 0, 'ERROR: Please select at least one project.'\n proj_df['project_id'].drop_duplicates(inplace=True)\n proj_ids = proj_df['project_id'].values.astype(int).tolist()\n\n # Query db\n bind_pars = ','.join(':%d' % i for i in range(len(proj_ids))) \n\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.longitude, \"\n \" d.latitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id IN \"\n \" (SELECT station_id \"\n \" FROM nivadatabase.projects_stations \"\n \" WHERE project_id IN (%s) \"\n \" ) \" \n \"AND a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\" % bind_pars)\n df = pd.read_sql(sql, params=proj_ids, con=engine)\n\n # Drop duplictaes, if desired\n if drop_dups:\n df.drop_duplicates(subset='station_id', inplace=True)\n \n return df",
"def collect(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.default_start\n if end_date is None:\n end_date = self.default_end\n\n cur = self.conn.cursor()\n\n # Maximum return is 1000 entries\n num_days = 1000 // len(self.stations)\n # Maximum date-range is 1 year\n if num_days > 365:\n num_days = 365\n\n for interval in netzero.util.time_intervals(\n start_date, end_date, days=num_days\n ):\n netzero.util.print_status(\n \"Weather\",\n \"Collecting: {} to {}\".format(\n interval[0].strftime(\"%Y-%m-%d\"), interval[1].strftime(\"%Y-%m-%d\")\n ),\n )\n\n # TODO -- REMOVE ASSUMPTION THAT LEN(DATA) < LIMIT\n raw_data = self.query_api(interval[0], interval[1])\n\n if raw_data is None:\n print(\"ERROR QUERYING API\") # TODO exception here?\n continue\n\n for entry in raw_data.get(\"results\", []):\n # Insert the weather data to the table, to be averaged later\n date = datetime.datetime.strptime(\n entry[\"date\"], \"%Y-%m-%dT%H:%M:%S\"\n ).date()\n value = entry[\"value\"]\n station = entry[\"station\"]\n\n cur.execute(\n \"INSERT OR IGNORE INTO weather VALUES (?, ?, ?)\", (date, value, station)\n )\n\n self.conn.commit()\n\n cur.close()\n\n netzero.util.print_status(\"Weather\", \"Complete\", newline=True)",
"def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")",
"def stations(self):\n stations = []\n f = self._fetch(Citibike.STATION_URL)\n data = json.load(f)\n if 'stationBeanList' not in data or len(data['stationBeanList']) == 0:\n raise BadResponse('Station Fetch Failed', data)\n for station in data['stationBeanList']:\n stations.append(Station._from_json(station))\n logging.debug(\"Retrieved %d stations\" % len(stations))\n return stations",
"async def _get_stations_remote() -> List[WeatherStation]:\n LOGGER.info('Using WFWX to retrieve station list')\n async with ClientSession() as session:\n # Get the authentication header\n header = await _get_auth_header(session)\n stations = []\n # Iterate through \"raw\" station data.\n async for raw_station in _fetch_raw_stations(session, header, BuildQueryAllStations()):\n # If the station is valid, add it to our list of stations.\n if _is_station_valid(raw_station):\n LOGGER.info('Processing raw_station %d',\n int(raw_station['stationCode']))\n stations.append(_parse_station(raw_station))\n LOGGER.debug('total stations: %d', len(stations))\n return stations",
"async def stations():\n with open(\"/data/station_list.json\") as j:\n data = json.load(j)\n return data",
"def get_stations():\n response = requests.get('https://api.hh.ru/metro/160')\n todos = json.loads(response.text)\n colors = {'CD0505': 'red'}\n all_stations_one_line = []\n\n for i in todos['lines']:\n all_stations_one_line = []\n\n for j in i['stations']:\n one_station = station.station()\n one_station.set_name(j['name'])\n one_station.set_color(colors.get(i['hex_color']))\n one_station.set_lat(j['lat'])\n one_station.set_lng(j['lng'])\n all_stations_one_line.append(one_station)\n return all_stations_one_line"
] | [
"0.6891495",
"0.6260505",
"0.60463357",
"0.5638666",
"0.5469626",
"0.53563493",
"0.52621317",
"0.5249585",
"0.5202059",
"0.51837224",
"0.5047539",
"0.5045007",
"0.49937794",
"0.4993661",
"0.49833637",
"0.49792498",
"0.49386016",
"0.4915742",
"0.49026728",
"0.48896354",
"0.48610312",
"0.48474222",
"0.48411804",
"0.47907016",
"0.47585645",
"0.47467005",
"0.4743808",
"0.4732624",
"0.47308964",
"0.47150943"
] | 0.7727756 | 0 |
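readClimatePointStations ties together two pieces quoted in this row's negatives: the 'stations' list written by ClimatePointStation.writeToMetadata and the per-station parser ClimatePointStation.readFromMetadata. A round-trip sketch under stated assumptions: the import path is a guess, the station values are invented for illustration, and ClimatePointStation's constructor is assumed to initialize the optional fields (startDate, endDate, data, variablesData) since it is not shown in this row. It reuses the context object and GenericMetadata import from the previous sketch:

# Round-trip sketch -- all station values below are made up.
from ecohydrolib.metadata import ClimatePointStation  # import path is an assumption

station = ClimatePointStation()  # assumed to default the optional fields to None/empty
station.type = 'GHCN'
station.id = 'US1NCCB0001'
station.name = 'Example station'
station.longitude = -78.96
station.latitude = 35.91
station.elevation = 121.0
station.variables = ['prcp', 'tmin', 'tmax']
station.writeToMetadata(context)  # registers the station under the 'stations' entry and writes its attributes

stations = GenericMetadata.readClimatePointStations(context)
for s in stations:
    print('%s %s (%s): %s' % (s.type, s.id, s.name, ','.join(s.variables)))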
Read all grid climate entries from the metadata store for a given project. Arguments: context -- Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns: a dictionary of key/value pairs from the grid climate section of the project metadata | def readClimateGridEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)",
"def writeClimateGridEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.CLIMATE_GRID_SECTION, keys, values)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def load_cdo_results():\n # Location of data files\n cdo_dir = os.path.dirname(__file__)+'/data/cdo_results/'\n # Dictionary in which to store data\n cdo_dict = {}\n # Load gridcell area data\n cdo_dict['gridarea'] = xr.open_dataset(cdo_dir+'data01_gridarea.nc',\n decode_times=False, autoclose=True)\n # Load data for regions\n for region in load_region_bounds_dict().keys():\n for suffix in ['', '_area', '_fldmean']:\n key = region + suffix\n if key == 'Glb': # for globe is data01.nc\n cdo_dict[key] = xr.open_dataset(cdo_dir+'../data01.nc',\n decode_times=False, autoclose=True)\n else:\n cdo_dict[key] = xr.open_dataset(cdo_dir+'data01_'+key+'.nc',\n decode_times=False, autoclose=True)\n # Return dictioary of data\n return cdo_dict",
"def GatherBaseData(self, mr, nonce):\n project = mr.project\n\n project_summary = ''\n project_alert = None\n project_read_only = False\n project_home_page = ''\n project_thumbnail_url = ''\n if project:\n project_summary = project.summary\n project_alert = _CalcProjectAlert(project)\n project_read_only = project.read_only_reason\n project_home_page = project.home_page\n project_thumbnail_url = tracker_views.LogoView(project).thumbnail_url\n\n with work_env.WorkEnv(mr, self.services) as we:\n is_project_starred = False\n project_view = None\n if mr.project:\n if permissions.UserCanViewProject(\n mr.auth.user_pb, mr.auth.effective_ids, mr.project):\n is_project_starred = we.IsProjectStarred(mr.project_id)\n # TODO(jrobbins): should this be a ProjectView?\n project_view = template_helpers.PBProxy(mr.project)\n\n grid_x_attr = None\n grid_y_attr = None\n hotlist_view = None\n if mr.hotlist:\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user,\n features_bizobj.UsersInvolvedInHotlists([mr.hotlist]))\n hotlist_view = hotlist_views.HotlistView(\n mr.hotlist, mr.perms, mr.auth, mr.viewed_user_auth.user_id,\n users_by_id, self.services.hotlist_star.IsItemStarredBy(\n mr.cnxn, mr.hotlist.hotlist_id, mr.auth.user_id))\n grid_x_attr = mr.x.lower()\n grid_y_attr = mr.y.lower()\n\n app_version = os.environ.get('CURRENT_VERSION_ID')\n\n viewed_username = None\n if mr.viewed_user_auth.user_view:\n viewed_username = mr.viewed_user_auth.user_view.username\n\n issue_entry_url = 'entry'\n config = None\n if mr.project_id and self.services.config:\n with mr.profiler.Phase('getting config'):\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n grid_x_attr = (mr.x or config.default_x_attr).lower()\n grid_y_attr = (mr.y or config.default_y_attr).lower()\n issue_entry_url = _LoginOrIssueEntryURL(mr, config)\n\n viewing_self = mr.auth.user_id == mr.viewed_user_auth.user_id\n offer_saved_queries_subtab = (\n viewing_self or mr.auth.user_pb and mr.auth.user_pb.is_site_admin)\n\n login_url = _SafeCreateLoginURL(mr)\n logout_url = _SafeCreateLogoutURL(mr)\n logout_url_goto_home = users.create_logout_url('/')\n version_base = _VersionBaseURL(mr.request)\n\n base_data = {\n # EZT does not have constants for True and False, so we pass them in.\n 'True': ezt.boolean(True),\n 'False': ezt.boolean(False),\n\n 'local_mode': ezt.boolean(settings.local_mode),\n\n 'site_name': settings.site_name,\n 'show_search_metadata': ezt.boolean(False),\n 'page_template': self._PAGE_TEMPLATE,\n 'main_tab_mode': self._MAIN_TAB_MODE,\n 'project_summary': project_summary,\n 'project_home_page': project_home_page,\n 'project_thumbnail_url': project_thumbnail_url,\n\n 'hotlist_id': mr.hotlist_id,\n 'hotlist': hotlist_view,\n\n 'hostport': mr.request.host,\n 'absolute_base_url': '%s://%s' % (mr.request.scheme, mr.request.host),\n 'project_home_url': None,\n 'link_rel_canonical': None, # For specifying <link rel=\"canonical\">\n 'projectname': mr.project_name,\n 'project': project_view,\n 'project_is_restricted': ezt.boolean(_ProjectIsRestricted(mr)),\n 'offer_contributor_list': ezt.boolean(\n permissions.CanViewContributorList(mr, mr.project)),\n 'logged_in_user': mr.auth.user_view,\n 'form_token': None, # Set to a value below iff the user is logged in.\n 'form_token_path': None,\n 'token_expires_sec': None,\n 'xhr_token': None, # Set to a value below iff the user is logged in.\n 'flag_spam_token': None,\n 'nonce': nonce,\n 'perms': mr.perms,\n 'warnings': mr.warnings,\n 'errors': mr.errors,\n\n 
'viewed_username': viewed_username,\n 'viewed_user': mr.viewed_user_auth.user_view,\n 'viewed_user_pb': template_helpers.PBProxy(\n mr.viewed_user_auth.user_pb),\n 'viewing_self': ezt.boolean(viewing_self),\n 'viewed_user_id': mr.viewed_user_auth.user_id,\n 'offer_saved_queries_subtab': ezt.boolean(offer_saved_queries_subtab),\n\n 'currentPageURL': mr.current_page_url,\n 'currentPageURLEncoded': mr.current_page_url_encoded,\n 'login_url': login_url,\n 'logout_url': logout_url,\n 'logout_url_goto_home': logout_url_goto_home,\n 'continue_issue_id': mr.continue_issue_id,\n 'feedback_email': settings.feedback_email,\n 'category_css': None, # Used to specify a category of stylesheet\n 'category2_css': None, # specify a 2nd category of stylesheet if needed.\n 'page_css': None, # Used to add a stylesheet to a specific page.\n\n 'can': mr.can,\n 'query': mr.query,\n 'colspec': None,\n 'sortspec': mr.sort_spec,\n\n # Options for issuelist display\n 'grid_x_attr': grid_x_attr,\n 'grid_y_attr': grid_y_attr,\n 'grid_cell_mode': mr.cells,\n 'grid_mode': None,\n 'list_mode': None,\n 'chart_mode': None,\n\n 'issue_entry_url': issue_entry_url,\n 'is_cross_project': ezt.boolean(False),\n\n # for project search (some also used in issue search)\n 'start': mr.start,\n 'num': mr.num,\n 'groupby': mr.group_by_spec,\n 'q_field_size': (\n min(framework_constants.MAX_ARTIFACT_SEARCH_FIELD_SIZE,\n max(framework_constants.MIN_ARTIFACT_SEARCH_FIELD_SIZE,\n len(mr.query) + framework_constants.AUTOSIZE_STEP))),\n 'mode': None, # Display mode, e.g., grid mode.\n 'ajah': mr.ajah,\n 'table_title': mr.table_title,\n\n 'alerts': alerts.AlertsView(mr), # For alert.ezt\n 'project_alert': project_alert,\n\n 'title': None, # First part of page title\n 'title_summary': None, # Appended to title on artifact detail pages\n\n # TODO(jrobbins): make sure that the templates use\n # project_read_only for project-mutative actions and if any\n # uses of read_only remain.\n 'project_read_only': ezt.boolean(project_read_only),\n 'site_read_only': ezt.boolean(settings.read_only),\n 'banner_time': servlet_helpers.GetBannerTime(settings.banner_time),\n 'read_only': ezt.boolean(settings.read_only or project_read_only),\n 'site_banner_message': settings.banner_message,\n 'robots_no_index': None,\n 'analytics_id': settings.analytics_id,\n\n 'is_project_starred': ezt.boolean(is_project_starred),\n\n 'version_base': version_base,\n 'app_version': app_version,\n 'gapi_client_id': settings.gapi_client_id,\n 'viewing_user_page': ezt.boolean(False),\n 'old_ui_url': None,\n\n 'is_member': ezt.boolean(False),\n }\n\n if mr.project:\n base_data['project_home_url'] = '/p/%s' % mr.project_name\n\n # Always add xhr-xsrf token because even anon users need some\n # pRPC methods, e.g., autocomplete, flipper, and charts.\n base_data['token_expires_sec'] = xsrf.TokenExpiresSec()\n base_data['xhr_token'] = xsrf.GenerateToken(\n mr.auth.user_id, xsrf.XHR_SERVLET_PATH)\n # Always add other anti-xsrf tokens when the user is logged in.\n if mr.auth.user_id:\n form_token_path = self._FormHandlerURL(mr.request.path)\n base_data['form_token'] = xsrf.GenerateToken(\n mr.auth.user_id, form_token_path)\n base_data['form_token_path'] = form_token_path\n\n return base_data",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data",
"def get_dicom_info(paths, index_col=None, verbose=False):\n meta_info = []\n paths = tqdm_notebook(paths, leave=False) if verbose else paths\n for path in paths:\n first_slice = dicom.read_file(os.path.join(path, os.listdir(path)[0]))\n\n if hasattr(first_slice, 'PatientAge'):\n patient_age = str(first_slice.PatientAge)\n else:\n patient_age = ''\n\n if hasattr(first_slice, 'PatientSex'):\n patient_sex = str(first_slice.PatientSex)\n else:\n patient_sex = ''\n\n locations = []\n for name in os.listdir(path):\n slice_path = os.path.join(path, name)\n dicom_slice = dicom.read_file(slice_path, stop_before_pixels=True)\n locations.append(float(dicom_slice.SliceLocation))\n\n steps_z = np.diff(np.sort(np.array(locations)))\n spacing_z = np.min(steps_z)\n info_dict = {\n \"UniformSpacing\": np.allclose(steps_z, spacing_z),\n 'MinSpacingZ': np.min(steps_z),\n 'MaxSpacingZ': np.max(steps_z),\n 'SliceThickness': float(first_slice.SliceThickness),\n 'SpacingZ': spacing_z,\n 'SpacingY': float(first_slice.PixelSpacing[0]),\n 'SpacingX': float(first_slice.PixelSpacing[1]),\n 'StudyID': str(first_slice.StudyID),\n 'ConvolutionKernel': str(first_slice.ConvolutionKernel),\n 'FilterType': str(first_slice.FilterType),\n 'WindowWidth': str(first_slice.WindowWidth),\n 'WindowCenter': str(first_slice.WindowCenter),\n 'PatientAge': patient_age,\n 'PatientSex': patient_sex,\n 'AccessionNumber': str(first_slice.AccessionNumber),\n 'PatientID': str(first_slice.PatientID),\n 'Rows': int(first_slice.Rows),\n 'Columns': int(first_slice.Columns),\n 'NumSlices': len(os.listdir(path)),\n 'ScanID': os.path.basename(path),\n 'Index': str(first_slice.AccessionNumber) + '_' + os.path.basename(path),\n 'ScanPath': path\n }\n meta_info.append(info_dict)\n return pd.DataFrame(meta_info) if index_col is None else pd.DataFrame(meta_info).set_index(index_col)",
"def readModelRunEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MODEL_RUN_SECTION)",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row",
"def read_files(project_ID):\n \n # Define the link and the metadata key name\n API_downloads_link = 'http://194.4.103.57:5000/project/downloads/'\n metadata_key_name = 'experimentDesignLink'\n filtered_key_name = 'filteredTPMLink'\n normalised_key_name = 'normalisedCountsLink'\n \n # Define variables\n metadata = None\n matrix = None\n gene_names = None\n cell_names = None\n \n # Get the download links of the project\n links = requests.get(API_downloads_link + project_ID).json()\n if not links: # If project doesn't exists\n raise Exception(f'Project with ID {project_ID} not found')\n links = links[0]\n \n # Return the metadata if it exists\n if metadata_key_name in links:\n metadata_link = links[metadata_key_name]\n metadata = pd.read_csv(metadata_link, sep='\\t', low_memory=False)\n \n if filtered_key_name in links:\n matrix_link = links[filtered_key_name]\n matrix, cell_names, gene_names = download_matrix(matrix_link, matrix_type='filtered')\n elif normalised_key_name in links:\n matrix_link = links[normalised_key_name]\n matrix, cell_names, gene_names = download_matrix(matrix_link, matrix_type='normalised')\n \n # If project does not have metadata link, return none\n return metadata, matrix, gene_names, cell_names",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def readFromMetadata(cls, context, fqId):\n newInstance = ClimatePointStation()\n (newInstance.type, newInstance.id) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n\n climate = GenericMetadata.readClimatePointEntries(context)\n \n keyProto = 'station' + GenericMetadata.COMPOUND_KEY_SEP + fqId + GenericMetadata.COMPOUND_KEY_SEP\n longitude = keyProto + 'longitude'\n newInstance.longitude = float(climate[longitude])\n latitude = keyProto + 'latitude'\n newInstance.latitude = float(climate[latitude])\n elevation = keyProto + 'elevation'\n newInstance.elevation = float(climate[elevation])\n name = keyProto + 'name'\n newInstance.name = climate[name] \n startDate = keyProto + 'startdate'\n try:\n newInstance.startDate = datetime.strptime(climate[startDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass \n endDate = keyProto + 'enddate'\n try:\n newInstance.endDate = datetime.strptime(climate[endDate], ClimatePointStation.FMT_DATE)\n except KeyError:\n pass\n variablesKey = keyProto + 'variables'\n try:\n newInstance.variables = climate[variablesKey].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n pass\n data = keyProto + 'data'\n try:\n newInstance.data = climate[data]\n except KeyError:\n pass\n try:\n for var in newInstance.variables:\n varKey = keyProto + var + GenericMetadata.COMPOUND_KEY_SEP + 'data'\n newInstance.variablesData[var] = climate[varKey]\n except KeyError:\n pass\n \n return newInstance",
"def get_entries(self):\n prefixes = self.spot_mappings\n with open(self.path, 'r') as f:\n prefix_key = self.seek_through_comments(f).rsplit(\"/\", 1)[-1]\n prefix = prefixes[prefix_key]\n\n for ln in self.split_log_lines(f, \"|\", prefix):\n yield LogItem(*ln).get_properties()",
"def read_data_from_cloud_manifest(path: str) -> dict:\n\n with open_cloud_file(path, 'r') as csv_file:\n\n def clean_file_header(header: str) -> str:\n return header.strip().lower()\n\n data_to_ingest = {'rows': []}\n csv_reader = csv.DictReader(csv_file, delimiter=\",\")\n csv_reader.fieldnames = list(map(clean_file_header, csv_reader.fieldnames))\n data_to_ingest['fieldnames'] = csv_reader.fieldnames\n for row in csv_reader:\n for key in row.copy():\n if not key:\n del row[key]\n data_to_ingest['rows'].append(row)\n return data_to_ingest",
"def readClimatePointStations(context):\n stationObjects = []\n climatePoints = GenericMetadata.readClimatePointEntries(context)\n try:\n stations = climatePoints['stations'].split(GenericMetadata.VALUE_DELIM)\n for station in stations:\n stationObjects.append(ClimatePointStation.readFromMetadata(context, station))\n except KeyError:\n pass\n return stationObjects",
"def read_project(path: str):\n textfilecontent = {}\n\n # Discover .txt files and add them to the dictionary\n for filepath in iglob(os.path.join(path, '**/*.txt'), recursive=True):\n add_path_dict(input_dict=textfilecontent, start_path=path,\n file_path=filepath)\n\n return textfilecontent",
"def read_locations(db, openfile):\n pass",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def writeClimateGridEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.CLIMATE_GRID_SECTION, key, value)",
"def get_important_data(cgns_path):\n\n # Open data file\n data = h5.File(cgns_path, \"r\")\n\n # Get # of timesteps\n tres = len(data[\"Base\"][\"TimeIterValues\"][\"TimeValues\"][\" data\"])\n\n # Get resolution in each dimension (noncartesian)\n kres, jres, ires = data[\"Base\"][\"Zone1\"][\" data\"][0]\n\n # Close data file\n data.close()\n\n # Return dictionary of important values\n return {\"tres\": tres, \"ires\": ires, \"jres\": jres, \"kres\": kres}",
"def read_metadata(data_dir):\n return pd.read_csv(os.path.join(data_dir, \"metadata.csv\"), index_col=0)",
"def read_metadata(data_dir):\n return pd.read_csv(os.path.join(data_dir, \"metadata.csv\"), index_col=0)",
"def get_content(self):\r\n content = []\r\n for regiongroup in self.region_groups:\r\n for region in regiongroup.get_content():\r\n # Add date, unique_name and project to the metadata\r\n region[0]['date'] = self.extracted_date\r\n region[0]['unique_name'] = self.unique_name\r\n try:\r\n project = os.path.split(\r\n os.path.split(self.unique_name)[0]\r\n )[1]\r\n except IndexError:\r\n project = ''\r\n region[0]['project'] = project\r\n content.append(region)\r\n return content",
"def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.",
"def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' % datemo, grid\n \n return lat,lon,thk",
"def _get_project_tina_entries(self,pool='archive',refresh=False,path_folder=None):\n\t\tif not path_folder: path_folder = self.catalog_path\n\t\tif not refresh:\n\t\t\ttry:\n\t\t\t\treturn self.tina_archive_entries\n\t\t\texcept: pass \n\t\tself.tina_archive_entries = Tina.tina_find(\n\t\t\tpath_folder=path_folder,\n\t\t\tapplication=self.application,\n\t\t\tstrat='A',\n\t\t\tskip_filter=self.skip_filter)\n\t\treturn self.tina_archive_entries",
"def get_CTD_metadata(db):\n\n if not use_sqlite:\n raise RuntimeError('No sqlite standard library found in this python'\n ' installation. This function (get_CTD_metadata)'\n ' is unavailable.')\n\n def _dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n try:\n con = sqlite3.connect(db)\n\n con.row_factory = _dict_factory\n\n c = con.cursor()\n\n out = c.execute('SELECT * from Stations')\n\n meta_info = out.fetchall()\n\n except sqlite3.Error as e:\n if con:\n con.close()\n print('Error {}:'.format(e.args[0]))\n meta_info = [False]\n\n finally:\n if con:\n con.close()\n\n return meta_info",
"def _collect(self, conll_directory) -> Iterator[Any]:\n logging.info(\"Reading .conll from %s\", conll_directory)\n return dataset_path_iterator(conll_directory, self.configs.file_ext)"
] | [
"0.67343694",
"0.60257065",
"0.5869046",
"0.5570541",
"0.5408377",
"0.5368976",
"0.5334755",
"0.5279146",
"0.5261112",
"0.520745",
"0.5160814",
"0.509415",
"0.5077306",
"0.50602674",
"0.50406206",
"0.5021482",
"0.50120205",
"0.50023395",
"0.49895978",
"0.49739048",
"0.49731866",
"0.49663553",
"0.49344784",
"0.49344784",
"0.49315372",
"0.49259",
"0.49199095",
"0.4919768",
"0.49153936",
"0.48997635"
] | 0.776974 | 0 |
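Note on the row above: the document (readClimateGridEntries) simply delegates to GenericMetadata._readEntriesForSection and returns a plain dict. A minimal, hypothetical usage sketch follows; the import paths and the Context constructor are assumptions in the style of EcohydroLib and are not taken from this dataset. The same pattern applies to the other read*Entries helpers that appear as rows here.

    # Hypothetical sketch, not part of the dataset: read the "climate grid" metadata section of a project.
    from ecohydrolib.context import Context            # assumed module path
    from ecohydrolib.metadata import GenericMetadata   # assumed module path

    context = Context("/path/to/project")               # assumed constructor: wraps the project directory holding the metadata store
    gridEntries = GenericMetadata.readClimateGridEntries(context)
    for key, value in gridEntries.items():              # empty dict if the section or metadata file is absent
        print(key, value)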
Read all HydroShare entries from the metadata store for a given project. context: Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns: a dictionary of key/value pairs from the HydroShare section of the project metadata | def readHydroShareEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.HYDROSHARE_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readStudyAreaEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.STUDY_AREA_SECTION)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def _ReadEntries(self):\n scope = {}\n filename = os.path.join(self._root_dir, self._options.entries_filename)\n if not os.path.exists(filename):\n return []\n exec(gclient_utils.FileRead(filename), scope)\n return scope[\"entries\"]",
"def getStudyInfo(self, study_id, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_study_info', [study_id, web_app_user_id, results])\n study_info = {}\n for row in results:\n study_info['submit_to_insdc'] = row[0]\n study_info['investigation_type'] = row[1]\n study_info['project_name'] = row[2]\n study_info['experimental_factor'] = row[3]\n study_info['study_alias'] = row[4]\n study_info['study_title'] = row[5]\n study_info['study_type'] = row[6]\n study_info['study_abstract'] = row[7]\n study_info['study_description'] = row[8]\n study_info['center_name'] = row[9]\n study_info['center_project_name'] = row[10]\n study_info['project_id'] = row[11]\n study_info['pmid'] = row[12]\n study_info['metadata_complete'] = row[13]\n study_info['sff_complete'] = row[14]\n study_info['mapping_file_complete'] = row[15]\n study_info['miens_compliant'] = row[16]\n study_info['can_delete'] = row[17]\n study_info['avg_emp_score'] = row[18]\n study_info['user_emp_score'] = row[19]\n study_info['number_samples_promised'] = row[20]\n study_info['number_samples_collected'] = row[21]\n study_info['principal_investigator'] = row[22]\n study_info['sample_count'] = row[23] \n study_info['lab_person'] = row[24] \n study_info['lab_person_contact'] = row[25]\n study_info['emp_person'] = row[26]\n study_info['first_contact'] = row[27]\n study_info['most_recent_contact'] = row[28]\n study_info['sample_type'] = row[29]\n study_info['has_physical_specimen'] = row[30]\n study_info['has_extracted_data'] = row[31]\n study_info['timeseries'] = row[32]\n study_info['spatial_series'] = row[33]\n study_info['principal_investigator'] = row[34]\n study_info['principal_investigator_contact'] = row[35]\n study_info['default_emp_status'] = row[36]\n study_info['funding'] = row[37]\n study_info['includes_timeseries'] = row[38]\n study_info['sample_count'] = row[39]\n study_info['ebi_study_accession'] = row[40]\n study_info['locked'] = row[41]\n study_info['vamps_id'] = row[42]\n return study_info",
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def GatherBaseData(self, mr, nonce):\n project = mr.project\n\n project_summary = ''\n project_alert = None\n project_read_only = False\n project_home_page = ''\n project_thumbnail_url = ''\n if project:\n project_summary = project.summary\n project_alert = _CalcProjectAlert(project)\n project_read_only = project.read_only_reason\n project_home_page = project.home_page\n project_thumbnail_url = tracker_views.LogoView(project).thumbnail_url\n\n with work_env.WorkEnv(mr, self.services) as we:\n is_project_starred = False\n project_view = None\n if mr.project:\n if permissions.UserCanViewProject(\n mr.auth.user_pb, mr.auth.effective_ids, mr.project):\n is_project_starred = we.IsProjectStarred(mr.project_id)\n # TODO(jrobbins): should this be a ProjectView?\n project_view = template_helpers.PBProxy(mr.project)\n\n grid_x_attr = None\n grid_y_attr = None\n hotlist_view = None\n if mr.hotlist:\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user,\n features_bizobj.UsersInvolvedInHotlists([mr.hotlist]))\n hotlist_view = hotlist_views.HotlistView(\n mr.hotlist, mr.perms, mr.auth, mr.viewed_user_auth.user_id,\n users_by_id, self.services.hotlist_star.IsItemStarredBy(\n mr.cnxn, mr.hotlist.hotlist_id, mr.auth.user_id))\n grid_x_attr = mr.x.lower()\n grid_y_attr = mr.y.lower()\n\n app_version = os.environ.get('CURRENT_VERSION_ID')\n\n viewed_username = None\n if mr.viewed_user_auth.user_view:\n viewed_username = mr.viewed_user_auth.user_view.username\n\n issue_entry_url = 'entry'\n config = None\n if mr.project_id and self.services.config:\n with mr.profiler.Phase('getting config'):\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n grid_x_attr = (mr.x or config.default_x_attr).lower()\n grid_y_attr = (mr.y or config.default_y_attr).lower()\n issue_entry_url = _LoginOrIssueEntryURL(mr, config)\n\n viewing_self = mr.auth.user_id == mr.viewed_user_auth.user_id\n offer_saved_queries_subtab = (\n viewing_self or mr.auth.user_pb and mr.auth.user_pb.is_site_admin)\n\n login_url = _SafeCreateLoginURL(mr)\n logout_url = _SafeCreateLogoutURL(mr)\n logout_url_goto_home = users.create_logout_url('/')\n version_base = _VersionBaseURL(mr.request)\n\n base_data = {\n # EZT does not have constants for True and False, so we pass them in.\n 'True': ezt.boolean(True),\n 'False': ezt.boolean(False),\n\n 'local_mode': ezt.boolean(settings.local_mode),\n\n 'site_name': settings.site_name,\n 'show_search_metadata': ezt.boolean(False),\n 'page_template': self._PAGE_TEMPLATE,\n 'main_tab_mode': self._MAIN_TAB_MODE,\n 'project_summary': project_summary,\n 'project_home_page': project_home_page,\n 'project_thumbnail_url': project_thumbnail_url,\n\n 'hotlist_id': mr.hotlist_id,\n 'hotlist': hotlist_view,\n\n 'hostport': mr.request.host,\n 'absolute_base_url': '%s://%s' % (mr.request.scheme, mr.request.host),\n 'project_home_url': None,\n 'link_rel_canonical': None, # For specifying <link rel=\"canonical\">\n 'projectname': mr.project_name,\n 'project': project_view,\n 'project_is_restricted': ezt.boolean(_ProjectIsRestricted(mr)),\n 'offer_contributor_list': ezt.boolean(\n permissions.CanViewContributorList(mr, mr.project)),\n 'logged_in_user': mr.auth.user_view,\n 'form_token': None, # Set to a value below iff the user is logged in.\n 'form_token_path': None,\n 'token_expires_sec': None,\n 'xhr_token': None, # Set to a value below iff the user is logged in.\n 'flag_spam_token': None,\n 'nonce': nonce,\n 'perms': mr.perms,\n 'warnings': mr.warnings,\n 'errors': mr.errors,\n\n 
'viewed_username': viewed_username,\n 'viewed_user': mr.viewed_user_auth.user_view,\n 'viewed_user_pb': template_helpers.PBProxy(\n mr.viewed_user_auth.user_pb),\n 'viewing_self': ezt.boolean(viewing_self),\n 'viewed_user_id': mr.viewed_user_auth.user_id,\n 'offer_saved_queries_subtab': ezt.boolean(offer_saved_queries_subtab),\n\n 'currentPageURL': mr.current_page_url,\n 'currentPageURLEncoded': mr.current_page_url_encoded,\n 'login_url': login_url,\n 'logout_url': logout_url,\n 'logout_url_goto_home': logout_url_goto_home,\n 'continue_issue_id': mr.continue_issue_id,\n 'feedback_email': settings.feedback_email,\n 'category_css': None, # Used to specify a category of stylesheet\n 'category2_css': None, # specify a 2nd category of stylesheet if needed.\n 'page_css': None, # Used to add a stylesheet to a specific page.\n\n 'can': mr.can,\n 'query': mr.query,\n 'colspec': None,\n 'sortspec': mr.sort_spec,\n\n # Options for issuelist display\n 'grid_x_attr': grid_x_attr,\n 'grid_y_attr': grid_y_attr,\n 'grid_cell_mode': mr.cells,\n 'grid_mode': None,\n 'list_mode': None,\n 'chart_mode': None,\n\n 'issue_entry_url': issue_entry_url,\n 'is_cross_project': ezt.boolean(False),\n\n # for project search (some also used in issue search)\n 'start': mr.start,\n 'num': mr.num,\n 'groupby': mr.group_by_spec,\n 'q_field_size': (\n min(framework_constants.MAX_ARTIFACT_SEARCH_FIELD_SIZE,\n max(framework_constants.MIN_ARTIFACT_SEARCH_FIELD_SIZE,\n len(mr.query) + framework_constants.AUTOSIZE_STEP))),\n 'mode': None, # Display mode, e.g., grid mode.\n 'ajah': mr.ajah,\n 'table_title': mr.table_title,\n\n 'alerts': alerts.AlertsView(mr), # For alert.ezt\n 'project_alert': project_alert,\n\n 'title': None, # First part of page title\n 'title_summary': None, # Appended to title on artifact detail pages\n\n # TODO(jrobbins): make sure that the templates use\n # project_read_only for project-mutative actions and if any\n # uses of read_only remain.\n 'project_read_only': ezt.boolean(project_read_only),\n 'site_read_only': ezt.boolean(settings.read_only),\n 'banner_time': servlet_helpers.GetBannerTime(settings.banner_time),\n 'read_only': ezt.boolean(settings.read_only or project_read_only),\n 'site_banner_message': settings.banner_message,\n 'robots_no_index': None,\n 'analytics_id': settings.analytics_id,\n\n 'is_project_starred': ezt.boolean(is_project_starred),\n\n 'version_base': version_base,\n 'app_version': app_version,\n 'gapi_client_id': settings.gapi_client_id,\n 'viewing_user_page': ezt.boolean(False),\n 'old_ui_url': None,\n\n 'is_member': ezt.boolean(False),\n }\n\n if mr.project:\n base_data['project_home_url'] = '/p/%s' % mr.project_name\n\n # Always add xhr-xsrf token because even anon users need some\n # pRPC methods, e.g., autocomplete, flipper, and charts.\n base_data['token_expires_sec'] = xsrf.TokenExpiresSec()\n base_data['xhr_token'] = xsrf.GenerateToken(\n mr.auth.user_id, xsrf.XHR_SERVLET_PATH)\n # Always add other anti-xsrf tokens when the user is logged in.\n if mr.auth.user_id:\n form_token_path = self._FormHandlerURL(mr.request.path)\n base_data['form_token'] = xsrf.GenerateToken(\n mr.auth.user_id, form_token_path)\n base_data['form_token_path'] = form_token_path\n\n return base_data",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata",
"def _get_project_tina_entries(self,pool='archive',refresh=False,path_folder=None):\n\t\tif not path_folder: path_folder = self.catalog_path\n\t\tif not refresh:\n\t\t\ttry:\n\t\t\t\treturn self.tina_archive_entries\n\t\t\texcept: pass \n\t\tself.tina_archive_entries = Tina.tina_find(\n\t\t\tpath_folder=path_folder,\n\t\t\tapplication=self.application,\n\t\t\tstrat='A',\n\t\t\tskip_filter=self.skip_filter)\n\t\treturn self.tina_archive_entries",
"def getFolder(self, folderUris, metadata = None, queryArgs = None):\n\n returnContents = {}\n\n\n\n #-------------------- \n # Force the relevant argumets to lists\n #-------------------- \n if isinstance(folderUris, str):\n folderUris = [folderUris]\n if isinstance(queryArgs, str):\n queryArgs = [queryArgs]\n\n\n\n #-------------------- \n # Acquire contents via 'self.__getJson'\n #-------------------- \n contents = []\n for folderUri in folderUris:\n\n #\n # Apply query arguments, if any.\n #\n if queryArgs:\n folderUri = Xnat.path.applyQueryArguments(folderUri, \n queryArgs)\n\n\n #\n # Get the JSON\n #\n folderUri = Xnat.path.makeXnatUrl(self.host, folderUri)\n json = self.__getJson(folderUri)\n\n #\n # If json is null we have a login error.\n # Return out.\n #\n if json == None:\n return None\n #\n # Otherwise, concatenate to rest of contents.\n #\n contents = contents + json\n\n #\n # If we want the projects, store projects in a dictionary. \n # 'self.projectCache' is reset if the user logs into a new \n # host or logs in a again.\n #\n if folderUri.endswith('/projects'):\n self.projectCache = contents\n #print(f\"CONTENTS {contents}\")\n #-------------------- \n # Exit out if there are non-Json or XML values.\n #-------------------- \n if str(contents).startswith(\"<?xml\"): return [] \n # We don't want text values\n\n\n\n #-------------------- \n # Get other attributes with the contents \n # for metadata tracking.\n #-------------------- \n for content in contents:\n if metadata:\n for metadataTag in metadata:\n if metadataTag in content:\n #\n # Create the object attribute if not there.\n #\n if not metadataTag in returnContents:\n returnContents[metadataTag] = []\n returnContents[metadataTag].append(\\\n content[metadataTag])\n else:\n returnContents = contents\n\n\n #-------------------- \n # Track projects and files in global dict\n #-------------------- \n for folderUri in folderUris:\n folderUri = folderUri.replace('//', '/')\n if folderUri.endswith('/files'):\n for content in contents:\n # create a tracker in the fileDict\n #print(f\"\\n\\nCONTENT {content} {folderUri}\")\n self.fileDict[content['Name']] = content\n #print(\"%s %s\"%(, self.fileDict))\n elif folderUri.endswith('/projects'):\n self.projectCache = returnContents\n\n\n\n #-------------------- \n # Return the contents of the folder as a\n # dictionary of lists\n #-------------------- \n return returnContents",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def get_content(self):\r\n content = []\r\n for regiongroup in self.region_groups:\r\n for region in regiongroup.get_content():\r\n # Add date, unique_name and project to the metadata\r\n region[0]['date'] = self.extracted_date\r\n region[0]['unique_name'] = self.unique_name\r\n try:\r\n project = os.path.split(\r\n os.path.split(self.unique_name)[0]\r\n )[1]\r\n except IndexError:\r\n project = ''\r\n region[0]['project'] = project\r\n content.append(region)\r\n return content",
"def find_records():\r\n\r\n print(\"begin find records\")\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n # sensor_unit_list = retrieve_ref('sensor_unit_list')\r\n\r\n for study in study_list:\r\n # print('study = ' + str(study))\r\n source_path = os.path.join(study, 'source')\r\n # print('source_path = ' + str(source_path))\r\n\r\n source_folders = os.listdir(source_path)\r\n # print(str(study) + ' source_folders = ')\r\n # print(source_folders)\r\n\r\n df_meta = pd.DataFrame()\r\n df_meta['source_path'] = source_folders\r\n save_meta(study, df_meta)\r\n record_to_summary(study, 'Records found', str(len(source_folders)))\r\n\r\n print(\"completed find records\")",
"def _read_resource_map(cls, bag_content_path, hydroshare_host='www.hydroshare.org'):\n rmap_path = os.path.join(bag_content_path, 'data', 'resourcemap.xml')\n if not os.path.exists(rmap_path):\n raise GenericResourceMeta.ResourceMetaException(\"Resource map {0} does not exist\".format(rmap_path))\n if not os.access(rmap_path, os.R_OK):\n raise GenericResourceMeta.ResourceMetaException(\"Unable to read resource map {0}\".format(rmap_path))\n\n res_meta = {}\n\n g = Graph()\n g.parse(rmap_path)\n # Get resource ID\n for s, p, o in g.triples((None, None, None)):\n if s.endswith(\"resourcemap.xml\") and p == rdflib.namespace.DC.identifier:\n res_meta['id'] = str(o)\n if res_meta['id'] is None:\n msg = \"Unable to determine resource ID from resource map {0}\".format(rmap_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n logger.debug(\"Resource ID is {0}\".format(res_meta['id']))\n\n # Build URI reference for #aggregation section of resource map\n res_root_uri = \"http://{host}/resource/{res_id}\".format(host=hydroshare_host, res_id=res_meta['id'])\n root_uri = res_root_uri\n res_agg_subj = \"{res_root_url}/data/resourcemap.xml#aggregation\".format(res_root_url=res_root_uri)\n res_agg = URIRef(res_agg_subj)\n\n # Get resource type\n type_lit = g.value(res_agg, rdflib.namespace.DCTERMS.type)\n if type_lit is None:\n raise GenericResourceMeta.ResourceMetaException(\n \"No resource type found in resource map {0}\".format(rmap_path))\n # Type literal is represented as 'http://example.com/terms/GenericResource', we want the part after\n # the final '/', or 'GenericResource'\n res_type_part = str(type_lit).rpartition('/')\n if res_type_part[1] == '':\n raise GenericResourceMeta.ResourceMetaException(\n \"No resource type found in resource map {0}\".format(rmap_path))\n res_meta['type'] = res_type_part[-1]\n logger.debug(\"\\tType is {0}\".format(res_meta['type']))\n\n # Get resource title\n title_lit = g.value(res_agg, rdflib.namespace.DC.title)\n if title_lit is None:\n raise GenericResourceMeta.ResourceMetaException(\n \"No resource title found in resource map {0}\".format(rmap_path))\n res_meta['title'] = str(title_lit)\n logger.debug(\"\\tTitle is {0}\".format(res_meta['title']))\n\n # Get list of files in resource\n res_meta['files'] = []\n res_root_uri_withslash = res_root_uri + '/'\n res_meta_path = None\n ore = rdflib.namespace.Namespace('http://www.openarchives.org/ore/terms/')\n for s, p, o in g.triples((res_agg, ore.aggregates, None)):\n if o.endswith('resourcemetadata.xml'):\n if res_meta_path is not None and o != res_meta_path:\n msg = \"More than one resource metadata URI found. \"\n msg += \"(first: {first}, second: {second}\".format(first=res_meta_path,\n second=o)\n raise GenericResourceMeta.ResourceMetaException(msg)\n res_meta_path = o.split(res_root_uri_withslash)[1]\n continue\n\n res_meta['files'].append(o.split(res_root_uri_withslash)[1])\n\n if res_meta_path is None:\n raise GenericResourceMeta.ResourceMetaException(\n \"No resource metadata found in resource map {0}\".format(rmap_path))\n\n logger.debug(\"\\tResource metadata path {0}\".format(res_meta_path))\n\n for uri in res_meta['files']:\n logger.debug(\"\\tContents: {0}\".format(uri))\n\n return (root_uri, res_meta_path, res_meta)",
"def get_metadata_from_path(path):\n try:\n import yaml\n # assumes index card is in the top-level of path\n index_card = os.path.join(path, \"M_index.yaml\")\n with open(index_card, \"r\") as stream:\n file_info = yaml.safe_load(stream)\n\n metadata_dict = {}\n metadata_dict[\"book_id\"] = file_info[\"book_id\"]\n metadata_dict[\"timestamp_start\"] = file_info[\"start_time\"]\n metadata_dict[\"type\"] = file_info[\"type\"]\n metadata_dict[\"obsid\"] = _convert_book_id_to_obsid(file_info[\"book_id\"])\n # get optional bits\n if \"stop_time\" in file_info:\n metadata_dict[\"timestamp_end\"] = file_info[\"stop_time\"]\n if \"observatory\" in file_info:\n metadata_dict[\"observatory\"] = file_info[\"observatory\"]\n if \"telescope\" in file_info:\n metadata_dict[\"telescope\"] = file_info[\"telescope\"]\n if \"stream_ids\" in file_info:\n metadata_dict[\"stream_ids\"] = file_info[\"stream_ids\"]\n if \"subtype\" in file_info:\n metadata_dict[\"subtype\"] = file_info[\"subtype\"]\n if \"tags\" in file_info:\n metadata_dict[\"tags\"] = file_info[\"tags\"]\n if \"scanification\" in file_info:\n metadata_dict[\"scanification\"] = file_info[\"scanification\"]\n if \"hwp_rate_hz\" in file_info:\n metadata_dict[\"hwp_rate_hz\"] = file_info[\"hwp_rate_hz\"]\n if \"sequencer_ref\" in file_info:\n metadata_dict[\"sequencer_ref\"] = file_info[\"sequencer_ref\"]\n return metadata_dict\n except (ImportError, FileNotFoundError, KeyError):\n pass\n\n return None",
"def read_data(path: str):\n documents = {}\n queries = {}\n relevance = {}\n for doc in json.load(open(path + 'cranfield_data.json')):\n title = re.sub(r'\\s+', ' ', doc['title'])\n body = re.sub(r'\\s+', ' ', doc['body'][len(doc['title']):])\n documents[doc['id']] = Article(title=title, body=body)\n \n for query in json.load(open(path + 'cran.qry.json')):\n queries[query['query number']] = query['query']\n for rel in json.load(open(path + 'cranqrel.json')):\n query_id = int(rel['query_num'])\n doc_id = int(rel['id'])\n if query_id in relevance:\n relevance[query_id].append((doc_id, rel['position']))\n else:\n relevance[query_id] = [(doc_id, rel['position'])]\n return documents, queries, relevance",
"def parse_metadata(self):\n import csv\n f = open(self.seq_id_list)\n self.names = f.readlines()\n f.close()\n num_samples = len(self.names)\n for i in range(len(self.names)):\n self.names[i] = self.names[i].replace(\"\\n\", \"\")\n # Go through the combined metadata file - it has most of the data we need.\n metadata = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/combinedMetadata.csv\"))\n metadata_count = 0\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n # Need to look in external WGS spades as well.\n metadata = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/combinedMetadata.csv\"))\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n\n\n\n # Also need to go through the rMLST file to make sure that all rMLST genes are covered.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/rmlst.csv\"))\n metadata_count = 0\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n metadata_count += 1\n # Check external runs.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/rmlst.csv\"))\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n\n\n\n # Finally, need to get info on the MLST sequence type.\n metadata_count = 0\n mlst_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Also from External.\n mlst_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Go through the ROGA Summary file from the access DB to get strain/textual IDs, and 1' and 2' enzymes.\n try: # Assume we're using ROGA summary OLF. 
If it isn't there, assume ROGA summary OLC\n df = pd.read_excel('ROGA_summary_OLF.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['Isolate ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Textual ID'][i]\n self.metadata[seqid][\"1Enzyme\"] = df[\"1' Enzyme\"][i]\n self.metadata[seqid][\"2Enzyme\"] = df[\"2' Enzyme\"][i]\n self.metadata[seqid][\"Source\"] = df['Source'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n\n\n except FileNotFoundError: # Should be a file not found error - look it up.\n metadata_count = 0\n df = pd.read_excel('ROGA_summary_OLC.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['OLN ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Lab ID'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n # print(self.metadata)\n self.check_for_empty_data()",
"def getSampleList(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sample_list', [study_id, results])\n sample_list = {}\n for sample_name, sample_id in results:\n sample_list[sample_id] = sample_name\n return sample_list\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def read_data():\r\n\r\n if os.path.isfile(os.getcwd() + \"/www/access_list.txt\") and os.stat(os.getcwd() + \"/www/access_list.txt\").st_size != 0:\r\n data = json.load(open(os.getcwd() + \"/www/access_list.txt\"))\r\n return collections.defaultdict(dict, data)\r\n else:\r\n return collections.defaultdict(dict)",
"def read_locations(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n key_firms = db['nif']\n year = db['year']\n locs = db['locations']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, key_firms, year, locs, methodvalues",
"def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data",
"def getSplitLibrariesMappingFileData(self, study_id):\n\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n result_sets = {}\n con.cursor().callproc('qiime_assets.get_split_libarary_data', [study_id, results])\n\n mapping_file_header = '#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tRunPrefix\\tDescription'\n #for column in results.description:\n # mapping_file_header += column[0] + '\\t'\n\n for row in results:\n linker = row[2]\n primers = row[3]\n run_prefix = row[4]\n linker_primer_list = ''\n \n # handles null linkers\n if linker is None:\n linker=''\n if primers is None:\n primers=''\n # Create a comma-separated list of linker+primer sequences\n if ',' in primers:\n primer_list = primers.split(',')\n for primer in primer_list:\n linker_primer_list += '{0}{1},'.format(linker, primer)\n \n # Strip the trailing comma\n linker_primer_list = linker_primer_list[:-1]\n else:\n linker_primer_list = linker + primers\n\n # Adjust the row contents\n newrow = (row[0], row[1], linker_primer_list, row[4], row[5])\n\n # If this is the first time we've seen this run_prefix, create a new list \n # to hold the rows\n if run_prefix not in result_sets:\n result_sets[run_prefix] = []\n\n # Add the row to the right run_prefix heading\n result_sets[run_prefix].append(newrow)\n\n #raise Exception(str(result_sets))\n\n return mapping_file_header, result_sets\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception(str(e))",
"def queryFlywheel(project):\n\n # Create info dict with entries for each subject.\n info = dict()\n\n # Loop through subjects in project\n #for sub in subjects:\n for sub in project.subjects():\n\n # Loop through sessions in subject\n for ses in sub.sessions():\n ses = ses.reload()\n\n # Loop through acquisitions in session\n for acq in ses.acquisitions():\n acq = acq.reload()\n\n # Loop through files in acquisition\n for f in acq.files:\n \n # Skip over non-nifti files\n if f.type != 'nifti':\n next\n\n # Get Flywheel fileId to use as unique identifier\n fileId = f.id\n\n # Try to get timestamp (sometimes DateTime field isn't present.) \n try:\n timestamp = f.info['AcquisitionDateTime']\n except KeyError:\n try:\n timestamp = f.info['AcquisitionDate']\n # Set to None if field isn't present\n except:\n timestamp = pd.NaT\n \n # Try to get series number (sometimes field isn't present.) \n try:\n seriesNum = f.info['SeriesNumber']\n # Set to None if field isn't present\n except:\n np.NaN \n # Add the folowing metadata to study info dict:\n # fileID: [subId, sesId, acqLabel, fileName, seriesNum, timestamp]\n info[fileId] = [sub.label, ses.label, acq.label, f.name, seriesNum, timestamp]\n \n # Return project info dict\n return info",
"def read_metadata_from_resource_bag(cls, bag_content_path, hydroshare_host='www.hydroshare.org'):\n # Read resource map so that we know the resource type\n (root_uri, res_meta_path, res_meta) = cls._read_resource_map(bag_content_path,\n hydroshare_host)\n # Iterate over HydroShare resource types\n res_types = get_resource_types()\n for rt in res_types:\n rt_name = rt.__name__\n if rt_name == res_meta['type']:\n # Instantiate metadata class for resource type\n rt_root = rt.__module__.split('.')[0]\n rt_meta = \"{0}Meta\".format(rt_name)\n logger.debug(\"rt_meta: {0}\".format(rt_meta))\n mod_ser_name = \"{root}.serialization\".format(root=rt_root)\n logger.debug(\"mod_ser_name: {0}\".format(mod_ser_name))\n instance = None\n try:\n # Use __import__ to make sure mod_ser is compiled on import (else we can't import it)\n mod_ser = __import__(mod_ser_name, globals(), locals(), [rt_meta])\n metadata_class = getattr(mod_ser, rt_meta)\n instance = metadata_class()\n except AttributeError:\n msg = \"Unable to instantiate metadata deserializer for resource type {0}, \"\n msg += \"based on resource bag {1}\"\n msg = msg.format(res_meta['type'], bag_content_path)\n raise GenericResourceMeta.ResourceMetaException(msg)\n\n assert (instance is not None)\n\n # Populate core metadata\n instance.id = res_meta['id']\n instance.res_type = res_meta['type']\n instance.title = res_meta['title']\n instance.files = res_meta['files']\n\n # Read additional metadata\n instance.bag_content_path = bag_content_path\n instance.res_meta_path = res_meta_path\n instance.root_uri = root_uri\n instance._read_resource_metadata()\n\n return instance\n return None",
"def get_examples(data_dir, mode, task_id, shard_id):\n file_path = get_full_filename(data_dir, mode, task_id, shard_id)\n relative_path = \"/\".join(file_path.split(\"/\")[3:])\n tf.logging.info(\"Reading file: %s\" % (file_path))\n print(relative_path)\n #client = storage.Client(projectname, credentials=credentials)\n #bucket = client.get_bucket(bucket_name)\n blob = storage_bucket.blob(relative_path)\n if not blob.exists():\n tf.logging.info(\"Path doesn't exist\")\n return None\n nq_data = extract_nq_data(file_path)\n tf.logging.info(\"NQ data Size: \" + str(len(nq_data.keys())))\n\n tf.logging.info(\"Performing entity extraction\")\n fact_extracted_data = entity_link_nq(nq_data)\n return fact_extracted_data",
"def getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, workdir, dbh, DBReleaseIsAvailable, \\\n scope_dict, pfc_name=\"PoolFileCatalog.xml\", filesizeIn=[], checksumIn=[], thisExperiment=None):\n\n fileInfoDic = {} # FORMAT: fileInfoDic[file_nr] = (guid, pfn, size, checksum, filetype, copytool) - note: copytool not necessarily the same for all file (e.g. FAX case)\n replicas_dic = {} # FORMAT: { guid1: [replica1, .. ], .. } where replica1 is of type replica\n surl_filetype_dictionary = {} # FORMAT: { sfn1: filetype1, .. } (sfn = surl, filetype = DISK/TAPE)\n copytool_dictionary = {} # FORMAT: { surl1: copytool1, .. }\n totalFileSize = 0L\n ec = 0\n pilotErrorDiag = \"\"\n\n tolog(\"Preparing to build paths for input files\")\n\n # Get the site information object\n si = getSiteInformation(thisExperiment.getExperiment())\n\n # In case we are staging in files from an object store, we can do a short cut and skip the catalog lookups below\n copytool, dummy = getCopytool(mode=\"get\")\n if \"objectstore\" in copytool:\n tolog(\"Objectstore stage-in: cutting a few corners\")\n\n # Format: fileInfoDic[file_nr] = (guid, gpfn, size, checksum, filetype, copytool)\n # replicas_dic[guid1] = [replica1, ..]\n\n espath = si.getObjectstorePath(\"eventservice\") #getFilePathForObjectStore(filetype=\"eventservice\")\n logpath = si.getObjectstorePath(\"logs\") #getFilePathForObjectStore(filetype=\"logs\")\n\n i = 0\n try:\n for lfn in lfns:\n if \".log.\" in lfn:\n fullpath = os.path.join(logpath, lfns[i])\n else:\n fullpath = os.path.join(espath, lfns[i])\n fileInfoDic[i] = (guids[i], fullpath, filesizeIn[i], checksumIn[i], 'DISK', copytool) # filetype is always DISK on objectstores\n replicas_dic[guids[i]] = [fullpath]\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on objectstores\n i += 1\n except Exception, e:\n tolog(\"!!WARNING!!2233!! 
Failed to create replica and file dictionaries: %s\" % (e))\n ec = -1\n tolog(\"fileInfoDic=%s\" % str(fileInfoDic))\n tolog(\"replicas_dic=%s\" % str(replicas_dic))\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # If the pilot is running on a Tier 3 site, then neither LFC nor PFC should be used\n if si.isTier3():\n tolog(\"Getting file info on a Tier 3 site\")\n\n # Create file path to local SE (not used for scope based paths)\n path = sitemover.getTier3Path(dsname, DN) # note: dsname will only be correct for lib files, otherwise fix dsdict, currently empty for single lib file input?\n file_nr = -1\n for lfn in lfns:\n file_nr += 1\n\n # Use scope based path if possible\n# #if scope_dict and readpar('useruciopaths').lower() == \"true\":\n# if scope_dict and (\"/rucio\" in readpar('seprodpath') or \"/rucio\" in readpar('sepath')):\n# se_path = sitemover.getRucioPath(file_nr, tokens, scope_dict, lfn, path, analysisJob)\n# else:\n# se_path = os.path.join(path, lfn)\n se_path = os.path.join(path, lfn)\n\n # Get the file info\n ec, pilotErrorDiag, fsize, fchecksum = sitemover.getLocalFileInfo(se_path, csumtype=\"default\")\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Fill the dictionaries\n fileInfoDic[file_nr] = (guids[file_nr], se_path, fsize, fchecksum, 'DISK', copytool) # no tape on T3s, so filetype is always DISK\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on T3s\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n else:\n # Get the PFC from the proper source\n ec, pilotErrorDiag, xml_from_PFC, xml_source, replicas_dic, surl_filetype_dictionary, copytool_dictionary = \\\n getPoolFileCatalog(ub, guids, lfns, pinitdir, analysisJob, tokens, workdir, dbh,\\\n DBReleaseIsAvailable, scope_dict, filesizeIn, checksumIn,\\\n sitemover, pfc_name=pfc_name, thisExperiment=thisExperiment)\n\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n tolog(\"Using XML source %s\" % (xml_source))\n if xml_from_PFC == '':\n pilotErrorDiag = \"Failed to get PoolFileCatalog\"\n tolog(\"!!FAILED!!2999!! 
%s\" % (pilotErrorDiag))\n tolog(\"Mover get_data finished (failed)\")\n return error.ERR_NOPFC, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n xmldoc = minidom.parseString(xml_from_PFC) \n fileList = xmldoc.getElementsByTagName(\"File\")\n\n # Extracts the guids from the file list\n guids_filelist = getGuids(fileList)\n fileInfoDictionaryFromDispatcher = getFileInfoDictionaryFromDispatcher(lfns, filesizeIn, checksumIn) \n file_nr = -1\n for thisfile in fileList:\n file_nr += 1\n # Get the SURL and GUID from the XML\n gpfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n guid = guids_filelist[file_nr]\n\n # Get the filesize and checksum from the primary location (the dispatcher)\n _lfn = getLFN(gpfn, lfns) #os.path.basename(gpfn)\n\n # Remove any __DQ2 substring from the LFN if necessary\n if \"__DQ2\" in _lfn:\n _lfn = stripDQ2FromLFN(_lfn)\n fsize, fchecksum = getFileInfoFromDispatcher(_lfn, fileInfoDictionaryFromDispatcher)\n\n # Get the file info from the metadata [from LFC]\n if not fsize or not fchecksum:\n ec, pilotErrorDiag, fsize, fchecksum = getFileInfoFromMetadata(thisfile, guid, replicas_dic, region, sitemover, error)\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Even though checksum and file size is most likely already known from LFC, more reliable file\n # info is stored in Rucio. Try to get it from there unless the dispatcher has already sent it to the pilot\n if dsdict == {}:\n _dataset = dsname\n else:\n _dataset = getDataset(os.path.basename(gpfn), dsdict)\n _filesize, _checksum = sitemover.getFileInfoFromRucio(scope_dict[_lfn], _dataset, guid)\n if _filesize != \"\" and _checksum != \"\":\n if _filesize != fsize:\n tolog(\"!!WARNING!!1001!! Catalog file size (%s) not the same as Rucio file size (%s) (using Rucio value)\" % (fsize, _filesize))\n if _checksum != fchecksum:\n tolog(\"!!WARNING!!1001!! Catalog checksum (%s) not the same as Rucio checksum (%s) (using Rucio value)\" % (fchecksum, _checksum))\n fsize = _filesize\n fchecksum = _checksum\n\n # Get the filetype for this surl\n filetype = getFiletypeFromDictionary(gpfn, surl_filetype_dictionary)\n\n # Extract the copytool for this PFN\n _copytool = extractCopytoolForPFN(gpfn, copytool_dictionary)\n\n # Store in the file info dictionary\n fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, _copytool)\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic",
"def sdf_reader(cls, filename, dbIdentifier = \"LM_ID\"):\n res_dict = {}\n with open(filename) as fp:\n line = fp.readline()\n line_id = \"\"\n line_dict = {}\n while line:\n if line.startswith(\">\"):\n if dbIdentifier in line:\n if line_id:\n res_dict[line_id] = line_dict\n line_dict = {}\n line_id = \"\"\n line_id = fp.readline().rstrip()\n else:\n key = line.split(\"<\")[1].split(\">\")[0]\n line_dict[key] = fp.readline().rstrip()\n line = fp.readline()\n\n fp.close()\n return res_dict",
"def _ReadSessionConfiguration(self, path, knowledge_base_object):\n storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(\n path)\n\n for session in storage_reader.GetSessions():\n if not session.source_configurations:\n storage_reader.ReadSystemConfiguration(knowledge_base_object)\n else:\n for source_configuration in session.source_configurations:\n knowledge_base_object.ReadSystemConfigurationArtifact(\n source_configuration.system_configuration,\n session_identifier=session.identifier)"
] | [
"0.6450273",
"0.57527316",
"0.56171817",
"0.5374528",
"0.52916473",
"0.52504206",
"0.5133669",
"0.50766104",
"0.5059053",
"0.50468785",
"0.50408566",
"0.5025768",
"0.50213546",
"0.5007927",
"0.5003622",
"0.49593022",
"0.4933783",
"0.4924062",
"0.49204034",
"0.48943752",
"0.48679718",
"0.48565224",
"0.48472777",
"0.48354045",
"0.48237455",
"0.48234624",
"0.4815425",
"0.47852212",
"0.47831348",
"0.47768253"
] | 0.71390635 | 0 |
Read all provenance entries from the metadata store for a given project. context: Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns: a dictionary of key/value pairs from the provenance section of the project metadata | def readProvenanceEntries(context):
return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readAssetProvenanceObjects(context):\n assetProvenanceObjects = []\n provenance = GenericMetadata.readProvenanceEntries(context)\n try:\n assets = provenance['entities'].split(GenericMetadata.VALUE_DELIM)\n for asset in assets:\n assetProvenanceObjects.append(AssetProvenance.readFromMetadata(context, asset))\n except KeyError:\n pass\n return assetProvenanceObjects",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def _readEntriesForSection(projectDir, section):\n sectionDict = dict()\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(section):\n items = config.items(section)\n for item in items:\n sectionDict[item[0]] = item[1]\n \n return sectionDict",
"def _get_file_entity_provenance_dict(syn, entity):\n try:\n prov = syn.getProvenance(entity)\n return {'used': ';'.join(prov._getUsedStringList()),\n 'executed': ';'.join(prov._getExecutedStringList()),\n 'activityName': prov.get('name', ''),\n 'activityDescription': prov.get('description', '')}\n except SynapseHTTPError as e:\n if e.response.status_code == 404:\n return {} # No provenance present return empty dict\n else:\n raise # unexpected error so we re-raise the exception",
"def load_provenance(self):\n\n try:\n entry = self._get_nearest_entry_with_artifact()\n if entry is None:\n return None\n return entry.provenance\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)",
"def test_get_provenance(self):\n filename = 'mpciMeanImage.mlapdv_estimate.npy'\n provenance = MesoscopeFOV.get_provenance(filename)\n self.assertEqual('ESTIMATE', provenance.name)\n filename = 'mpciROIs.brainLocation_ccf_2017.npy'\n provenance = MesoscopeFOV.get_provenance(filename)\n self.assertEqual('HISTOLOGY', provenance.name)",
"def compute_context(modules: List[ModuleHandle]) -> Dict[str, ArtifactDescriptor]:\n context: Dict[str, ArtifactDescriptor] = {}\n for m in modules:\n context = m.provenance.get_database_state(context)\n return context",
"def retrieve_metadata(self, _vx):\n\t\tif (_vx):\n\t\t\tvx_files = _vx.get_files()\n\t\t\tif (len(vx_files) == 0):\n\t\t\t\treturn {}\n\t\t\telif (len(vx_files) == 1):\n\t\t\t\treturn self.retrieve_metadata_single_file(vx_files[0])\n\t\t\telse:\n\t\t\t\treturn self.retrieve_metadata_multiple_files(vx_files)\n\t\telse:\n\t\t\traise NullOrEmptyArgumentException()",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def readFromMetadata(cls, context, fqId):\n newInstance = AssetProvenance()\n (newInstance.section, newInstance.name) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n \n provenance = GenericMetadata.readProvenanceEntries(context)\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n newInstance.dcIdentifier = provenance[dcIdentifier]\n dcSource = keyProto + 'dc.source'\n newInstance.dcSource = provenance[dcSource]\n dcTitle = keyProto + 'dc.title'\n newInstance.dcTitle = provenance[dcTitle]\n dcDate = keyProto + 'dc.date'\n newInstance.dcDate = datetime.strptime(provenance[dcDate], AssetProvenance.FMT_DATE)\n dcPublisher = keyProto + 'dc.publisher'\n newInstance.dcPublisher = provenance[dcPublisher]\n dcDescription = keyProto + 'dc.description'\n newInstance.dcDescription = provenance[dcDescription]\n processingNotes = keyProto + 'processing_notes'\n newInstance.processingNotes = provenance[processingNotes]\n \n return newInstance",
"def _prep_metadata(md_sect, path):\n if not set(md_sect).issuperset(metadata_required_fields):\n missing = metadata_required_fields - set(md_sect)\n raise ConfigError(\"Required fields missing: \" + '\\n'.join(missing))\n\n module = md_sect.get('module')\n if not module.isidentifier():\n raise ConfigError(\"Module name %r is not a valid identifier\" % module)\n\n md_dict = {}\n\n # Description file\n if 'description-file' in md_sect:\n description_file = path.parent / md_sect.get('description-file')\n try:\n with description_file.open(encoding='utf-8') as f:\n raw_desc = f.read()\n except FileNotFoundError:\n raise ConfigError(\n \"Description file {} does not exist\".format(description_file)\n )\n ext = description_file.suffix\n try:\n mimetype = readme_ext_to_content_type[ext]\n except KeyError:\n log.warning(\"Unknown extension %r for description file.\", ext)\n log.warning(\" Recognised extensions: %s\",\n \" \".join(readme_ext_to_content_type))\n mimetype = None\n\n if mimetype == 'text/x-rst':\n # rst check\n stream = io.StringIO()\n res = render(raw_desc, stream)\n if not res:\n log.warning(\"The file description seems not to be valid rst for PyPI;\"\n \" it will be interpreted as plain text\")\n log.warning(stream.getvalue())\n\n md_dict['description'] = raw_desc\n md_dict['description_content_type'] = mimetype\n\n if 'urls' in md_sect:\n project_urls = md_dict['project_urls'] = []\n for label, url in sorted(md_sect.pop('urls').items()):\n project_urls.append(\"{}, {}\".format(label, url))\n\n for key, value in md_sect.items():\n if key in {'description-file', 'module'}:\n continue\n if key not in metadata_allowed_fields:\n closest = difflib.get_close_matches(key, metadata_allowed_fields,\n n=1, cutoff=0.7)\n msg = \"Unrecognised metadata key: {!r}\".format(key)\n if closest:\n msg += \" (did you mean {!r}?)\".format(closest[0])\n raise ConfigError(msg)\n\n k2 = key.replace('-', '_')\n md_dict[k2] = value\n if key in metadata_list_fields:\n if not isinstance(value, list):\n raise ConfigError('Expected a list for {} field, found {!r}'\n .format(key, value))\n if not all(isinstance(a, str) for a in value):\n raise ConfigError('Expected a list of strings for {} field'\n .format(key))\n elif key == 'requires-extra':\n if not isinstance(value, dict):\n raise ConfigError('Expected a dict for requires-extra field, found {!r}'\n .format(value))\n if not all(isinstance(e, list) for e in value.values()):\n raise ConfigError('Expected a dict of lists for requires-extra field')\n for e, reqs in value.items():\n if not all(isinstance(a, str) for a in reqs):\n raise ConfigError('Expected a string list for requires-extra. (extra {})'\n .format(e))\n else:\n if not isinstance(value, str):\n raise ConfigError('Expected a string for {} field, found {!r}'\n .format(key, value))\n\n # What we call requires in the ini file is technically requires_dist in\n # the metadata.\n if 'requires' in md_dict:\n md_dict['requires_dist'] = md_dict.pop('requires')\n\n # And what we call dist-name is name in the metadata\n if 'dist_name' in md_dict:\n md_dict['name'] = md_dict.pop('dist_name')\n\n # Move dev-requires into requires-extra\n reqs_noextra = md_dict.pop('requires_dist', [])\n reqs_by_extra = md_dict.pop('requires_extra', {})\n dev_requires = md_dict.pop('dev_requires', None)\n if dev_requires is not None:\n if 'dev' in reqs_by_extra:\n raise ConfigError(\n 'dev-requires occurs together with its replacement requires-extra.dev.')\n else:\n log.warning(\n '“dev-requires = ...” is obsolete. 
Use “requires-extra = {\"dev\" = ...}” instead.')\n reqs_by_extra['dev'] = dev_requires\n\n # Add requires-extra requirements into requires_dist\n md_dict['requires_dist'] = \\\n reqs_noextra + list(_expand_requires_extra(reqs_by_extra))\n\n md_dict['provides_extra'] = sorted(reqs_by_extra.keys())\n\n # For internal use, record the main requirements as a '.none' extra.\n reqs_by_extra['.none'] = reqs_noextra\n\n return md_dict, module, reqs_by_extra",
"def get_entries(self):\n prefixes = self.spot_mappings\n with open(self.path, 'r') as f:\n prefix_key = self.seek_through_comments(f).rsplit(\"/\", 1)[-1]\n prefix = prefixes[prefix_key]\n\n for ln in self.split_log_lines(f, \"|\", prefix):\n yield LogItem(*ln).get_properties()",
"def read_metadata(dirname, use_gpu):\n try:\n if not os.path.isdir(dirname):\n pass\n elif not os.path.exists(os.path.join(dirname, 'metadata.json')):\n pass\n else:\n with open(os.path.join(dirname, 'metadata.json')) as f:\n metadata = json.load(f)\n if use_gpu and ('container_gpu' in metadata):\n container = metadata['container_gpu']\n else:\n container = metadata['container']\n entry_point = metadata['entry_point']\n except (IOError, KeyError, ValueError):\n print('Failed to read metadata from defense directory ', dirname)\n return (container, entry_point)",
"def getProvenance(self, imageFilepath, maskFilepath, mask):\n self.logger.info('Adding additional extraction information')\n\n provenanceVector = collections.OrderedDict()\n generalinfoClass = generalinfo.GeneralInfo(imageFilepath, maskFilepath, mask, self.kwargs, self.inputImages)\n for k, v in six.iteritems(generalinfoClass.execute()):\n provenanceVector['general_info_%s' % (k)] = v\n return provenanceVector",
"def set_provenance_map(self, kwargs: Dict):\n if \"default_provenance\" in kwargs:\n self.default_provenance = kwargs.pop(\"default_provenance\")\n\n ksf_found = []\n for ksf in knowledge_provenance_properties:\n if ksf in kwargs:\n ksf_found.append(ksf)\n ksf_value = kwargs.pop(ksf)\n if isinstance(ksf_value, dict):\n for ksf_pattern in ksf_value.keys():\n log.debug(\"ksf_pattern: \", ksf_pattern)\n if ksf not in self.mapping:\n log.debug(\"not in the mapping\", ksf)\n self.mapping[ksf] = dict()\n log.debug(\"self.mapping[ksf]: \", self.mapping[ksf])\n ir = self.get_mapping(ksf)\n self.mapping[ksf][ksf_pattern] = ir.set_provenance_map_entry(\n ksf_value[ksf_pattern]\n )\n log.debug(\"self.mapping[ksf][ksf_pattern]: \", self.mapping[ksf][ksf_pattern])\n else:\n ir = self.get_mapping(ksf)\n self.mapping[ksf] = ir.set_provenance_map_entry(ksf_value)\n # if none specified, add at least one generic 'knowledge_source'\n if len(ksf_found) == 0:\n ir = self.get_mapping(\"knowledge_source\")\n if \"name\" in kwargs:\n self.mapping[\"knowledge_source\"] = ir.default(kwargs[\"name\"])\n else:\n self.mapping[\"knowledge_source\"] = ir.default(self.default_provenance)\n if \"provided_by\" not in self.mapping:\n ir = self.get_mapping(\"provided_by\")\n self.mapping[\"provided_by\"] = ir.default(self.default_provenance)",
"def read_metadata(metapath):\r\n with open(metapath) as metaFile:\r\n metadata = {}\r\n for line in metaFile.readlines():\r\n if \"=\" in line: # Get only key-value pairs\r\n l = line.split(\"=\")\r\n metadata[l[0].strip()] = l[1].strip()\r\n\r\n return metadata",
"def walk_experimental(project_id, version=None):\n from urllib import parse\n doc = IndexedPublication.from_id(project_id, revision=version)\n relation_map = []\n\n project_meta = format_metadata_for_fedora(project_id, version=version)\n if version:\n project_id = '{}v{}'.format(project_id, str(version))\n license = project_meta.get('license', None)\n full_author_list = []\n project_map = {\n 'uuid': doc.project.uuid,\n 'container_path': project_id,\n 'fedora_mapping': {**project_meta, 'generated': [], 'license': None},\n 'fileObjs': []\n }\n\n experiments_list = doc.experimentsList\n for expt in experiments_list:\n # Do stuff with experiment.\n expt_container_path = \"{}/{}\".format(project_id, parse.quote(expt.value.title))\n print('experiment ' + expt.value.title)\n exp_doi = expt.doi\n project_map['fedora_mapping']['generated'].append('Experiment: {}'.format(exp_doi))\n\n experiment_map = {\n 'uuid': expt.uuid,\n 'container_path': expt_container_path,\n 'fedora_mapping': {**format_experiment(expt), 'license': license, 'wasGeneratedBy': project_id, 'generated': []},\n 'fileObjs': expt.fileObjs\n }\n\n full_author_list += experiment_map['fedora_mapping']['creator']\n\n reports = filter(\n lambda report: expt.uuid in report.value.experiments,\n getattr(doc, 'reportsList', []))\n for report in reports:\n # Do stuff with report.\n report_container_path = \"{}/{}\".format(expt_container_path, parse.quote(report.value.title))\n print('\\treport ' + report.value.title)\n experiment_map['fedora_mapping']['generated'].append('Report: {}'.format(report.value.title))\n\n report_map = {\n 'uuid': report.uuid,\n 'fileObjs': report.fileObjs,\n 'container_path': report_container_path,\n 'fedora_mapping': {**format_report(report), 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi)}\n }\n relation_map.append(report_map)\n\n analysis_list = filter(\n lambda analysis: expt.uuid in analysis.value.experiments,\n getattr(doc, 'analysisList', []))\n for analysis in analysis_list:\n # Do stuff with analysis.\n analysis_container_path = \"{}/{}\".format(expt_container_path, parse.quote(analysis.value.title))\n print('\\tanalysis ' + analysis.value.title)\n experiment_map['fedora_mapping']['generated'].append('Analysis: {}'.format(analysis.value.title))\n\n analysis_map = {\n 'uuid': analysis.uuid,\n 'fileObjs': analysis.fileObjs,\n 'container_path': analysis_container_path,\n 'fedora_mapping': {**format_analysis(analysis), 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi)}\n\n }\n relation_map.append(analysis_map)\n\n model_configs = filter(\n lambda model_config: expt.uuid in model_config.value.experiments,\n getattr(doc, 'modelConfigs', []))\n for mc in model_configs:\n # Do stuff with model config.\n configs_container_path = \"{}/{}\".format(expt_container_path, parse.quote(mc.value.title))\n print('\\tmodel config ' + mc.value.title)\n experiment_map['fedora_mapping']['generated'].append('Model Configuration: {}'.format(mc.value.title))\n\n mc_map = {\n 'uuid': mc.uuid,\n 'fileObjs': mc.fileObjs,\n 'container_path': configs_container_path,\n 'fedora_mapping': {**format_model_config(mc), 'wasGeneratedBy': exp_doi}\n }\n\n sensor_lists = filter(\n lambda sensor_list: mc.uuid in sensor_list.value.modelConfigs and expt.uuid in sensor_list.associationIds,\n getattr(doc, 'sensorLists', []))\n for sl in sensor_lists:\n # Do stuff with sensor list.\n sl_container_path = \"{}/{}\".format(configs_container_path, parse.quote(sl.value.title))\n print('\\t\\tsensor list ' + sl.value.title)\n 
experiment_map['fedora_mapping']['generated'].append('Sensor: {}'.format(sl.value.title))\n\n sl_map = {\n 'uuid': sl.uuid,\n 'fileObjs': sl.fileObjs,\n 'container_path': sl_container_path,\n 'fedora_mapping': {**format_sensor_info(sl),\n 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi),\n 'wasDerivedFrom': 'Model Configuration: {}'.format(mc.value.title),\n 'influenced': []}\n }\n\n events = filter(\n lambda event: sl.uuid in event.value.sensorLists and expt.uuid in event.associationIds and mc.uuid in event.associationIds,\n getattr(doc, 'eventsList', []))\n for event in events:\n # Do stuff with events.\n evt_container_path = \"{}/{}\".format(sl_container_path, parse.quote(event.value.title))\n print('\\t\\t\\tevent ' + event.value.title)\n sl_map['fedora_mapping']['influenced'].append('Event: {}'.format(event.value.title))\n experiment_map['fedora_mapping']['generated'].append('Event: {}'.format(event.value.title))\n\n event_map = {\n 'uuid': event.uuid,\n 'fileObjs': event.fileObjs,\n 'container_path': evt_container_path,\n 'fedora_mapping': {**format_event(event),\n 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi),\n 'wasDerivedFrom': 'Model Configuration: {}'.format(mc.value.title),\n 'wasInfluencedBy': 'Sensor: {}'.format(sl.value.title)}\n }\n relation_map.append(event_map)\n relation_map.append(sl_map)\n relation_map.append(mc_map)\n relation_map.append(experiment_map)\n project_map['fedora_mapping']['creator'] = list(set(full_author_list))\n relation_map.append(project_map)\n\n return relation_map[::-1]",
"def get_data() -> dict:\n project_dir = Path(__file__).parent.parent\n metadata = toml.load(project_dir / \"pyproject.toml\")[\"tool\"][\"poetry\"]\n lock_data = toml.load(project_dir / \"poetry.lock\")\n project_name = metadata[\"name\"]\n\n poetry_dependencies = chain(metadata[\"dependencies\"].keys(), metadata[\"dev-dependencies\"].keys())\n direct_dependencies = sorted(dep.lower() for dep in poetry_dependencies)\n direct_dependencies.remove(\"python\")\n\n indirect_dependencies = sorted(\n pkg[\"name\"] for pkg in lock_data[\"package\"] if pkg[\"name\"] not in direct_dependencies\n )\n\n dependencies = direct_dependencies + indirect_dependencies\n packages = {pkg[\"name\"]: clean_info(pkg) for pkg in search_packages_info(dependencies)}\n # poetry.lock seems to always use lowercase for packages names\n packages.update({name.lower(): pkg for name, pkg in packages.items()}) # noqa: WPS221 (not that complex)\n\n for dependency in dependencies:\n if dependency not in packages:\n pkg_data = httpx.get(f\"https://pypi.python.org/pypi/{dependency}/json\").json()[\"info\"]\n home_page = pkg_data[\"home_page\"] or pkg_data[\"project_url\"] or pkg_data[\"package_url\"]\n pkg_name = pkg_data[\"name\"]\n pkg = {\"name\": pkg_name, \"home-page\": home_page}\n packages.update({pkg_name: pkg, pkg_name.lower(): pkg})\n\n return {\n \"project_name\": project_name,\n \"direct_dependencies\": direct_dependencies,\n \"indirect_dependencies\": indirect_dependencies,\n \"package_info\": packages,\n }",
"def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)",
"def read_project(path: str):\n textfilecontent = {}\n\n # Discover .txt files and add them to the dictionary\n for filepath in iglob(os.path.join(path, '**/*.txt'), recursive=True):\n add_path_dict(input_dict=textfilecontent, start_path=path,\n file_path=filepath)\n\n return textfilecontent",
"def read_data(path: str):\n documents = {}\n queries = {}\n relevance = {}\n for doc in json.load(open(path + 'cranfield_data.json')):\n title = re.sub(r'\\s+', ' ', doc['title'])\n body = re.sub(r'\\s+', ' ', doc['body'][len(doc['title']):])\n documents[doc['id']] = Article(title=title, body=body)\n \n for query in json.load(open(path + 'cran.qry.json')):\n queries[query['query number']] = query['query']\n for rel in json.load(open(path + 'cranqrel.json')):\n query_id = int(rel['query_num'])\n doc_id = int(rel['id'])\n if query_id in relevance:\n relevance[query_id].append((doc_id, rel['position']))\n else:\n relevance[query_id] = [(doc_id, rel['position'])]\n return documents, queries, relevance",
"def _get_persistent_dict(filename=DBM_FILE):\n from wikipediabase.persistentkv import PersistentDict\n\n return _context_get(filename, 'peristent_store', PersistentDict)",
"def checkMetadataVersion(projectDir):\n metadataFilepath = os.path.join(projectDir, GenericMetadata.METADATA_FILENAME)\n if os.path.exists(metadataFilepath):\n if not os.access(metadataFilepath, os.R_OK):\n raise IOError(errno.EACCES, \"Unable to read metadata store for project %s\" % \\\n (projectDir,))\n # Read metadata store\n config = ConfigParser.RawConfigParser()\n config.read(metadataFilepath)\n if config.has_section(GenericMetadata.ECOHYDROLIB_SECION):\n if config.has_option(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY):\n metadataVersion = config.get(GenericMetadata.ECOHYDROLIB_SECION, \\\n GenericMetadata.VERSION_KEY)\n if metadataVersion != GenericMetadata._ecohydrolibVersion:\n raise MetadataVersionError(metadataVersion)",
"def _load(self):\n graph = self.context.parent.graph.get_context(self.context.identifier)\n data = {}\n for (_, p, o) in graph.triples((self.context.identifier, None, None)):\n if not p.startswith(META):\n continue\n name = p[len(META):]\n data[name] = o.toPython()\n return data",
"def _store_package_metadata(self):\n\n context = self._config.context\n log.debug('processing chef_json file {0} for package metadata'.format(self._get_chef_json_full_path()))\n with open(self._get_chef_json_full_path()) as chef_json_file:\n chef_json = json.load(chef_json_file)\n log.debug(chef_json.dump)\n\n context.package.attributes = {}\n for x in self._config.pkg_attributes:\n context.package.attributes[x] = chef_json.get(x, None)",
"def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()",
"def _read_info_resources(self, **kwargs):\n info = {'keypairs': {},\n 'flavors': {},\n 'user_quotas': [],\n 'project_quotas': []}\n\n for keypair in self.get_keypair_list():\n info['keypairs'][keypair.id] = self.convert(keypair)\n\n for flavor in self.get_flavor_list():\n info['flavors'][flavor.id] = self.convert(flavor)\n\n if self.config.migrate.migrate_quotas:\n self._read_info_quotas(info)\n\n return info",
"def peek_database(persistency_dir: Path, device_id: str):\n database_path = persistency_dir.joinpath(device_id, \"caching\", \"astarte.db\")\n properties = (\n sqlite3.connect(database_path).cursor().execute(\"SELECT * FROM properties\").fetchall()\n )\n parsed_properties = []\n for interface, major, path, value in properties:\n parsed_properties += [(interface, major, path, pickle.loads(value))]\n return parsed_properties",
"def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data",
"def _compute_context(self):\n # get PDBID\n pdbid = get_id(self.pdb_path)\n\n # initialize potential map\n self.potential_map = {}\n\n with open(self.pdb_path, 'r') as f:\n struc = PDBParser().get_structure(pdbid, f)\n\n for bond in self._compute_hydrogen_bonds(get_amino_acids(struc)):\n self.potential_map[bond[:2]] = bond[2:]\n\n self._compute_plain_from_map()"
] | [
"0.6217764",
"0.5992833",
"0.59291553",
"0.5594025",
"0.52970666",
"0.52535784",
"0.52164847",
"0.51783854",
"0.5124159",
"0.51027024",
"0.50449044",
"0.4907859",
"0.48889193",
"0.48857158",
"0.48617345",
"0.48610404",
"0.4849621",
"0.48259637",
"0.4825313",
"0.48153925",
"0.4809248",
"0.47999397",
"0.47912633",
"0.47731066",
"0.4757591",
"0.4735283",
"0.47294712",
"0.47069427",
"0.47024748",
"0.46997085"
] | 0.7970474 | 0 |
Read all asset provenance objects from metadata and store in AssetProvenance instances. context: Context object containing projectDir, the path of the project whose metadata store is to be read from. Returns: a list of AssetProvenance objects | def readAssetProvenanceObjects(context):
assetProvenanceObjects = []
provenance = GenericMetadata.readProvenanceEntries(context)
try:
assets = provenance['entities'].split(GenericMetadata.VALUE_DELIM)
for asset in assets:
assetProvenanceObjects.append(AssetProvenance.readFromMetadata(context, asset))
except KeyError:
pass
return assetProvenanceObjects | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)",
"def readFromMetadata(cls, context, fqId):\n newInstance = AssetProvenance()\n (newInstance.section, newInstance.name) = fqId.split(GenericMetadata.COMPOUND_KEY_SEP)\n \n provenance = GenericMetadata.readProvenanceEntries(context)\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n newInstance.dcIdentifier = provenance[dcIdentifier]\n dcSource = keyProto + 'dc.source'\n newInstance.dcSource = provenance[dcSource]\n dcTitle = keyProto + 'dc.title'\n newInstance.dcTitle = provenance[dcTitle]\n dcDate = keyProto + 'dc.date'\n newInstance.dcDate = datetime.strptime(provenance[dcDate], AssetProvenance.FMT_DATE)\n dcPublisher = keyProto + 'dc.publisher'\n newInstance.dcPublisher = provenance[dcPublisher]\n dcDescription = keyProto + 'dc.description'\n newInstance.dcDescription = provenance[dcDescription]\n processingNotes = keyProto + 'processing_notes'\n newInstance.processingNotes = provenance[processingNotes]\n \n return newInstance",
"def writeToMetadata(self, context):\n fqId = self.section + GenericMetadata.COMPOUND_KEY_SEP + self.name\n fqId = fqId.lower()\n \n # Write self to the appropriate section\n GenericMetadata.writeEntryToSection(context, self.section, self.name, self.dcIdentifier)\n \n # Write to provenance section\n provenanceEntries = GenericMetadata.readProvenanceEntries(context)\n try:\n entities = provenanceEntries['entities'].split(GenericMetadata.VALUE_DELIM)\n except KeyError:\n entities = []\n # Write entity metadata (overwrite if already present)\n keys = []\n values = []\n if fqId not in entities:\n entities.append(fqId)\n entitiesStr = GenericMetadata.VALUE_DELIM.join(entities)\n keys.append('entities'); values.append(entitiesStr)\n # Write attributes for entity\n keyProto = fqId + GenericMetadata.COMPOUND_KEY_SEP\n dcIdentifier = keyProto + 'dc.identifier'\n keys.append(dcIdentifier); values.append(self.dcIdentifier)\n dcSource = keyProto + 'dc.source'\n keys.append(dcSource); values.append(self.dcSource)\n dcTitle = keyProto + 'dc.title'\n keys.append(dcTitle); values.append(self.dcTitle)\n if self.dcDate:\n dcDate = keyProto + 'dc.date'\n keys.append(dcDate); values.append(self.dcDate.strftime(AssetProvenance.FMT_DATE))\n dcPublisher = keyProto + 'dc.publisher'\n keys.append(dcPublisher); values.append(self.dcPublisher)\n dcDescription = keyProto + 'dc.description'\n keys.append(dcDescription); values.append(self.dcDescription)\n processingNotes = keyProto + 'processing_notes'\n keys.append(processingNotes); values.append(self.processingNotes)\n GenericMetadata.writeProvenanceEntries(context, keys, values)",
"def test_get_provenance(self):\n filename = 'mpciMeanImage.mlapdv_estimate.npy'\n provenance = MesoscopeFOV.get_provenance(filename)\n self.assertEqual('ESTIMATE', provenance.name)\n filename = 'mpciROIs.brainLocation_ccf_2017.npy'\n provenance = MesoscopeFOV.get_provenance(filename)\n self.assertEqual('HISTOLOGY', provenance.name)",
"def compute_context(modules: List[ModuleHandle]) -> Dict[str, ArtifactDescriptor]:\n context: Dict[str, ArtifactDescriptor] = {}\n for m in modules:\n context = m.provenance.get_database_state(context)\n return context",
"def writeProvenanceEntries(context, keys, values):\n GenericMetadata._writeEntriesToSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION, keys, values)",
"def yield_publications_metadata(tf):\n logger.info('Start yielding...')\n with gzip.GzipFile(fileobj=tf, mode='r') as json_file:\n for index, line in enumerate(json_file):\n yield build_es_bulk(line.decode('utf-8'))",
"def retrieve_metadata(self, _vx):\n\t\tif (_vx):\n\t\t\tvx_files = _vx.get_files()\n\t\t\tif (len(vx_files) == 0):\n\t\t\t\treturn {}\n\t\t\telif (len(vx_files) == 1):\n\t\t\t\treturn self.retrieve_metadata_single_file(vx_files[0])\n\t\t\telse:\n\t\t\t\treturn self.retrieve_metadata_multiple_files(vx_files)\n\t\telse:\n\t\t\traise NullOrEmptyArgumentException()",
"def _compute_context(self):\n # get PDBID\n pdbid = get_id(self.pdb_path)\n\n # initialize potential map\n self.potential_map = {}\n\n with open(self.pdb_path, 'r') as f:\n struc = PDBParser().get_structure(pdbid, f)\n\n for bond in self._compute_hydrogen_bonds(get_amino_acids(struc)):\n self.potential_map[bond[:2]] = bond[2:]\n\n self._compute_plain_from_map()",
"def readManifestEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.MANIFEST_SECTION)",
"def load_provenance(self):\n\n try:\n entry = self._get_nearest_entry_with_artifact()\n if entry is None:\n return None\n return entry.provenance\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)",
"def generate_context(self):\r\n\r\n all_articles = []\r\n all_drafts = []\r\n for f in self.get_files(\r\n self.settings['ARTICLE_DIR'],\r\n exclude=self.settings['ARTICLE_EXCLUDES']):\r\n article = self.get_cached_data(f, None)\r\n if article is None:\r\n try:\r\n article = self.readers.read_file(\r\n base_path=self.path, path=f, content_class=Article,\r\n context=self.context,\r\n preread_signal=signals.article_generator_preread,\r\n preread_sender=self,\r\n context_signal=signals.article_generator_context,\r\n context_sender=self)\r\n except Exception as e:\r\n logger.warning('Could not process {}\\n{}'.format(f, e))\r\n continue\r\n\r\n if not is_valid_content(article, f):\r\n continue\r\n\r\n self.cache_data(f, article)\r\n\r\n self.add_source_path(article)\r\n\r\n if article.status.lower() == \"published\":\r\n all_articles.append(article)\r\n elif article.status.lower() == \"draft\":\r\n draft = self.readers.read_file(\r\n base_path=self.path, path=f, content_class=Draft,\r\n context=self.context,\r\n preread_signal=signals.article_generator_preread,\r\n preread_sender=self,\r\n context_signal=signals.article_generator_context,\r\n context_sender=self)\r\n all_drafts.append(draft)\r\n else:\r\n logger.warning(\"Unknown status %s for file %s, skipping it.\" %\r\n (repr(article.status),\r\n repr(f)))\r\n\r\n self.articles, self.translations = process_translations(all_articles)\r\n self.drafts, self.drafts_translations = \\\r\n process_translations(all_drafts)\r\n\r\n signals.article_generator_pretaxonomy.send(self)\r\n\r\n for article in self.articles:\r\n # only main articles are listed in categories and tags\r\n # not translations\r\n self.categories[article.category].append(article)\r\n if hasattr(article, 'tags'):\r\n for tag in article.tags:\r\n self.tags[tag].append(article)\r\n # ignore blank authors as well as undefined\r\n for author in getattr(article, 'authors', []):\r\n if author.name != '':\r\n self.authors[author].append(article)\r\n # sort the articles by date\r\n self.articles.sort(key=attrgetter('date'), reverse=True)\r\n self.dates = list(self.articles)\r\n self.dates.sort(key=attrgetter('date'),\r\n reverse=self.context['NEWEST_FIRST_ARCHIVES'])\r\n\r\n # create tag cloud\r\n tag_cloud = defaultdict(int)\r\n for article in self.articles:\r\n for tag in getattr(article, 'tags', []):\r\n tag_cloud[tag] += 1\r\n\r\n tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)\r\n tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]\r\n\r\n tags = list(map(itemgetter(1), tag_cloud))\r\n if tags:\r\n max_count = max(tags)\r\n steps = self.settings.get('TAG_CLOUD_STEPS')\r\n\r\n # calculate word sizes\r\n self.tag_cloud = [\r\n (\r\n tag,\r\n int(math.floor(steps - (steps - 1) * math.log(count)\r\n / (math.log(max_count)or 1)))\r\n )\r\n for tag, count in tag_cloud\r\n ]\r\n # put words in chaos\r\n random.shuffle(self.tag_cloud)\r\n\r\n # and generate the output :)\r\n\r\n # order the categories per name\r\n self.categories = list(self.categories.items())\r\n self.categories.sort(\r\n reverse=self.settings['REVERSE_CATEGORY_ORDER'])\r\n\r\n self.authors = list(self.authors.items())\r\n self.authors.sort()\r\n\r\n self._update_context(('articles', 'dates', 'tags', 'categories',\r\n 'tag_cloud', 'authors', 'related_posts'))\r\n self.save_cache()\r\n self.readers.save_cache()\r\n signals.article_generator_finalized.send(self)",
"def to_process(self, pdbs, **kwargs):\n current, _ = ReleaseLoader(self.config, self.session).current_id()\n data = []\n for loop_type in ReleaseLoader.types:\n cached = self.cached(loop_type)\n if not cached:\n raise core.InvalidState(\"No cached data\")\n\n if cached['release'] != current:\n raise core.InvalidState(\"Caching does not match excepted ID\")\n data.append((loop_type, current))\n return data",
"def _retrieve(self):\n # Get the projects for which we will retrieve the IAM policies.\n try:\n project_numbers = self.dao.get_project_numbers(\n self.RESOURCE_NAME, self.cycle_timestamp)\n except dao_errors.MySQLError as e:\n raise inventory_errors.LoadDataPipelineError(e)\n\n # Retrieve data from GCP.\n # Not using iterator since we will use the iam_policy_maps twice.\n iam_policy_maps = []\n for project_number in project_numbers:\n iam_policy = self.safe_api_call('get_project_iam_policies',\n self.RESOURCE_NAME,\n project_number)\n if iam_policy:\n iam_policy_map = {'project_number': project_number,\n 'iam_policy': iam_policy}\n iam_policy_maps.append(iam_policy_map)\n return iam_policy_maps",
"def walk(self):\n for project in [self.get_project(name)\n for name in self.project_names]:\n for sample in [project.get_sample(idx)\n for idx in project.sample_ids]:\n yield (project,sample)",
"def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences",
"def get_pr_data(repos, client_id, client_secret):\n pr_data = []\n for owner, repo in repos:\n repo_prs = get_pr_data_for_repo(\n owner, repo, client_id, client_secret)\n pr_data.extend(repo_prs)\n return pr_data",
"def readFromMetadata(cls, context, fqId):\n pass",
"def get_ead_components(context, delete=True):\n result = []\n for ead_file in context.db.query(EadFile):\n ead_file._context = context\n result += ead_file.extract_components()\n return result",
"def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data",
"def read_affectiva(self, filename=None, *args, **kwargs):\n if not filename:\n try:\n filename = self.filename\n except:\n print(\"filename must be specified.\")\n result = read_affectiva(filename, *args, **kwargs)\n for name in self._metadata:\n attr_value = getattr(self, name, None)\n if attr_value and getattr(result, name, None) == None:\n setattr(result, name, attr_value)\n return result",
"def _internal_method(all_assets, asset_idx):\n if asset_idx is None:\n raise ItemNotFoundError(asset_key)\n\n # Form an AssetMetadata.\n mdata = AssetMetadata(asset_key, asset_key.path)\n mdata.from_storable(all_assets[asset_idx])\n mdata.update(attr_dict)\n\n # Generate a Mongo doc from the metadata and update the course asset info.\n all_assets.insert_or_update(mdata)\n return all_assets",
"def test_provenance_extras():\n target = DummyTarget()\n provenance = target.provenance()\n assert \"qcsubmit\" in provenance\n assert \"openforcefield\" in provenance\n assert \"bespokefit\" in provenance\n assert \"openforcefield\" in provenance\n assert \"openforcefields\" in provenance\n assert provenance[\"target\"] == target.name\n\n # now add qcsubmit and call again\n target._extra_dependencies.append(\"openeye\")\n provenance = target.provenance()\n assert \"openeye\" in provenance",
"def load_file_data_from_db(self):\n\n file_objs = self.file_queryset.filter(sip=self.sip, removedtime__isnull=True)\n for file_obj in self._batch_query(file_objs):\n self.file_events = get_file_events(file_obj)\n if not self.file_events:\n return\n try:\n # merge the map_file_data dict with the map_av_data\n mapped_file_info = merge_file_data_dicts(\n map_file_data(file_obj, self.file_events), map_av_data(file_obj)\n )\n self.md_info[\"files\"].append(mapped_file_info)\n self.md_info[\"premis:size\"] = create_package_size(\n mapped_file_info[\"premis:size\"]\n )\n self.md_info[\"amount_of_files\"] += 1\n failed_virus_checks = get_failed_virus_checks(self.file_events)\n if failed_virus_checks:\n self.md_info[\"virus_scan_info\"][\"failed_virus_checks\"].append(\n failed_virus_checks\n )\n passed_virus_checks = get_passed_virus_checks(self.file_events)\n # add info virus_scan_tools if they passed and respect\n # different tools and versions if needed.\n if (\n passed_virus_checks\n and passed_virus_checks\n not in self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"]\n ):\n self.md_info[\"virus_scan_info\"][\"virus_scan_tools\"].append(\n passed_virus_checks\n )\n except KeyError:\n logger.info(\n \"File is no longer present on the filesystem: %s\",\n file_obj.currentlocation,\n )\n continue",
"def walk_experimental(project_id, version=None):\n from urllib import parse\n doc = IndexedPublication.from_id(project_id, revision=version)\n relation_map = []\n\n project_meta = format_metadata_for_fedora(project_id, version=version)\n if version:\n project_id = '{}v{}'.format(project_id, str(version))\n license = project_meta.get('license', None)\n full_author_list = []\n project_map = {\n 'uuid': doc.project.uuid,\n 'container_path': project_id,\n 'fedora_mapping': {**project_meta, 'generated': [], 'license': None},\n 'fileObjs': []\n }\n\n experiments_list = doc.experimentsList\n for expt in experiments_list:\n # Do stuff with experiment.\n expt_container_path = \"{}/{}\".format(project_id, parse.quote(expt.value.title))\n print('experiment ' + expt.value.title)\n exp_doi = expt.doi\n project_map['fedora_mapping']['generated'].append('Experiment: {}'.format(exp_doi))\n\n experiment_map = {\n 'uuid': expt.uuid,\n 'container_path': expt_container_path,\n 'fedora_mapping': {**format_experiment(expt), 'license': license, 'wasGeneratedBy': project_id, 'generated': []},\n 'fileObjs': expt.fileObjs\n }\n\n full_author_list += experiment_map['fedora_mapping']['creator']\n\n reports = filter(\n lambda report: expt.uuid in report.value.experiments,\n getattr(doc, 'reportsList', []))\n for report in reports:\n # Do stuff with report.\n report_container_path = \"{}/{}\".format(expt_container_path, parse.quote(report.value.title))\n print('\\treport ' + report.value.title)\n experiment_map['fedora_mapping']['generated'].append('Report: {}'.format(report.value.title))\n\n report_map = {\n 'uuid': report.uuid,\n 'fileObjs': report.fileObjs,\n 'container_path': report_container_path,\n 'fedora_mapping': {**format_report(report), 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi)}\n }\n relation_map.append(report_map)\n\n analysis_list = filter(\n lambda analysis: expt.uuid in analysis.value.experiments,\n getattr(doc, 'analysisList', []))\n for analysis in analysis_list:\n # Do stuff with analysis.\n analysis_container_path = \"{}/{}\".format(expt_container_path, parse.quote(analysis.value.title))\n print('\\tanalysis ' + analysis.value.title)\n experiment_map['fedora_mapping']['generated'].append('Analysis: {}'.format(analysis.value.title))\n\n analysis_map = {\n 'uuid': analysis.uuid,\n 'fileObjs': analysis.fileObjs,\n 'container_path': analysis_container_path,\n 'fedora_mapping': {**format_analysis(analysis), 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi)}\n\n }\n relation_map.append(analysis_map)\n\n model_configs = filter(\n lambda model_config: expt.uuid in model_config.value.experiments,\n getattr(doc, 'modelConfigs', []))\n for mc in model_configs:\n # Do stuff with model config.\n configs_container_path = \"{}/{}\".format(expt_container_path, parse.quote(mc.value.title))\n print('\\tmodel config ' + mc.value.title)\n experiment_map['fedora_mapping']['generated'].append('Model Configuration: {}'.format(mc.value.title))\n\n mc_map = {\n 'uuid': mc.uuid,\n 'fileObjs': mc.fileObjs,\n 'container_path': configs_container_path,\n 'fedora_mapping': {**format_model_config(mc), 'wasGeneratedBy': exp_doi}\n }\n\n sensor_lists = filter(\n lambda sensor_list: mc.uuid in sensor_list.value.modelConfigs and expt.uuid in sensor_list.associationIds,\n getattr(doc, 'sensorLists', []))\n for sl in sensor_lists:\n # Do stuff with sensor list.\n sl_container_path = \"{}/{}\".format(configs_container_path, parse.quote(sl.value.title))\n print('\\t\\tsensor list ' + sl.value.title)\n 
experiment_map['fedora_mapping']['generated'].append('Sensor: {}'.format(sl.value.title))\n\n sl_map = {\n 'uuid': sl.uuid,\n 'fileObjs': sl.fileObjs,\n 'container_path': sl_container_path,\n 'fedora_mapping': {**format_sensor_info(sl),\n 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi),\n 'wasDerivedFrom': 'Model Configuration: {}'.format(mc.value.title),\n 'influenced': []}\n }\n\n events = filter(\n lambda event: sl.uuid in event.value.sensorLists and expt.uuid in event.associationIds and mc.uuid in event.associationIds,\n getattr(doc, 'eventsList', []))\n for event in events:\n # Do stuff with events.\n evt_container_path = \"{}/{}\".format(sl_container_path, parse.quote(event.value.title))\n print('\\t\\t\\tevent ' + event.value.title)\n sl_map['fedora_mapping']['influenced'].append('Event: {}'.format(event.value.title))\n experiment_map['fedora_mapping']['generated'].append('Event: {}'.format(event.value.title))\n\n event_map = {\n 'uuid': event.uuid,\n 'fileObjs': event.fileObjs,\n 'container_path': evt_container_path,\n 'fedora_mapping': {**format_event(event),\n 'wasGeneratedBy': 'Experiment: {}'.format(exp_doi),\n 'wasDerivedFrom': 'Model Configuration: {}'.format(mc.value.title),\n 'wasInfluencedBy': 'Sensor: {}'.format(sl.value.title)}\n }\n relation_map.append(event_map)\n relation_map.append(sl_map)\n relation_map.append(mc_map)\n relation_map.append(experiment_map)\n project_map['fedora_mapping']['creator'] = list(set(full_author_list))\n relation_map.append(project_map)\n\n return relation_map[::-1]",
"def filectxs(self):\n mf = self.manifest()\n m = mf.keys()\n m.sort()\n for f in m:\n yield self.filectx(f, fileid=mf[f])",
"def _load(self):\n graph = self.context.parent.graph.get_context(self.context.identifier)\n data = {}\n for (_, p, o) in graph.triples((self.context.identifier, None, None)):\n if not p.startswith(META):\n continue\n name = p[len(META):]\n data[name] = o.toPython()\n return data",
"def obj_initialization(cls):\n listimdata = cls.retrieve_json()\n for elem in listimdata:\n CloudCtx.retrieve_from_json(elem)",
"def inject_files():\n for filename, arcname in INJECT_FILES.items():\n filename = os.path.join('bee2', 'inject', filename)\n if os.path.exists(filename):\n yield filename, arcname\n\n # Additionally add files set in the config.\n for prop in CONF.find_children('InjectFiles'):\n filename = os.path.join('bee2', 'inject', prop.real_name)\n if os.path.exists(filename):\n yield filename, prop.value",
"def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = csv.reader(TextIOWrapper(zf.open(item, 'r'), 'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in 
load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd"
] | [
"0.7364113",
"0.5579493",
"0.5348965",
"0.5258582",
"0.5069704",
"0.5014022",
"0.49614912",
"0.47507623",
"0.4680159",
"0.46775454",
"0.46405664",
"0.45859373",
"0.45534492",
"0.45287368",
"0.44924214",
"0.4465639",
"0.44465446",
"0.4427319",
"0.44135317",
"0.44028857",
"0.43989247",
"0.43934813",
"0.43917042",
"0.43866673",
"0.43837008",
"0.43767422",
"0.43681625",
"0.43678856",
"0.43578148",
"0.43427807"
] | 0.8090247 | 0 |
Get processing history stored in the project metadata. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. Returns: a list containing strings representing history items | def getProcessingHistoryList(context):
projectDir = context.projectDir
steps = []
history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)
try:
idx = int(history['numsteps']) + 1
for i in xrange(1, idx):
key = GenericMetadata.HISTORY_PROTO + str(i)
steps.append(history[key])
except KeyError:
pass
return steps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def history(self):\n return self.info['history']",
"def History(self):\n return self.historydict.get('history', [])",
"def get_history(self):\n return self.history",
"def history():",
"def appendProcessingHistoryItem(context, item):\n projectDir = context.projectDir\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps'])\n except KeyError:\n idx = 0\n idx += 1\n \n idxStr = str(idx)\n key = GenericMetadata.HISTORY_PROTO + idxStr\n GenericMetadata._writeEntriesToSection(projectDir, GenericMetadata.HISTORY_SECTION, [key, 'numsteps'], [item, idxStr])",
"def history(self):\n return self._history",
"def history(self):\n return self._history",
"def get_history(self, taxlot_view):\n history = []\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=taxlot_view.state_id\n ).order_by('-id').first()\n master = {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n while not done_searching:\n if (log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n done_searching = True\n elif log.name == 'Merge current state in migration':\n record = record_dict(log.parent1)\n history.append(record)\n if log.parent1.name == 'Import Creation':\n done_searching = True\n else:\n tree = log.parent1\n log = tree\n else:\n tree = None\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and \\\n log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, master",
"def get_history(self):\n return self.__history[:]",
"def QueryHistory(self):\n return []",
"def _get_history_data(self) -> List[Dict[str, Any]]:\n try:\n with open(self._path.as_posix(), \"r\", encoding=\"utf-8\") as history_file:\n data = json.load(history_file)\n data.append(History._get_empty_session_object())\n return data\n except FileNotFoundError:\n self._path.touch()\n return History._get_empty_json_object()\n except json.decoder.JSONDecodeError:\n return History._get_empty_json_object()",
"def get_cache_history_items(self):\n #gdb.execute(\"p cache->history_items\")\n history_items = ZabbixHashset(gdb.parse_and_eval ('cache->history_items'))\n self.data = history_items.parse()",
"def get_meta_history(self, path):\n sql = sa.select([history.c.path, history.c.id, history.c.rev, history.c.created, history.c.owner, history.c.meta]).order_by(sa.desc(history.c.created))\n if path.endswith('/'):\n sql = sql.where(history.c.path.like(path + '%'))\n do_dir = False\n else:\n sql = sql.where(history.c.path == path)\n do_dir = path.count('/')\n for row in self.engine.execute(sql):\n if do_dir and row.path.count('/') > do_dir:\n break\n yield row",
"def get_history(cls, api, history):\n api_base = api.split('/')[-1]\n cursor = cls.history_index.cursor()\n cursor.execute(\n \"select filename from history where api=? and ymdh=?;\",\n (api_base, history))\n files = [r[0] for r in cursor]\n cls.history_index.commit()\n if not files:\n return {}\n results = {}\n for fn in files:\n ts = re.split('[?@]', fn)[-1].replace('.gz', '')\n fn_full = os.path.join(config.base_store_dir, fn)\n fd = (gzip.open if fn.endswith('.gz') else open)(fn_full)\n results[ts] = json.load(fd, encoding='utf8')\n fd.close()\n return results",
"def save_history(cube, field, filename): \n\n history.append(cube.attributes['history'])",
"def history():\n return apology(\"TODO\")",
"def history():\n return apology(\"TODO\")",
"def history():\n return apology(\"TODO\")",
"def getHistoryTitles():\n temp = []\n for idx, val in enumerate(titleStackTrace):\n temp.append(val)\n return temp",
"def retrieve_tracked_files(self):\n result = []\n\n for key in self.repo.index.entries.keys():\n\n result.append(os.path.join(self.repo.working_dir, key[0]))\n\n return result",
"def task_history(self):\n return self._task_history",
"def history(self):\n alembic.command.history(self.alembic_config(), verbose=True)",
"def history(self):\n return _spacegrant_swig.binary_sink_sptr_history(self)",
"def get_history(page):\n headings = page.filter_headings()\n idx = [i for i, head in enumerate(headings) \n if 'History' in head or 'history' in head]\n if not idx:\n return \"\"\n sections = page.get_sections(include_headings=True)\n history = str(sections[idx[0]+1].strip_code())\n return history",
"def historystorage(self):\n return self._historystorage",
"def history(self):\n return self.board.history",
"def history(self) -> List[SnapshotLogEntry]:\n return self.metadata.snapshot_log",
"def history():\n files = os.listdir(app.config['SEGMENTS_FOLDER'])\n if len(files) <= 3:\n flash('There is no history yet', 'warning')\n return redirect(url_for('home'))\n\n range_list, segments_list, full_track_dict_list = generate_track_and_segments_data(app, files)\n\n return render_template(\"history.html\", segments_list=segments_list,\n full_track_dict_list=full_track_dict_list,\n range_list=range_list,\n title=\"history\")",
"def history(name):\n from pybel.resources.arty import get_knowledge_history\n from pybel.resources.document import get_bel_knowledge_hash\n\n for path in get_knowledge_history(name):\n h = get_bel_knowledge_hash(path.as_posix())\n click.echo('{}\\t{}'.format(path, h))",
"def stack(self):\n return self.history"
] | [
"0.63497823",
"0.61489075",
"0.60992277",
"0.6044155",
"0.5910439",
"0.5873644",
"0.5873644",
"0.5873545",
"0.58652145",
"0.5827695",
"0.5823784",
"0.58060896",
"0.58035296",
"0.5702808",
"0.567759",
"0.5677286",
"0.5677286",
"0.5677286",
"0.5669729",
"0.5659796",
"0.5648869",
"0.563351",
"0.56235933",
"0.56151795",
"0.5602545",
"0.55898196",
"0.5580569",
"0.5578197",
"0.55664235",
"0.5559829"
] | 0.75734115 | 0 |
Write an item to the processing history stored in the project metadata. context: Context object containing projectDir, the path of the project whose metadata store is to be written to. item: String representing item to be written to processing history | def appendProcessingHistoryItem(context, item):
projectDir = context.projectDir
history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)
try:
idx = int(history['numsteps'])
except KeyError:
idx = 0
idx += 1
idxStr = str(idx)
key = GenericMetadata.HISTORY_PROTO + idxStr
GenericMetadata._writeEntriesToSection(projectDir, GenericMetadata.HISTORY_SECTION, [key, 'numsteps'], [item, idxStr]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeRecentItems(self, filename):\n if self.recent_items == None:\n return\n else:\n try:\n file_open = open(filename, 'w')\n text = \"\"\n for key in self.recent_items:\n full_path = self.recent_items[key] + key\n new_line = full_path + '\\n'\n text += new_line\n file_open.write(text)\n file_open.close()\n \n except IOError:\n print 'Unable to write the recent files to file: %s\\n'\\\n 'No recent files will be written' % str(filename)",
"def save_history(cube, field, filename): \n\n history.append(cube.attributes['history'])",
"def save_project(uid, song_notes, author_name, creation_date, project_name):",
"def process_item(self, item, spider):\n try:\n s = self.session()\n if isinstance(item, (PomItem, type(PomItem()), type(PomItem))):\n self.store_pom(item, s)\n elif isinstance(item, (AscItem, type(AscItem()), type(AscItem))):\n self.store_asc(item, s)\n elif isinstance(item, (ArtifactItem, type(ArtifactItem()), type(ArtifactItem))):\n self.store_index(item, s)\n elif isinstance(item, LinkItem):\n pass\n else:\n logger.warning('Unknown item: %s type %s' % (item, type(item)))\n return\n\n s.commit()\n s.flush() # writes changes to DB\n s.expunge_all() # removes objects from session\n except Exception as e:\n logger.warning('Exception in storing key %s' % e)\n\n finally:\n utils.silent_close(s)\n s = None\n return item",
"def store(self, item):\n cursor = self.conn.cursor()\n # Store the item\n if item:\n cursor.execute(*self._build_insert(item, 'items'))\n for file_ in item.files:\n cursor.execute(\"\"\"insert into files (filename, item_id)\n values (?, ?)\"\"\", (file_, item.kg_id))\n self.conn.commit()\n self.logger.info(\"Succesfully stored item %d\" % item.kg_id)",
"def process_item(self, item, spider):\n session = self.Session()\n # deal = Deals(**item)\n entry = Lyrics(item['song'], item['text'])\n\n try:\n session.add(entry)\n session.commit()\n print(f\"\\n\\nInserted {item['song']} into DB!\\n\\n\")\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n ###OLD###\n # # print(\"Pipeline test\" + item['song'])\n # self.conn.execute(f\"\"\"INSERT INTO lyrics VALUES\n # ({item['song']}, {item['text']});\n # \"\"\")\n\n return item",
"def save(items, name, file):\n dirname = os.path.dirname(file)\n if not os.path.isdir(dirname):\n logging.debug(\"creating directory %s\", dirname)\n os.makedirs(dirname)\n f = r.TFile(file, \"RECREATE\")\n f.WriteObject(vectorize(items, \"roast::Process*\"), name)\n f.Close()",
"def store_item(self, item_in_json): # pragma: no cover\n raise NotImplementedError",
"def _write_history(self):\n if self.data['history_file'] is None:\n return\n contents = '\\n'.join(self.data['history_lines'])\n history = self.data['history_file']\n write_text_file(\n history, contents, encoding=self.data['history_encoding'])\n logger.info(\"History file %s updated.\", history)",
"def save_item(self):\r\n raise NotImplementedError(\"Function not implemented, please implement in sub class\")",
"def push(self, item):\n pass",
"def put(self, item): \n self.__db.rpush(self.key, item)",
"def process_item(self, item, spider):\n tmp_dict = {}\n tmp_dict['comments'] = item['comments']\n tmp_dict['referenceName'] = item['referenceName']\n tmp_dict['referenceTime'] = item['referenceTime']\n tmp_dict['productColor'] = item['productColor']\n tmp_dict['productSize'] = item['productSize']\n self.savefile.write(u\"{0}\\n\".format(json.dumps(tmp_dict)))\n #raise DropItem()",
"def writeItem(item_id, userid, name, buy_price, first_bid, currently, number_of_bids, started, ends, description):\n\tlst = [item_id, userid, name, buy_price, first_bid, currently, number_of_bids, started, ends, description]\n\twriteLine(lst, items_file)",
"def addToRecent(self, filepath, repodest, project=''):\n tpl = {'file': filepath, 'type': repodest, 'project': project }\n self.RecentFile.emit(tpl)",
"def lpush(self, item):\n\n self.r.lpush(self.joblist, item)",
"def item_add(self, item, filename):\n\t\tif not 'meta' in item:\n\t\t\titem['meta'] = {}\n\t\titem['meta']['filename'] = filename\n\t\t\n\t\t# Create directory if it does not already exist\t\t\t\t\n\t\tdirname = os.path.dirname(filename)\n\t\tif not os.path.isdir(dirname):\n\t\t\tos.makedirs(dirname)\n\n\t\tbuffer = self.print_conf( item )\n\t\tfile = open(filename,'a')\n\t\tfile.write( buffer )\n\t\tfile.close()\n\t\treturn True",
"def saveHistory(self, token, history):\n filename = \"%s.json\" % token\n helpers.saveFile(self.dataDir, filename, json.dumps(history))",
"def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close",
"def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))",
"def add_line(project, jobs, line):\n if project not in jobs:\n jobs[project] = []\n jobs[project].append(line)",
"def log_item_modified(self, item_name, item_attrs):\n\n domain_name = \"S3FileLog\"\n\n log_item_name = self.get_log_item_name(item_name, item_attrs)\n\n item_attrs['log_item_name'] = log_item_name\n\n # Check if it already exists\n log_item = self.db.get_item(\"S3FileLog\", log_item_name, consistent_read=True)\n if log_item is None:\n self.db.put_attributes(\"S3FileLog\", log_item_name, item_attrs)",
"def writeToMetadata(self, context):\n pass",
"def push(self, item: str):\n if self.hit or not self._stack:\n self._stack.append([])\n self._stack[-1].append(item)\n self.hit = False",
"def save(self):\r\n self.pushes.append((self.token, self.stream, self.line, self.column))",
"def dumpprojects (self):\r\n\r\n\r\n\r\n datesuffix=str(datetime.datetime.now()).split(' ')[0]\r\n project = str(transform(self.default_dict['projects'].return_dict()))\r\n\r\n if self.using_shelf:\r\n\r\n file_access.save_file(returntext=project,\r\n filename='PROJ'+notebookname+datesuffix,\r\n folder='/textfiles')\r\n if self.using_database:\r\n value_tuple = (notebookname, project,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO projects \"\r\n +\"(notebook, projectfile) \"\r\n +\"VALUES (?,?);\",\r\n value_tuple)\r\n db_connection.commit()",
"def save_history():\n\n mid = get_mid()\n back_file = contact_name + \"_\" + today\n\n if not os.path.isdir(back_path):\n print('WARNING: o {} directory found, creating.').format(back_path)\n os.mkdir(back_path)\n else:\n print(\"OK: {} found.\".format(back_path))\n\n os.chdir(back_path)\n with open(back_file, 'w') as bf:\n for mes in get_todays_history(mid):\n data = \"{}\\n\".format(mes)\n bf.write(data)",
"def add_history(self):\n # add separator, if there already are history entries\n if self.parentApp.History != '':\n self.parentApp.History += (\n '\\n\\n--- --- --- --- --- --- --- --- --- --- --- ---\\n\\n'\n )\n\n # add the transaction to it\n self.parentApp.History += self.parentApp.tmpTransC.to_str()",
"def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)",
"def push(self, value):\n self.history.append(value)"
] | [
"0.5538956",
"0.54137826",
"0.5284954",
"0.5267855",
"0.5262807",
"0.52533287",
"0.5198412",
"0.5144163",
"0.50825036",
"0.5042891",
"0.50389177",
"0.50350785",
"0.5034479",
"0.50320613",
"0.5019872",
"0.50136083",
"0.5003788",
"0.49449867",
"0.48960865",
"0.4886148",
"0.48825863",
"0.48773256",
"0.48718494",
"0.48548788",
"0.48451605",
"0.48424217",
"0.48308024",
"0.48157236",
"0.47975928",
"0.4788937"
] | 0.73133624 | 0 |
Evaluate the sum of all the amicable numbers under `number` | def sum_of_amicable_numbers(number: int):
start_time = time.time()
amicable = set()
for n in range(1, number):
if n not in amicable:
a = sum_of_proper_divisors(n)
b = sum_of_proper_divisors(a)
if (n == b) and not (n == b == a):
amicable.add(n)
amicable.add(a)
result = sum(amicable)
print_time_log(start_time, result)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum_of_numbers(numbers):\r\n return sum(numbers)",
"def solution(number):\n\n # a is a list with all the numbers below input that are multiples of 3 or 5\n a = [x for x in range(1,number) if x % 3 == 0 or x % 5 == 0]\n return sum(a)",
"def sum_amnicable(limit):\n return sum(map(lambda num: num * is_amnicable(num), range(2, limit)))",
"def sum_amicable(limit):\n\n def find_amicable_pair(n):\n check_n= 0\n potential_half = 0\n for i in range(1,n):\n if n % i == 0:\n potential_half += i\n for i in range(1, potential_half):\n if potential_half % i == 0:\n check_n += i\n if check_n == n and n != potential_half: # exclude self amicable\n result.append(n)\n result.append(potential_half)\n\n result = []\n for num in range(1, limit):\n if num not in result:\n find_amicable_pair(num)\n return sum(result)",
"def problem1():\n return sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)",
"def test_large_sum(self):\n for n in [10, 20, 30, 40, 50]:\n A = np.arange(n*n)\n A = np.reshape(A, (n, n))\n x = Variable(n, n)\n p = Problem(Minimize(at.sum_entries(x)), [x >= A])\n result = p.solve()\n answer = n*n*(n*n+1)/2 - n*n\n print(result - answer)\n self.assertAlmostEqual(result, answer)",
"def sumTo(n):\n\n sum_all = (n * (n+1))/2\n\n return sum_all",
"def sumTo(n):\n \n the_sum = 0 #current sum\n a_number = 1 #where we are\n while a_number <= n:\n the_sum += a_number\n a_number += 1\n return the_sum",
"def question_26(list_num: int) -> int:\n return sum(list_num)",
"def evaluate_number(number : int)->int:\n if type(number) == int and number >1 and number < 100:\n num = total_numbers = porc = 0\n while porc < number:\n num = num + 1\n clasificate = is_bouncy(str(num))\n result = evaluate(clasificate , num)\n if result:\n total_numbers = total_numbers + 1\n porc = total_numbers * 100 / num\n return num\n return 0",
"def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit",
"def sum_multiples(num):\n pass",
"def find_the_sum(number):\n the_sum = 0\n\n for i in range(number):\n # The number is a multiple of 3 or 5\n # If the number is a multiple of both 3 and 5, it is counted once\n if (i % 3 == 0) or (i % 5 == 0):\n the_sum += i\n\n return the_sum",
"def non_abundant_sums():\n # the sum of divisors of every number\n divisor_sum = [0] * LIMIT\n for i in range(1, LIMIT):\n for j in range(i * 2, LIMIT, i):\n divisor_sum[j] += i\n # abundant numbers\n abundant_nums = [i for (i, x) in enumerate(divisor_sum) if x > i]\n\n expressible = [False] * LIMIT\n for i in abundant_nums:\n for j in abundant_nums:\n if i + j < LIMIT:\n expressible[i + j] = True\n else:\n break\n ans = sum(i for (i, x) in enumerate(expressible) if not x)\n return str(ans)",
"def solveProblem021():\n total = 0\n for i in range(2, 10000):\n divs = getProperDivisors(i)\n s = sum(divs)\n # Skip stuff greater than, we'll get to it later if it's less than max.\n if s > i:\n continue\n if s == i:\n continue\n t = sum(getProperDivisors(s))\n if t == i:\n total = total + i + s\n print(\"The Sum is: %d\" % (total,))",
"def problem2(m, p):\n total = 0\n for k in range(m, m ** p):\n if is_prime(k):\n total = total + sum_of_digits(k)\n return total",
"def problem():\n for a in range(1, 380):\n for b in range(a):\n if a + b + (a**2 + b**2)**0.5 == 1000:\n return int(a * b * (a**2 + b**2)**0.5)",
"def fn(x):\n if x <= 0: return int(x == 0)\n return sum(fn(x - xx) for xx in nums)",
"def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result",
"def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result",
"def sum_numbers(sequence):\r\n\r\n total = 0\r\n seq = get_numbers(sequence)\r\n for element in seq:\r\n total += element\r\n\r\n return total",
"def test_suite():\n test(sum_all_elements([1,3,1,4,3,8]) == 5)\n test(sum_all_elements([1,3,5,7]) == 16)\n test(sum_all_elements([1, -7, 10, 23]) == -6)\n test(sum_all_elements(range(1,555,2)) == 76729)",
"def summationReduce(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return reduce(lambda x, y: x + y, range(lower, upper + 1))",
"def summation(self):\n return sum(self.read_ints())",
"def amicable_numbers(n):\n amicables = []\n sumDivisors = {}\n for i in range(1, n):\n divisors = proper_divisors(i)\n sumDivisors[i] = sum(divisors)\n for i in range(1, n):\n sumDivisorsOfi = sumDivisors[i]\n if sumDivisorsOfi < n:\n compare = sumDivisors[sumDivisorsOfi]\n if compare == i and sumDivisorsOfi != i:\n amicables.append(i)\n return amicables",
"def get_3_5_sum(num):\n nums = range(num)\n mults = [num for num in nums if (num % 3 == 0 or num % 5 == 0)]\n return sum(mults)",
"def total(num_list):\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n return num_sum",
"def sum_numbers(numbers=None):\n if numbers is None:\n return sum(range(1, 101))\n else:\n return sum(numbers)",
"def sum_numbers(numbers):\n sum = 0\n for number in numbers:\n sum += number\n\n return sum",
"def method2():\n n = 1000\n s = 0\n multiples = [3,5]\n total = []\n\n for m in multiples:\n total.append(0)\n\n minValue = 0\n while(minValue < 1000):\n minValue = 1000\n minPosition = 0\n for i, v in enumerate(total):\n if v < minValue:\n minValue = v\n minPosition = i\n\n temp = total[minPosition] + multiples[minPosition]\n\n if(temp < 1000) and (temp not in total):\n s += temp\n\n total[minPosition] = temp\n\n return s"
] | [
"0.6975791",
"0.6823644",
"0.6765109",
"0.6445833",
"0.6376807",
"0.6357019",
"0.6341432",
"0.6254372",
"0.62135017",
"0.62022454",
"0.6195927",
"0.6166796",
"0.61620706",
"0.61299115",
"0.6123273",
"0.6048174",
"0.6036229",
"0.60346645",
"0.602642",
"0.602642",
"0.6026195",
"0.60181266",
"0.59931827",
"0.59926796",
"0.5991386",
"0.59912604",
"0.5985684",
"0.5961434",
"0.5951528",
"0.5922894"
] | 0.70709777 | 0 |
Get an instance of the event loop manager for this factory. | def manager(self):
if not self._manager:
self._manager = TwistedEventLoopManager()
return self._manager | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_event_loop():\n try:\n return asyncio.get_running_loop()\n except RuntimeError:\n return asyncio.new_event_loop()",
"def get_event_loop(*args, **kwargs):\r\n\r\n return get_loop(*args, **kwargs)",
"def get_event_loop() -> KivyEventLoop:\n return asyncio.get_event_loop()",
"def getLoop():\n return asyncio.get_event_loop_policy().get_event_loop()",
"def current_event_loop(self):\n loop = current_loop.get()\n if loop is None:\n loop = super().get_event_loop()\n return loop",
"def event_manager(self):\n return ScriptDB.objects.get(db_key=\"Event Manager\")",
"def get_test_event_loop():\n if not _IS_XOS_ASYNC:\n loop = _asynclib.get_event_loop()\n else:\n loop = _get_xos_async_test_event_loop()\n return loop",
"def getManager(self):\n return self._manager",
"def get_event_loop(self):\n try:\n task = trio.lowlevel.current_task()\n except RuntimeError:\n pass\n else:\n # Trio context. Note: NOT current_loop.get()! If this is called from\n # asyncio code, current_task() is the trio-asyncio loop runner task,\n # which has the correct loop set in its contextvar; but (on Python\n # 3.7+) our current context is quite possibly something different, and\n # might have the wrong contextvar value (e.g. in the case of a\n # loop1.call_later() in loop2's context).\n return task.context.get(current_loop)\n\n # Not Trio context\n if _faked_policy.policy is not None:\n return _faked_policy.policy.get_event_loop()\n\n # This will return the thread-specific event loop set using\n # set_event_loop(), or if none has been set, will call back into\n # our new_event_loop() to make a SyncTrioEventLoop and set it as\n # this thread's event loop.\n return super().get_event_loop()",
"def get_manager():\n return __manager__",
"def GetManager(self):\r\n\r\n return self.manager",
"def get_manager():\n\n return multiprocessing.Manager()",
"def load_event_loop():\n while True:\n try:\n async_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(async_loop)\n return async_loop\n except:\n time.sleep(3)",
"def set_asyncio_event_loop(event_loop_path: Optional[str]) -> AbstractEventLoop:\n if event_loop_path is not None:\n event_loop_class: Type[AbstractEventLoop] = load_object(event_loop_path)\n event_loop = event_loop_class()\n asyncio.set_event_loop(event_loop)\n else:\n try:\n with catch_warnings():\n # In Python 3.10.9, 3.11.1, 3.12 and 3.13, a DeprecationWarning\n # is emitted about the lack of a current event loop, because in\n # Python 3.14 and later `get_event_loop` will raise a\n # RuntimeError in that event. Because our code is already\n # prepared for that future behavior, we ignore the deprecation\n # warning.\n filterwarnings(\n \"ignore\",\n message=\"There is no current event loop\",\n category=DeprecationWarning,\n )\n event_loop = asyncio.get_event_loop()\n except RuntimeError:\n # `get_event_loop` raises RuntimeError when called with no asyncio\n # event loop yet installed in the following scenarios:\n # - Previsibly on Python 3.14 and later.\n # https://github.com/python/cpython/issues/100160#issuecomment-1345581902\n event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(event_loop)\n return event_loop",
"def getInstance():\n if GameLoop.__instance==None:\n GameLoop()\n return GameLoop.__instance",
"def _get_loop(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Optional[asyncio.AbstractEventLoop]:\n if callable(self.loop_getter):\n if self.loop_getter_need_context:\n return self.loop_getter(*args, **kwargs) # pylint: disable=not-callable\n return self.loop_getter() # pylint: disable=not-callable\n return self.loop_getter",
"def event_loop():\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()",
"def get_entity_manager(self):\n return self.game.entity_manager",
"def event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()",
"def _get_event_loop() -> Tuple[asyncio.AbstractEventLoop, bool]:\n try:\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n loop = asyncio.new_event_loop()\n should_close_loop = True\n else:\n should_close_loop = False\n except RuntimeError:\n loop = asyncio.new_event_loop()\n should_close_loop = True\n return loop, should_close_loop",
"def loop(event_loop: AbstractEventLoop) -> AbstractEventLoop:\n yield event_loop\n event_loop.close()",
"def event_loop(request):\n loop = asyncio.get_event_loop()\n yield loop",
"def mock_event_loop() -> Generator[AbstractEventLoop, Any, None]:\n loop = get_event_loop()\n yield loop\n loop.close()",
"def getInstance() -> EndToEndFactoryV2:\n if(EndToEndFactoryV2._instance == None):\n EndToEndFactoryV2._instance = EndToEndFactoryV2()\n return EndToEndFactoryV2._instance",
"def event_manager(axis_device: AxisDevice) -> EventManager:\n axis_device.enable_events()\n return axis_device.event",
"def GetAuiManager(self):\r\n\r\n return self._mgr",
"def _io_event_loop_thread(self):\r\n io_event_loop = asyncio.get_event_loop_policy().new_event_loop()\r\n asyncio.set_event_loop(io_event_loop)\r\n assert isinstance(io_event_loop, AbstractEventLoop)\r\n self._io_event_loop = io_event_loop\r\n self._event_loop_started.release()\r\n self._io_event_loop.run_forever()",
"def get_extension_manager(self):\n return get_extension_manager()",
"def check_event_loop():\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n asyncio.set_event_loop(asyncio.new_event_loop())",
"def get_task_manager(task_manager=None):\n global _task_manager\n if _task_manager is None:\n if task_manager is None:\n _task_manager = TaskManagerImpl()\n else:\n constructor = dynamic_import(task_manager)\n _task_manager = constructor()\n\n return _task_manager"
] | [
"0.74030995",
"0.73900837",
"0.6850553",
"0.675802",
"0.6663396",
"0.66488266",
"0.65802383",
"0.63889515",
"0.63624215",
"0.633409",
"0.6304668",
"0.62948436",
"0.6289953",
"0.6274178",
"0.6228322",
"0.62256956",
"0.6130377",
"0.59798896",
"0.596554",
"0.5839572",
"0.5799308",
"0.5727924",
"0.5671989",
"0.5645577",
"0.5538176",
"0.5535405",
"0.55238533",
"0.54916793",
"0.5464464",
"0.5452364"
] | 0.78999525 | 0 |
Set the maximum delay in seconds for reconnecting to rosbridge (3600 seconds by default). | def set_max_delay(cls, max_delay):
LOGGER.debug("Updating max delay to {} seconds".format(max_delay))
# See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html
cls.maxDelay = max_delay | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_retry_timeout(self, retry_timeout):",
"def set_timeout(self, timeout):\n self.timeout = timeout",
"def setTimeOut(self, timeout=6.0):\n self.timeout = timeout",
"def settimeout(self,timeout=10):\r\n # Update\r\n self.timeout = timeout",
"def set_timeout(self, seconds):\n self._timeout = seconds",
"def setdefaulttimeout(timeout):\r\n global default_timeout\r\n default_timeout = timeout",
"def setdefaulttimeout(timeout):\r\n global _TIMEOUT\r\n _TIMEOUT = timeout",
"def reconnect(self, seconds: int) -> None:",
"def timeout(ctx, seconds):\n config_db = ConfigDBConnector()\n config_db.connect()\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_timeout\": seconds})",
"def reset_time_out(self):\n self.reconnect()\n self.reconnect_params()",
"def timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 600\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_timeout\": seconds})",
"def settimeout(self, timeout):\r\n self.sock.settimeout(timeout)",
"def set_timeout(self, timeout):\n pass",
"def set_timeout(self, timeout_secs):\n self._timeout_secs = timeout_secs",
"def set_timeout(self, timeout):\n if self._timeout != timeout:\n self._timeout = timeout\n if self._zerorpc:\n self.close()\n self.connect()",
"def tcp_timeout(ctx, seconds):\n config_db = ConfigDBConnector()\n config_db.connect()\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_tcp_timeout\": seconds})",
"def tcp_timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 86400\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_tcp_timeout\": seconds})",
"def settimeout(self, value: int) -> None:\n ...",
"def settimeout(self, to):\r\n self._timeout = to",
"def set_timeout_millis(self, value):\n self.dp.set_timeout_millis(value)",
"def set_sleep_time(self, milliseconds:int):\n self.send_command(f\"configure mainLoopSleepTime {milliseconds}\")",
"def connect_retry_interval(self) -> int:\n return pulumi.get(self, \"connect_retry_interval\")",
"def settimeout(self, value):\r\n return self.sock.settimeout(value)",
"def set_timeout(self, timeout):\n if self.interface is not None:\n self.interface.timeout = timeout",
"def set_timeout(self, millis):\n self.timeout = self.current_milli_time() + millis",
"def set_timeout(value):\n environ[\"XRD_REQUESTTIMEOUT\"] = str(value)",
"def SetBacklightTimeout(self,Timeout):\n self._BacklightTimeout = Timeout",
"def __init__(self, timeout=129600):\n self.timeout = timeout",
"def timeout(self):\n self._status_update(\"Pyloton: Timeout\")\n time.sleep(3)",
"def setTimeout(self, timeout):\n self.timeout = timeout"
] | [
"0.6883001",
"0.6444856",
"0.64174986",
"0.64024204",
"0.6298543",
"0.6273594",
"0.6224674",
"0.6222063",
"0.6192602",
"0.614306",
"0.6132622",
"0.6123391",
"0.6113951",
"0.6093116",
"0.60815483",
"0.60418",
"0.6031176",
"0.60070217",
"0.6000383",
"0.5989497",
"0.5939702",
"0.5935828",
"0.5928373",
"0.59022754",
"0.588843",
"0.5881648",
"0.5872401",
"0.58527315",
"0.58521825",
"0.58117896"
] | 0.7337287 | 0 |
Set the initial delay in seconds for reconnecting to rosbridge (1 second by default). | def set_initial_delay(cls, initial_delay):
LOGGER.debug("Updating initial delay to {} seconds".format(initial_delay))
# See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html
cls.initialDelay = initial_delay | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reconnect(self, seconds: int) -> None:",
"def set_retry_timeout(self, retry_timeout):",
"def attempt_reconnect(self):\n time.sleep(self.reconnect_delay)\n self.connect_to()",
"def reset_time_out(self):\n self.reconnect()\n self.reconnect_params()",
"def delay():\r\n time.sleep(2)",
"def default_delay(self) -> int:\n return DEFAULT_DELAY",
"def set_max_delay(cls, max_delay):\n LOGGER.debug(\"Updating max delay to {} seconds\".format(max_delay))\n # See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html\n cls.maxDelay = max_delay",
"def default_delay(self):\n return DEFAULT_DELAY",
"def setdefaulttimeout(timeout):\r\n global _TIMEOUT\r\n _TIMEOUT = timeout",
"def set_sleep_time(self, milliseconds:int):\n self.send_command(f\"configure mainLoopSleepTime {milliseconds}\")",
"def delay(ms: int, /) -> None:",
"def setdefaulttimeout(timeout):\r\n global default_timeout\r\n default_timeout = timeout",
"def setInitialTime(self, t0):\n _cantera.reactornet_setInitialTime(self.__reactornet_id, t0)",
"def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)",
"def deepsleep(time_ms: int = None) -> None:",
"def setInitialTime(self, T0):\n raise \"use method setInitialTime of class ReactorNet\"\n #_cantera.reactor_setInitialTime(self.__reactor_id, T0)",
"def reconnect(self):\n if self.is_connected:\n self.disconnect()\n self._reset()\n time.sleep(config.RECONNECT_DELAY) # increase reconnect delay?\n self.connect()",
"def _connect_later(self, wait_time):\n # Trivial function, but useful for unit testing\n self._io_loop.call_later(wait_time, self._connect, True)",
"def set_auto_start_delay(self, nVmAutoStartDelay):\n\t\tcall_sdk_function('PrlVmCfg_SetAutoStartDelay', self.handle, nVmAutoStartDelay)",
"def settimeout(self,timeout=10):\r\n # Update\r\n self.timeout = timeout",
"async def connect(\n self, *, auto_reconnect: bool = False, reconnect_delay: float = -1\n ):\n if self._state == const.STATE_CONNECTED:\n return\n if reconnect_delay < 0:\n reconnect_delay = self._reconnect_delay\n self._auto_reconnect = False\n await self._connect()\n self._auto_reconnect = auto_reconnect",
"def set_timeout(self, seconds):\n self._timeout = seconds",
"def __init__(self, delay=0):\n self.delay = delay",
"def _delay(self, n=None):",
"def _rostopic_delay(node, topic, window_size=DEFAULT_WINDOW_SIZE):\n # pause hz until topic is published\n msg_class = get_msg_class(node, topic, blocking=True, include_hidden_topics=True)\n\n if msg_class is None:\n node.destroy_node()\n return\n\n rt = ROSTopicDelay(node, window_size)\n node.create_subscription(\n msg_class,\n topic,\n rt.callback_delay,\n qos_profile_sensor_data)\n\n timer = node.create_timer(1, rt.print_delay)\n while rclpy.ok():\n rclpy.spin_once(node)\n\n node.destroy_timer(timer)\n node.destroy_node()\n rclpy.shutdown()",
"def set_timeout(self, timeout):\n self.timeout = timeout",
"async def sleep(cls, delay: float) -> None:",
"def __init__(self, seconds):\n super(RobotiqCommandTimeout, self).__init__()\n self.start_time = rospy.get_rostime()\n self.duration = rospy.Duration(seconds)",
"def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)",
"def delay(interval):\n time.sleep(interval / 1000.0)"
] | [
"0.63564193",
"0.63120914",
"0.6191868",
"0.61457485",
"0.60059255",
"0.596495",
"0.5907743",
"0.5899166",
"0.58699244",
"0.58644134",
"0.5837313",
"0.578545",
"0.576132",
"0.5743831",
"0.57401067",
"0.5719824",
"0.57179666",
"0.5700087",
"0.56863904",
"0.5656275",
"0.56456447",
"0.56257975",
"0.560817",
"0.5591152",
"0.5581758",
"0.55523914",
"0.55274147",
"0.5509805",
"0.55037576",
"0.54896617"
] | 0.7081216 | 0 |
Set the maximum number of connection retries when the rosbridge connection is lost (no limit by default). | def set_max_retries(cls, max_retries):
LOGGER.debug("Updating max retries to {}".format(max_retries))
# See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html
cls.maxRetries = max_retries | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_retries(self, max_retries: ConfigNodePropertyInteger):\n\n self._max_retries = max_retries",
"def set_connectionretry(value):\n environ[\"XRD_CONNECTIONRETRY\"] = str(value)",
"def max_retries(self) -> ConfigNodePropertyInteger:\n return self._max_retries",
"def set_max_retries(self,mx_rty_passive_activation):\n # We set MxRtyPassiveActivation to 5 because it turns out that one\n # try sometimes does not detect the card properly.\n frame = Pn532Frame(frame_type=PN532_FRAME_TYPE_DATA,\n data=bytearray([PN532_COMMAND_RFCONFIGURATION,\n PN532_RFCONFIGURATION_CFGITEM_MAXRETRIES,\n 0xFF,0x01,mx_rty_passive_activation]))\n self.send_command_check_ack(frame)\n self.read_response()",
"def set_retry_timeout(self, retry_timeout):",
"def maximum_retry_attempts(self) -> Optional[int]:\n return pulumi.get(self, \"maximum_retry_attempts\")",
"def maximum_retry_attempts(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_retry_attempts\")",
"def maximum_retry_attempts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"maximum_retry_attempts\")",
"def retry_request(self, method, action, body=None,\r\n headers=None, params=None):\r\n max_attempts = self.retries + 1\r\n for i in range(max_attempts):\r\n try:\r\n return self.do_request(method, action, body=body,\r\n headers=headers, params=params)\r\n except exceptions.ConnectionFailed:\r\n # Exception has already been logged by do_request()\r\n if i < self.retries:\r\n _logger.debug(_('Retrying connection to Neutron service'))\r\n time.sleep(self.retry_interval)\r\n\r\n raise exceptions.ConnectionFailed(reason=_(\"Maximum attempts reached\"))",
"def retries(self, count: int):\n if count < 0:\n raise ValueError(\"negative\")\n\n self._retries = count",
"def fail_max(self, number: int) -> None:\n self._fail_max = number",
"def set_max_nb_robots(nb): #py:set_max_nb_robots\n RUR._set_max_nb_robots_(nb)",
"def set_max_reps(self, max_reps):\n self.max_reps = int(max_reps)",
"def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)",
"def limit():\n bwc = BandwidthConfigurator()\n bwc.limit()",
"def set_max_clients(self, clients: int = 50_000) -> None:\n self.set_db_conf(\"maxclients\", str(clients))",
"def setErrorMax(self, error_max):\n\t\tself.error_max = error_max",
"def connect_retry_interval(self) -> int:\n return pulumi.get(self, \"connect_retry_interval\")",
"def set_max_delay(cls, max_delay):\n LOGGER.debug(\"Updating max delay to {} seconds\".format(max_delay))\n # See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html\n cls.maxDelay = max_delay",
"def retries(self) -> int:\n return self._retries",
"async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. URL can not be navigated.\")",
"def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)",
"def max_num_links(self, max_num_links):\n self._max_num_links = max_num_links",
"def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. + random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')",
"def handle_max_cons(self):\n msg = \"Too many connections. Service temporary unavailable.\"\n self.respond(\"421 %s\" %msg)\n self.log(msg)\n # If self.push is used, data could not be sent immediately in\n # which case a new \"loop\" will occur exposing us to the risk of\n # accepting new connections. Since this could cause asyncore to\n # run out of fds (...and exposes the server to DoS attacks), we\n # immediately close the channel by using close() instead of\n # close_when_done(). If data has not been sent yet client will\n # be silently disconnected.\n self.close()",
"def set_maximum_iterations(self, n):\n self.maximum_iterations = n\n return",
"def handle_max_cons_per_ip(self):\n msg = \"Too many connections from the same IP address.\"\n self.respond(\"421 %s\" %msg)\n self.log(msg)\n self.close_when_done()",
"def retry(num=5):\n s = requests.Session()\n retries = Retry(total=num, backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n s.mount('http://', HTTPAdapter(max_retries=retries))\n\n return s",
"def retry_mechanism(self):\n # If future is done we can close the transport\n if self.on_response_received.done():\n self.transport.close()\n elif self._retries < self._max_retries:\n self._retries += 1\n logger.debug(f'Retry #{self._retries} of {self._max_retries}')\n self._send_request()\n else:\n logger.debug(f'Max number of retries ({self._max_retries}) reached, closing socket')\n self.on_response_received.set_exception(MaxRetriesException)\n self.transport.close()",
"def maxclients(self) -> Optional[int]:\n return pulumi.get(self, \"maxclients\")"
] | [
"0.7256527",
"0.7019554",
"0.6741667",
"0.6694928",
"0.6693451",
"0.64660996",
"0.61524314",
"0.61477476",
"0.6094627",
"0.60413826",
"0.5966964",
"0.59602076",
"0.5908201",
"0.58888686",
"0.5843294",
"0.5831719",
"0.58292305",
"0.58066034",
"0.57978034",
"0.5777352",
"0.575466",
"0.57409143",
"0.57407224",
"0.57263964",
"0.5716326",
"0.5675897",
"0.5665369",
"0.56414187",
"0.56151545",
"0.55856794"
] | 0.7344416 | 0 |
Call the given function from a thread, and wait for the result synchronously for as long as the timeout will allow. | def blocking_call_from_thread(self, callback, timeout):
result_placeholder = defer.Deferred()
if timeout:
result_placeholder.addTimeout(timeout, reactor, onTimeoutCancel=self.raise_timeout_exception)
return threads.blockingCallFromThread(reactor, callback, result_placeholder) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call_in_waiting_thread(\n __call: Union[Callable[[], T], Call[T]],\n thread: threading.Thread,\n timeout: Optional[float] = None,\n ) -> T:\n raise NotImplementedError()",
"def process_and_join_thread(thread, timeout=0.1):\n def test_func():\n thread.join(timeout)\n return not thread.is_alive()\n\n process_and_assert(test_func)",
"def call_soon_in_waiting_thread(\n __call: Union[Callable[[], T], Call[T]],\n thread: threading.Thread,\n timeout: Optional[float] = None,\n ) -> Call[T]:\n call = _cast_to_call(__call)\n waiter = get_waiter_for_thread(thread)\n if waiter is None:\n raise RuntimeError(f\"No waiter found for thread {thread}.\")\n\n call.set_timeout(timeout)\n waiter.submit(call)\n return call",
"def wait_for_call_in_loop_thread(\n __call: Union[Callable[[], T], Call[T]],\n timeout: Optional[float] = None,\n done_callbacks: Optional[Iterable[Call]] = None,\n ) -> T:\n raise NotImplementedError()",
"def wait_for_call_in_new_thread(\n __call: Union[Callable[[], T], Call[T]],\n timeout: Optional[float] = None,\n done_callbacks: Optional[Iterable[Call]] = None,\n ) -> T:\n raise NotImplementedError()",
"def call_in_loop_thread(\n __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]],\n timeout: Optional[float] = None,\n ) -> T:\n raise NotImplementedError()",
"def _wait_for(\n func: Callable,\n expected_result: Any = True,\n timeout: int = 10,\n print_error: bool = True,\n sleep_for: int = 1,\n **kwargs,\n) -> None:\n end = time() + timeout\n\n while time() <= end:\n try:\n retval = func(**kwargs)\n except Exception as err: # pylint: disable=broad-except\n if print_error:\n logger.error(err)\n else:\n if retval == expected_result:\n return None\n sleep(sleep_for)\n\n raise WaitForException(\n f\"func: {func}, didn't return {expected_result} within specified timeout: {timeout}\"\n )",
"def blockingCallOnMainThread(func, *args, **kwargs):\n def blockingCallFromThread(f, *a, **kw):\n queue = Queue.Queue()\n def _callFromThread():\n result = defer.maybeDeferred(f, *a, **kw)\n result.addBoth(queue.put)\n reactor.callFromThread(_callFromThread)\n\n result = None\n while True:\n try:\n result = queue.get(True, 30)\n except Queue.Empty as qe:\n if True: #not reactor.running: # reactor.running is only False AFTER shutdown, we are during.\n raise ValueError(\"Reactor no longer active, aborting.\")\n else:\n break\n\n if isinstance(result, failure.Failure):\n result.raiseException()\n return result\n\n if currentThread().getName() == 'MainThread':\n return func(*args, **kwargs)\n else:\n return blockingCallFromThread(func, *args, **kwargs)",
"def _thread_run_for_result(future, func, *args):\n result = func(future, *args)\n future._set_result(result)",
"def with_timeout(t, func, *args, **kwargs):\n\n func_thread = threading.Thread(target=func, args=args, kwargs=kwargs)\n func_thread.daemon = True # quit interpreter even if still running\n func_thread.start()\n func_thread.join(t)\n if func_thread.is_alive():\n raise nagiosplugin.Timeout('{0}s'.format(t))",
"def _timeout(self, timeout, f, *args, **kwargs):\r\n\r\n t = spawn_thread(target=f, args=args, kwargs=kwargs)\r\n t.daemon = True\r\n t.start()\r\n t.join(timeout)\r\n\r\n if not t.is_alive():\r\n if t.exc_info:\r\n return t.exc_info\r\n return t.result\r\n else:\r\n try:\r\n msg = '[%s] Execution was forcefully terminated'\r\n raise RuntimeError(msg % t.name)\r\n except:\r\n return sys.exc_info()",
"def call_in_new_thread(\n __call: Union[Callable[[], T], Call[T]], timeout: Optional[float] = None\n ) -> T:\n raise NotImplementedError()",
"def wait_fluently(condition: Callable, timeout: TimeoutType, err_msg: str):\n if timeout is None:\n timeout = 0\n start_time = time.time()\n while True:\n res = condition()\n if res:\n return res\n if time.time() - start_time >= timeout:\n raise TimeoutException(err_msg)\n time.sleep(0.3)",
"def wait_until(func, wait_for=None, sleep_for=0.5):\n res = func()\n\n if res:\n return res\n\n if wait_for:\n deadline = time.time() + wait_for\n while not res and time.time() <= deadline:\n gevent.sleep(sleep_for)\n res = func()\n\n else:\n while not res:\n gevent.sleep(sleep_for)\n res = func()\n\n return res",
"def wait_on(\n function: Callable,\n desc: str,\n timeout: timeout_type,\n delta: timeout_type = DEFAULT_POLLING_DELTA,\n polling_backoff: timeout_type = DEFAULT_POLLING_BACKOFF,\n sleep_: Optional[Callable] = None,\n):\n sleep = sleep_ or time.sleep\n total_wait = 0.0\n while True:\n if total_wait > timeout:\n raise TimeoutAssertionError(TIMEOUT_MESSAGE_TEMPLATE.format(total_wait, desc))\n value = function()\n if value is not None:\n return value\n total_wait += delta\n sleep(delta)\n delta += polling_backoff",
"def call_soon_in_loop_thread(\n __call: Union[Callable[[], Awaitable[T]], Call[Awaitable[T]]],\n timeout: Optional[float] = None,\n ) -> Call[T]:\n call = _cast_to_call(__call)\n runner = get_global_loop()\n call.set_timeout(timeout)\n runner.submit(call)\n return call",
"def timeout(func, args=(), kwargs={}, timeout_duration=10, default=None): \n import threading\n class InterruptableThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = default\n def run(self):\n self.result = func(*args, **kwargs)\n it = InterruptableThread()\n it.start()\n it.join(timeout_duration)\n if it.isAlive():\n return it.result\n else:\n return it.result",
"def async_thread_call(fun, *args, **kwargs):\n t = Thread(\n target=_decorated_mp,\n args=(host(), port(), fun) + args,\n kwargs=kwargs)\n\n t.start()\n return t",
"def call_soon_in_new_thread(\n __call: Union[Callable[[], T], Call[T]], timeout: Optional[float] = None\n ) -> Call[T]:\n call = _cast_to_call(__call)\n runner = WorkerThread(run_once=True)\n call.set_timeout(timeout)\n runner.submit(call)\n return call",
"def run_async(function):\n @functools.wraps(function)\n def wrapped_fxn(*args, **kwargs):\n t = threading.Thread(target=function, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n return wrapped_fxn",
"def _ThreadCall(self, func, args, callback=None, thread=None):\n if thread is None:\n thread = self._infeed_pool\n\n def _ErrorCallback(e):\n tf.logging.exception(e)\n # Terminate the main thread.\n _thread.interrupt_main()\n\n return thread.apply_async(\n func, args, callback=callback, error_callback=_ErrorCallback)",
"def threaded_call(self, thread_callable, result_callable, *args, **kwargs):\n thread_args = (thread_callable, result_callable, None, args, kwargs)\n t = threading.Thread(target=self._thread_caller, group=None, args=thread_args)\n t.start()",
"def call_in_thread_sync(self, func, args=None, kwargs=None, sync=True, callback=None, timeout=None, default_result=None, pass_exception=True, tag=None, priority=0, error_on_stopped=True, same_thread_shortcut=True):\n if same_thread_shortcut and tag is None and sync and self.is_in_controlled():\n res=func(*(args or []),**(kwargs or {}))\n if callback:\n callback(res)\n return res\n call=callsync.QScheduledCall(func,args,kwargs)\n if callback:\n call.add_callback(callback,pass_result=True,call_on_fail=False)\n if self.add_stop_notifier(call.fail):\n call.add_callback(lambda: self.remove_stop_notifier(call.fail),call_on_fail=True,pass_result=False)\n self._place_call(call,tag=tag,priority=priority)\n result=call.result_synchronizer\n if sync:\n result=result.get_value_sync(timeout=timeout,default=default_result,pass_exception=pass_exception,error_on_fail=error_on_stopped)\n return result",
"def deadline(fn, *args, **kw):\r\n DEFAULT_TIMEOUT_SECS = 0.150\r\n\r\n from threading import Thread\r\n q = Queue(maxsize=1)\r\n timeout = kw.pop('timeout', DEFAULT_TIMEOUT_SECS)\r\n class AnonymousThread(Thread):\r\n def run(self):\r\n q.put(fn(*args, **kw))\r\n AnonymousThread().start()\r\n try:\r\n return q.get(timeout=timeout)\r\n except Empty:\r\n raise Timeout",
"def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):\n import threading\n\n class InterruptableThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = default\n\n def run(self):\n try:\n self.result = func(*args, **kwargs)\n except:\n pass\n\n it = InterruptableThread()\n it.start()\n it.join(timeout_duration)\n return it.result",
"def _in_thread(func, *args, **kwargs):\r\n def _f():\r\n func(*args, **kwargs)\r\n t = threading.Thread(target=_f, name='/*/*')\r\n t.start()\r\n return t",
"def thread_func(*args, **kwargs):\n exception, res = None, None\n try:\n res = func(*args, **kwargs)\n except Exception as e:\n exception = e\n return callback(exception, res)",
"def call_repeatedly(interval, function, args):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(**args)\n\n threading.Thread(target=loop).start()\n\n # return the thread closing handle\n return stopped.set",
"def wait_to_complete(test_function, *args, **kwargs):\n log.info(\"Called wait_to_complete for function %s\",\n test_function.__name__)\n delay = kwargs.pop('whoville_delay', config.short_retry_delay)\n max_wait = kwargs.pop('whoville_max_wait', config.short_max_wait)\n timeout = time.time() + max_wait\n while time.time() < timeout:\n log.debug(\"Calling test_function\")\n test_result = test_function(*args, **kwargs)\n log.debug(\"Checking result\")\n if test_result:\n log.debug(\"Function output [%s] eval to True, returning output\",\n str(test_result)[:25])\n return test_result\n log.debug(\"Function output [%s] evaluated to False, sleeping...\",\n str(test_result)[:25])\n time.sleep(delay)\n log.debug(\"Hit Timeout, raising TimeOut Error\")\n raise ValueError(\"Timed Out waiting for {0} to complete\".format(\n test_function.__name__))",
"def run_in_main_thread(f: Callable[..., T], *args: Any, **kwargs: Any) -> Awaitable[T]:\n return _run_in_runner(sublime.set_timeout, f, *args, **kwargs)"
] | [
"0.7358714",
"0.6953007",
"0.6871666",
"0.68064636",
"0.6705682",
"0.66787475",
"0.66639745",
"0.65851456",
"0.65605766",
"0.64959145",
"0.6363027",
"0.6351364",
"0.63138753",
"0.6275723",
"0.62373906",
"0.6194692",
"0.6164229",
"0.6100586",
"0.6081948",
"0.607668",
"0.6076638",
"0.6054088",
"0.60402405",
"0.6034761",
"0.5994708",
"0.59319633",
"0.59188014",
"0.5899352",
"0.58584845",
"0.5858442"
] | 0.7449713 | 0 |
Get the callback which, when called, provides result_placeholder with the result. | def get_inner_callback(self, result_placeholder):
def inner_callback(result):
result_placeholder.callback({"result": result})
return inner_callback | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_inner_errback(self, result_placeholder):\n\n def inner_errback(error):\n result_placeholder.callback({\"exception\": error})\n\n return inner_errback",
"def placeholder():\n return ResultProxy(TaskResult())",
"def on_result(self, callback):\n self.result_callback = callback",
"def resultcallback(group):\n if hasattr(group, \"result_callback\") and group.result_callback is not None:\n decorator = group.result_callback()\n else:\n # Click < 8.0\n decorator = group.resultcallback()\n return decorator",
"def execute(self):\n return self.callback(*self.args)",
"def resultCallback(self, withResult, withValue):\n if not self._called:\n self._called = True\n try:\n ((self._successTo\n if withResult else\n self._failureTo) or (lambda r, m: None))(withResult, withValue)\n except Exception as ex:\n thesplog('Exception in callback: %s', ex, exc_info=True, level=logging.ERROR)\n # Ensure additional callbacks are still called even if a callback gets an exceptions.\n if self._thenTo:\n self._thenTo.resultCallback(withResult, withValue)",
"def _get_result(self):\r\n \r\n return self._result",
"def get_result(self) -> Any:\n ...",
"def result(value, order):\n return ResultProxy(TaskResult(value, order))",
"def proxy_result(self):\n return None",
"def callback(cb): \n def cb_func(*args):\n self = args[0]\n (value, is_last) = cb(*args)\n if (value is not None):\n self._cb_return[cb.__name__] = self._cb_return.get(cb.__name__, []) + \\\n [value]\n if (is_last):\n self._cb_event[cb.__name__] = True\n return cb_func",
"def get_callback(self):\n return self.callbacks[self.type]",
"def _get_callback(self, value):\n if value:\n # Should be a DBRef\n return self.model.find_one({\"_id\": value.id})\n return value",
"def callback(self):\n return self._callback",
"def return_result(self, result):\n if self._result_already_returned():\n self.tab.logger.log(\"error: result is already returned\", min_level=1)\n\n self.deferred.callback(result)\n # self.deferred = None",
"def create_callback(output_element,retfunc):\n def callback(*input_values):\n return retfunc(*input_values)\n return callback",
"def wait(result):\n if is_result_proxy(result):\n result.__wrapped__ # force the evaluation",
"def callback(self, function: Optional[Callable[[int], None]]) -> None:",
"def get_result(self, state):\n pass",
"def callback(self, fun: Callable[[], None] | None) -> None:",
"def getcallback(cls, cmd, arg):\n return [dict({'cmd': cmd, 'arg': arg, 'ret': None})]",
"def run_callback(func, plus, result):\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)",
"def getCallable():",
"def getResult(self, *args, **kwargs):\r\n return None",
"def _handle_result(self, result):\n if self.result_callback != None:\n #Call the result callback but expect failure.\n try:\n self.result_callback(result, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in result handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.logg.error(\"Error: no on_result defined for '{cmd!r}' command result: {res!r}.\",cmd=self.command,res=result)",
"def bind_result(\n function: Callable[[_FirstType], Result[_UpdatedType, _SecondType]],\n) -> Kinded[Callable[\n [KindN[_ResultLikeKind, _FirstType, _SecondType, _ThirdType]],\n KindN[_ResultLikeKind, _UpdatedType, _SecondType, _ThirdType],\n]]:\n @kinded\n def factory(\n container: KindN[_ResultLikeKind, _FirstType, _SecondType, _ThirdType],\n ) -> KindN[_ResultLikeKind, _UpdatedType, _SecondType, _ThirdType]:\n return container.bind_result(function)\n return factory",
"def callback(self):\n if self._weak:\n return self._callback()\n return self._callback",
"def register_output(self, result):\n self.output_placeholder_ids += (self._store_placeholders(result).value,)",
"def test_lookup_adds_callback(self):\n # Reset event_loop so we start in a clean state.\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.event_loop = asyncio.get_event_loop()\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n lookup._handle_response = mock.MagicMock()\n keys = []\n for k, v in lookup.pending_requests.items():\n keys.append(k)\n v.set_result('foo')\n self.event_loop.run_until_complete(v)\n self.assertEqual(lookup._handle_response.call_count, 3)\n for i, key in enumerate(keys):\n # check the callback called _handle_response with the correct\n # arguments.\n arg_key = lookup._handle_response.call_args_list[i][0][0]\n self.assertEqual(arg_key, key)\n arg_contact = lookup._handle_response.call_args_list[i][0][1]\n self.assertIn(arg_contact, lookup.contacted)\n arg_future = lookup._handle_response.call_args_list[i][0][2]\n self.assertEqual(arg_future.result(), 'foo')",
"def perform_callback(self, *args, **kwargs):\n pass"
] | [
"0.69873226",
"0.6811237",
"0.6682561",
"0.60813755",
"0.59589845",
"0.5794099",
"0.57086885",
"0.5683732",
"0.56691265",
"0.55882865",
"0.5580149",
"0.5574545",
"0.5567593",
"0.5566626",
"0.55497885",
"0.55393505",
"0.5519004",
"0.55113274",
"0.5498118",
"0.5496209",
"0.5481174",
"0.5422043",
"0.53657687",
"0.53417414",
"0.53366953",
"0.531168",
"0.5304035",
"0.52992266",
"0.5287286",
"0.52638173"
] | 0.835267 | 0 |
Get the errback which, when called, provides result_placeholder with the error. | def get_inner_errback(self, result_placeholder):
def inner_errback(error):
result_placeholder.callback({"exception": error})
return inner_errback | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def repr_failure(self, excinfo):\n if excinfo.errisinstance(MypyError):\n return excinfo.value.args[0]\n return super().repr_failure(excinfo)",
"def errback(result):\n append(result)\n return None",
"def result_stderr(result):\n return result[1][1]",
"def error(reason, order):\n return ResultProxy(TaskResult(TaskError(reason), order))",
"def instantiateShootErrback():\n d = defer.Deferred()\n try:\n 1/0\n except:\n d.errback()\n d.addErrback(lambda x: None)",
"def error(self):\n return self._decorator_wrapper(EventName.error)",
"def return_error(self, error):\n if self._result_already_returned():\n self.tab.logger.log(\"error: result is already returned\", min_level=1)\n self.deferred.errback(error)\n # self.deferred = None",
"def _get_error_text(self, result: dict) -> str:\n try:\n return result[self._FIELD_TEXT]\n except KeyError:\n return self._DEFAULT_ERROR_MSG",
"def exception(self) -> typing.Optional[BaseException]:\n if not self.done():\n raise InvalidStateError(\"result is not yet available\")\n elif self.cancelled():\n raise self._cancelled\n elif isinstance(self._outcome, outcomes.Error):\n return self._outcome.error\n else:\n return None",
"def err(self):\n return self._err.getvalue()",
"def grabError(self, error): #$NON-NLS-1$\r",
"def get_error(self):\n return self.e",
"def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)",
"def geterr():\n return __errprof.state.copy()",
"def cursor_error(cls, val):\n return cls('cursor_error', val)",
"def cursor_error(cls, val):\n return cls('cursor_error', val)",
"def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error",
"def do_get_error(self):\n if self._last_exception is None:\n print('no errors')\n else:\n traceback.print_exception(*self._last_exception)",
"def error_redirect(err):\n error = err\n return render_template('error.html',\n title='That doesn\\'t exist!',\n error=error)",
"def formatError(self, test, err):\n test.capturedOutput = output = self.buffer\n self._buf = None\n if not output:\n # Don't return None as that will prevent other\n # formatters from formatting and remove earlier formatters\n # formats, instead return the err we got\n return err\n ec, ev, tb = err\n return (ec, self.addCaptureToErr(ev, output), tb)",
"def GetErrorInfo(self) -> Optional[str]:\n if self.state == TestRunState.ERROR:\n return self.error_reason\n if self.state == TestRunState.CANCELED:\n return _TEST_RUN_CANCEL_REASON_MAP.get(self.cancel_reason)",
"def back_err(self):\n return self._derived_properties[\"bkgd_err\"]",
"def xerr(self, i):\n return self.errors[0][i]",
"def get_error_message(self):\n try:\n msg = self.failed_restrictions[0].get_error_message(\n is_api=self.request and self.request.is_api\n )\n except IndexError:\n msg = None\n return msg",
"def error():\n return None",
"def repr_failure(self, excinfo):\n if isinstance(excinfo.value, NbCellError):\n msg_items = [bcolors.FAIL + \"Notebook cell execution failed\" + bcolors.ENDC]\n formatstring = bcolors.OKBLUE + \"Cell %d: %s\\n\\n\" + \\\n \"Input:\\n\" + bcolors.ENDC + \"%s\\n\\n\" + \\\n bcolors.OKBLUE + \"Traceback:%s\" + bcolors.ENDC\n msg_items.append(formatstring % excinfo.value.args)\n return \"\\n\".join(msg_items)\n else:\n return \"pytest plugin exception: %s\" % str(excinfo.value)",
"def geterrcall(errtype):\n if errtype not in __errprof:\n raise KeyError(\"Unknown error type: %s\" % errtype)\n else:\n return __errprof.getcall(errtype)",
"def get_error(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetError', self.handle))",
"def test_original_failure(self):\n try:\n 1 / 0\n except ZeroDivisionError:\n f = Failure()\n dr = EventualResult(fail(f), None)\n self.assertIdentical(dr.original_failure(), f)",
"def get_error(self):\n return self.exc_info"
] | [
"0.653857",
"0.64817667",
"0.62341356",
"0.6198013",
"0.6175378",
"0.59674317",
"0.59652656",
"0.58356804",
"0.5834942",
"0.5793907",
"0.57484853",
"0.57245123",
"0.5630114",
"0.5626561",
"0.56203365",
"0.56203365",
"0.5619572",
"0.561235",
"0.56108445",
"0.56080973",
"0.5576624",
"0.55632985",
"0.5559315",
"0.5558334",
"0.5547986",
"0.55312914",
"0.5528776",
"0.5527074",
"0.5525762",
"0.55232704"
] | 0.8095896 | 0 |
Prepare the texture cache | def initTextureCache():
global textureCache
textureCache = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_texture(resource):\n global tex\n image = pygame.image.load(resource)\n image = pygame.transform.scale(image, (Case.pixel_size, Case.pixel_size))\n image_rect = image.get_rect()\n return image, image_rect",
"def populate_texture(self, texture):\n texture.blit_buffer(self._cbuffer, colorfmt='bgr', bufferfmt='ubyte')",
"def _load_textures(self):\n search_tex = RPLoader.load_texture(self.get_resource(\"search_tex.png\"))\n area_tex = RPLoader.load_texture(self.get_resource(\"area_tex.png\"))\n\n for tex in [search_tex, area_tex]:\n tex.set_minfilter(SamplerState.FT_linear)\n tex.set_magfilter(SamplerState.FT_linear)\n tex.set_wrap_u(SamplerState.WM_clamp)\n tex.set_wrap_v(SamplerState.WM_clamp)\n\n self._smaa_stage.area_tex = area_tex\n self._smaa_stage.search_tex = search_tex",
"def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm",
"def setupTexture(self):\r\n # Configure the texture rendering parameters\r\n glEnable(GL_TEXTURE_2D)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\r\n\r\n # Re-select our texture, could use other generated textures\r\n # if we had generated them earlier.\r\n glBindTexture(GL_TEXTURE_2D, self.imageID)",
"def setupTexture( self ):\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glBindTexture(GL_TEXTURE_2D, self.imageID)",
"def load_textures(self):\n for texture_meta in self.gltf.textures:\n texture = MaterialTexture()\n\n if texture_meta.source is not None:\n texture.texture = self.images[texture_meta.source]\n\n if texture_meta.sampler is not None:\n texture.sampler = self.samplers[texture_meta.sampler]\n\n self.textures.append(texture)",
"def load(self):\r\n self._open_image()\r\n\r\n components, data = image_data(self.image)\r\n\r\n texture = self.ctx.texture(self.image.size, components, data,)\r\n texture.extra = {\"meta\": self.meta}\r\n\r\n if self.meta.mipmap_levels is not None:\r\n self.meta.mipmap = True\r\n\r\n if self.meta.mipmap:\r\n if isinstance(self.meta.mipmap_levels, tuple):\r\n texture.build_mipmaps(*self.meta.mipmap_levels)\r\n else:\r\n texture.build_mipmaps()\r\n\r\n if self.meta.anisotropy:\r\n texture.anisotropy = self.meta.anisotropy\r\n\r\n self._close_image()\r\n\r\n return texture",
"def initCacheFile(self):\n self.cacheData = {\"data\": []}\n for i in range(int(self.frameCount)):\n self.cacheData[\"data\"].append({\"isLoaded\": False,\n \"faces\": []})\n self.saveCacheFile()",
"def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map",
"def texture( self, mode ):\n texture = mode.cache.getData( self, 'texture' )\n if texture is None:\n texture = glGenTextures( 1 )\n holder = mode.cache.holder( self, texture, 'texture' )\n return texture",
"def pygame_load_texture(filename, filter=True, mipmap=True):\n if filename not in _texture_cache:\n pygame = __import__(\"pygame\", {},{},[])\n if os.path.exists(filename):\n img = pygame.image.load(filename)\n else:\n img = pygame.image.load(os.path.join(data_directory, filename))\n data, size = pygame.image.tostring(img, 'RGBA', True), img.get_size()\n _texture_cache[filename] = load_texture(data, size, \"RGBA\",\n filter, mipmap), size\n return _texture_cache[filename]",
"def preDraw(self, xform=None, bbox=None):\n\n self.modulateTexture.bindTexture(gl.GL_TEXTURE0)\n self.clipTexture .bindTexture(gl.GL_TEXTURE1)\n self.colourTexture .bindTexture(gl.GL_TEXTURE2)\n self.cmapTexture .bindTexture(gl.GL_TEXTURE3)",
"def setup_cache(self):\n train_cache_path = self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)\n dev_cache_path = self.cache.get_cache_path_and_check(DEV_STR, self.task_name)\n test_cache_path = self.cache.get_cache_path_and_check(TEST_STR, self.task_name)\n\n self.train_cache_writer = None\n self.dev_cache_writer = None\n self.test_cache_writer = None\n\n if os.path.exists(train_cache_path):\n f = h5py.File(train_cache_path, 'r')\n self.train_cache = (torch.tensor(f[str(i)][()]) for i in range(len(f.keys())))\n else:\n self.train_cache_writer = h5py.File(train_cache_path, 'w')\n if os.path.exists(dev_cache_path):\n f2 = h5py.File(dev_cache_path, 'r')\n self.dev_cache = (torch.tensor(f2[str(i)][()]) for i in range(len(f2.keys())))\n else:\n self.dev_cache_writer = h5py.File(dev_cache_path, 'w')\n if os.path.exists(test_cache_path):\n f3 = h5py.File(test_cache_path, 'r')\n self.test_cache = (torch.tensor(f3[str(i)][()]) for i in range(len(f3.keys())))\n else:\n self.test_cache_writer = h5py.File(test_cache_path, 'w')",
"def __call__(self):\n Texture()",
"def _preprocess(self) -> None:\n self.cache = Cache(self._db, self._collection)",
"def _create_texture(self, char, glyph, width, height, glyph_width, glyph_height):\n\n if char not in self._texture_cache:\n ID = glGenTextures (1)\n glBindTexture (GL_TEXTURE_2D, ID)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);\n tex2d = \"\"\n for j in xrange (height):\n for i in xrange (width):\n if (i >= glyph_width) or (j >= glyph_height):\n value = chr (0)\n tex2d += value*4\n else:\n value = chr (glyph.getpixel ((i, j)))\n tex2d += value*4\n\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex2d)\n self._texture_cache[char] = ID\n\n return self._texture_cache[char]",
"def prepare_texture_matrix(self):\n\t\ttexture_matrix = self.normalize_data()\n\t\tlabels = MatrixCreation().independent_variable_labels()\n\t\t\n\t\tcolumns_to_be_deleted = []\n\n\t\tfor key, label in labels.items():\n\t\t\tif label not in self.principal_components:\n\t\t\t\tcolumns_to_be_deleted.append(key)\n\n\t\ttexture_matrix = numpy.delete(texture_matrix,columns_to_be_deleted,axis=1)\n\n\t\treturn texture_matrix",
"def __init__(self, filename, mipmaps=False):\n print(\"Loading Texture \" + filename)\n\n self.mipmaps = mipmaps\n self.filename = filename\n\n self.image = pyglet.resource.image(self.filename)\n self.texture = self.image.texture\n self._verify('width')\n self._verify('height')\n\n if self.mipmaps:\n glGenerateMipmap(self.texture.target)",
"def preprocess(self):\n for texgroup in self.textureGroups.itervalues():\n texgroup.dirty = True",
"def setTexture(self,pathToNewTexture):\n self.spritePath=pathToNewTexture\n self.spriteImageFile=(Image.open(self.spritePath))\n self.reDraw()",
"def _prep_cache(xs_cache, E_g=None, phi_g=None):\n if E_g is not None:\n xs_cache[\"E_g\"] = E_g\n\n if phi_g is not None:\n xs_cache[\"phi_g\"] = phi_g",
"def _load_opengl(self):\r\n opengles.glGenTextures(4, ctypes.byref(self._tex), 0)\r\n from pi3d.Display import Display\r\n if Display.INSTANCE:\r\n Display.INSTANCE.textures_dict[str(self._tex)] = [self._tex, 0]\r\n opengles.glBindTexture(GL_TEXTURE_2D, self._tex)\r\n RGBv = GL_RGBA if self.alpha else GL_RGB\r\n opengles.glTexImage2D(GL_TEXTURE_2D, 0, RGBv, self.ix, self.iy, 0, RGBv,\r\n GL_UNSIGNED_BYTE,\r\n ctypes.string_at(self.image, len(self.image)))\r\n opengles.glEnable(GL_TEXTURE_2D)\r\n opengles.glGenerateMipmap(GL_TEXTURE_2D)\r\n opengles.glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n if self.mipmap:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_LINEAR_MIPMAP_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_LINEAR)\r\n else:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,\r\n GL_MIRRORED_REPEAT)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,\r\n GL_MIRRORED_REPEAT)",
"def load(self):\r\n self._open_image()\r\n\r\n # Handle images with palettes\r\n if self.image.palette and self.image.palette.mode == 'RGB':\r\n logger.debug(\"Converting P image to RGB using palette\")\r\n self.image = self.image.convert('RGB', palette=self.image.palette)\r\n\r\n components, data = image_data(self.image)\r\n\r\n texture = self.ctx.texture(\r\n self.image.size,\r\n components,\r\n data,\r\n )\r\n texture.extra = {'meta': self.meta}\r\n\r\n if self.meta.mipmap_levels is not None:\r\n self.meta.mipmap = True\r\n\r\n if self.meta.mipmap:\r\n if isinstance(self.meta.mipmap_levels, tuple):\r\n texture.build_mipmaps(*self.meta.mipmap_levels)\r\n else:\r\n texture.build_mipmaps()\r\n\r\n if self.meta.anisotropy:\r\n texture.anisotropy = self.meta.anisotropy\r\n\r\n self._close_image()\r\n\r\n return texture",
"def _copy_to_gpu(self):\n self.dispatch('on_texture')",
"def prepareFrameCache(self, frame, cacheType): # real signature unknown; restored from __doc__\n pass",
"def _cache_checker_matrices(self):\r\n if self.model.mat_texid is not None:\r\n self._geom_checker_mats = []\r\n for geom_id in range(self.model.ngeom):\r\n mat_id = self.model.geom_matid[geom_id]\r\n tex_id = self.model.mat_texid[mat_id]\r\n texture = self.textures[tex_id]\r\n h, w = texture.bitmap.shape[:2]\r\n self._geom_checker_mats.append(self._make_checker_matrices(h, w))\r\n\r\n # add skybox\r\n skybox_tex_id = -1\r\n for tex_id in range(self.model.ntex):\r\n skybox_textype = 2\r\n if self.model.tex_type[tex_id] == skybox_textype:\r\n skybox_tex_id = tex_id\r\n if skybox_tex_id >= 0:\r\n texture = self.textures[skybox_tex_id]\r\n h, w = texture.bitmap.shape[:2]\r\n self._skybox_checker_mat = self._make_checker_matrices(h, w)\r\n else:\r\n self._skybox_checker_mat = None",
"def pyglet_load_texture(filename):\n if filename not in _texture_cache:\n image = __import__(\"pyglet\", {},{},[\"image\"]).image\n if os.path.exists(filename):\n img = image.load(filename)\n else:\n img = image.load(os.path.join(data_directory, filename))\n _texture_cache[filename] = img.texture\n return _texture_cache[filename]",
"def _clear_image_cache(self):\n logger.debug(\"Clearing image cache\")\n self._pathoutput = None\n self._previewoutput = None\n self._previewtrain = dict()\n self._previewcache = dict(modified=None, # cache for extract and converts\n placeholder=None)",
"def load(self):\n\t\tglTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE16_ALPHA16, \n\t\t\tself.width, self.width, self.width, 0, GL_LUMINANCE_ALPHA, \n\t\t\tGL_UNSIGNED_SHORT, ctypes.byref(self.data))"
] | [
"0.6436398",
"0.61779374",
"0.61091405",
"0.6104321",
"0.6104049",
"0.6022134",
"0.5968218",
"0.5916947",
"0.59023964",
"0.58662575",
"0.5826121",
"0.5810249",
"0.56719536",
"0.56409365",
"0.56295377",
"0.56141543",
"0.56082964",
"0.5571378",
"0.5571103",
"0.5556055",
"0.55550283",
"0.554159",
"0.5532813",
"0.5514608",
"0.5471052",
"0.54587525",
"0.545451",
"0.54472816",
"0.54324704",
"0.5431551"
] | 0.8022769 | 0 |
Draw the triangles in the array. GL_T2F_C4F_N3F_V3F | def drawFromArray(array, textured):
if textured:
		npt = 36 # Number of floats per triangle
else:
npt = 30
	vC = len(array) // npt # Triangle count
	npv = npt // 3 # Number of floats per vertex
glBegin(GL_TRIANGLES)
if textured:
for j in range(vC):
for i in range(3):
cV = j * npt + i * npv
glTexCoord2f(array[cV], array[cV + 1])
glColor4f(array[cV + 2], array[cV + 3], array[cV + 4], array[cV + 5])
glNormal3f(array[cV + 6], array[cV + 7], array[cV + 8])
glVertex3f(array[cV + 9], array[cV + 10], array[cV + 11])
else:
for j in range(vC):
for i in range(3):
cV = j * npt + i * npv
glColor4f(array[cV], array[cV + 1], array[cV + 2], array[cV + 3])
glNormal3f(array[cV + 4], array[cV + 5], array[cV + 6])
glVertex3f(array[cV + 7], array[cV + 8], array[cV + 9])
glEnd() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_triangle(tup):\n x, y, z = tup[0], tup[1], tup[2]\n t_draw = turtle.Turtle()\n for index in range(3):\n t_draw.forward()",
"def drawTriangle(t, color, x, y):\n ## t.color(color)\n ## t.begin_fill()\n for i in range(3):\n t.forward(x)\n t.right(y)",
"def drawNormalsFromArray(array, textured):\r\n\r\n\tif textured:\r\n\t\tnpt = 36\r\n\telse:\r\n\t\tnpt = 30\r\n\r\n\tvC = len(array) / npt\r\n\tnpv = npt / 3\r\n\r\n\tglBegin(GL_LINES)\r\n\r\n\tif textured:\r\n\t\tfor j in range(vC):\r\n\t\t\tx = 0.0\r\n\t\t\ty = 0.0\r\n\t\t\tz = 0.0\r\n\r\n\t\t\tcV = j * npt\r\n\r\n\t\t\tnx = array[cV + 6]\r\n\t\t\tny = array[cV + 7]\r\n\t\t\tnz = array[cV + 8]\r\n\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tcV = j * npt + i * npv\r\n\t\t\t\tx = x + array[cV + 9]\r\n\t\t\t\ty = y + array[cV + 10]\r\n\t\t\t\tz = z + array[cV + 11]\r\n\r\n\t\t\tx = x / 3.0\r\n\t\t\ty = y / 3.0\r\n\t\t\tz = z / 3.0\r\n\r\n\t\t\tglColor4f(0.0, 0.0, 0.0, 1.0)\r\n\t\t\tglVertex3f(x, y, z)\r\n\t\t\tglVertex3f(x + nx, y + ny, z + nz)\r\n\r\n\telse:\r\n\t\tfor j in range(vC):\r\n\t\t\tx = 0.0\r\n\t\t\ty = 0.0\r\n\t\t\tz = 0.0\r\n\r\n\t\t\tcV = j * npt\r\n\r\n\t\t\tnx = array[cV + 4]\r\n\t\t\tny = array[cV + 5]\r\n\t\t\tnz = array[cV + 6]\r\n\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tcV = j * npt + i * npv\r\n\t\t\t\tx = x + array[cV + 7]\r\n\t\t\t\ty = y + array[cV + 8]\r\n\t\t\t\tz = z + array[cV + 9]\r\n\r\n\t\t\tx = x / 3.0\r\n\t\t\ty = y / 3.0\r\n\t\t\tz = z / 3.0\r\n\r\n\t\t\tglColor4f(0.0, 0.0, 0.0, 1.0)\r\n\t\t\tglVertex3f(x, y, z)\r\n\t\t\tglVertex3f(x + nx, y + ny, z + nz)\r\n\r\n\tglEnd()",
"def draw_triangles(self, triangles: Collection):\n # project the points into 2D and compute each shaded/faded color\n processed_triangles = []\n for p1, p2, p3, color in progress_iterator(triangles, \"Processing triangles...\"):\n shaded_color = self.compute_shaded_color(p1, p2, p3, color)\n *p1_p, z1 = self.project_point(p1)\n *p2_p, z2 = self.project_point(p2)\n *p3_p, z3 = self.project_point(p3)\n centroid_z = (z1 + z2 + z3) / 3\n faded_color = self.compute_fog_faded_color(shaded_color, centroid_z)\n processed_triangles.append((centroid_z, p1_p, p2_p, p3_p, faded_color))\n \n # sort the list of triangles back-to-front (by centroid Z depth)\n processed_triangles.sort(key=lambda tri: tri[0], reverse=True)\n \n # draw the triangles\n for _, p1, p2, p3, color in progress_iterator(processed_triangles, \"Adding triangles to the canvas...\"):\n self.draw_triangle(p1, p2, p3, color)\n print(f\" Added {len(processed_triangles)} triangles\")",
"def drawTwoTriangles():\n\n drawTriangle(200,100,\"blue\",\"pink\")\n Lucia.up()\n Lucia.forward(220)\n Lucia.down()\n drawTriangle(100,200,\"grey\",\"blue\")\n Lucia.seth(0)",
"def __init__(self, camera=None, light=None, name=\"\", \r\n corners=((-0.5, -0.28868), (0.0, 0.57735), (0.5, -0.28868)),\r\n x=0.0, y=0.0, z=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0):\r\n super(Triangle, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n c = corners # alias for convenience\r\n\r\n self.verts = ((c[0][0], c[0][1], 0.0), (c[1][0], c[1][1], 0.0), (c[2][0], c[2][1], 0.0))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 0.86603), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))",
"def draw():\n global trackball, flashlight, \\\n vertex_buffer, normal_buffer, \\\n colors, color_buffer, selected_face, add_face, \\\n shaders\n\n # Clear the rendering information.\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Clear the transformation stack.\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n glPushMatrix()\n\n # Transform the objects drawn below by a rotation.\n trackball.glRotate()\n\n # * * * * * * * * * * * * * * * *\n # Draw all the triangular facets.\n glUseProgram(shaders)\n\n h_vertex = glGetAttribLocation(shaders,'vertex')\n h_normal = glGetAttribLocation(shaders,'normal')\n h_color = glGetAttribLocation(shaders,'color')\n h_eye = glGetUniformLocation(shaders,'eye')\n h_light = glGetUniformLocation(shaders,'light')\n\n # all the vertex positions\n glEnableVertexAttribArray(h_vertex)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glVertexAttribPointer(h_vertex, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # all the vertex normals\n glEnableVertexAttribArray(h_normal)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glVertexAttribPointer(h_normal, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n # all the face vertex colors\n glEnableVertexAttribArray(h_color)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n\n if selected_face and add_face:\n # paint that face's vertices Green\n rgb_selected = [0.7,0.9,0.6] #GREEN\n for change in range(9):\n colors[selected_face.id * 9 + change] = rgb_selected[change % 3]\n # update the color buffer\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n add_face = False\n\n glVertexAttribPointer(h_color, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # position of the flashlight\n light = flashlight.rotate(vector(0.0,0.0,1.0));\n glUniform3fv(h_light, 1, (2.0*radius*light).components())\n\n # position of the viewer's eye\n eye = trackball.recip().rotate(vector(0.0,0.0,1.0))\n glUniform3fv(h_eye, 1, eye.components())\n\n glDrawArrays (GL_TRIANGLES, 0, len(face.instances) * 3)\n\n glDisableVertexAttribArray(h_vertex)\n glDisableVertexAttribArray(h_normal)\n glDisableVertexAttribArray(h_color)\n\n glPopMatrix()\n\n # Render the scene.\n glFlush()\n\n glutSwapBuffers()",
"def triangles_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n tris = [int(v) for v in self.triangles.split(',')]\n data = []\n for i in xrange(0, len(tris), 3):\n v0 = 2 * tris[i]\n v1 = 2 * tris[i + 1]\n v2 = 2 * tris[i + 2]\n data.append(u\"M%s,%sL%s,%sL%s,%sz\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n verts[v2], verts[v2 + 1],\n ))\n return u\"\".join(data)",
"def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )",
"def draw( self ):\r\n print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"",
"def drawFace():\r\n\tglPushMatrix()\r\n\tglTranslatef(-0.5,-0.5,0)\r\n\tglBegin(GL_LINE_LOOP)\r\n\t\r\n\tglVertex3f(0,VALUE,0)\r\n\tglVertex3f(VALUE,0,0)\r\n\t\r\n\tglVertex3f(LENGTH-VALUE,0,0)\r\n\tglVertex3f(LENGTH,VALUE,0)\r\n\t\r\n\tglVertex3f(LENGTH,LENGTH-VALUE,0)\r\n\tglVertex3f(LENGTH-VALUE,LENGTH,0)\r\n\t\r\n\tglVertex3f(VALUE,LENGTH,0)\r\n\tglVertex3f(0,LENGTH-VALUE,0)\r\n\t\r\n\tglEnd()\r\n\tglPopMatrix()",
"def draw_triangle(coords, triangle, colour):\n # Draw the matplotlib stuff\n x = [line[0] for line in triangle]\n y = [line[1] for line in triangle]\n plt.fill(x, y, c=[a/255.0 for a in colour])",
"def draw_triangle(self, p1: Point2D, p2: Point2D, p3: Point2D, color: Color):\n if self._fast_draw:\n color_str = \"#%02x%02x%02x\" % tuple([round(255.0*x) for x in color])\n x1, y1 = p1\n x2, y2 = p2\n x3, y3 = p3\n turtle.getcanvas().create_polygon((x1,-y1,x2,-y2,x3,-y3), fill=color_str)\n else:\n turtle.goto(*p1)\n turtle.fillcolor(color)\n turtle.begin_fill()\n turtle.goto(*p2)\n turtle.goto(*p3)\n turtle.end_fill()",
"def draw_triangle():\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)",
"def triangle_facets(length=2.0, divisions=4):\n\n # Starting with a equilateral triangle.\n vertices = [\n (0, (math.sqrt(3) / 3.0) * length, 0.0),\n (-length / 2, -(math.sqrt(3) / 6.0) * length, 0.0),\n (length / 2, -(math.sqrt(3) / 6.0) * length, 0.0),\n ]\n\n # The facets should not be accumulated for all of them as the first\n # triangle will Z-fight with remaining triangles. The overall style is to\n # have 'holes' where there are triangles that aren't filled.\n\n facets = [(0, 1, 2)]\n new_triangles = facets\n\n for _ in range(divisions):\n facets = []\n for new_triangle in new_triangles:\n next_vertices, next_triangles = _divide(new_triangle, vertices)\n vertices.extend(next_vertices)\n facets.extend(next_triangles)\n\n new_triangles = facets\n\n return vertices, facets",
"def MeshPyTri(points,facets,*args,**kwargs):\n info = triangle.MeshInfo()\n info.set_points(points)\n info.set_facets(facets)\n\n return triangle.build(info,*args,**kwargs)",
"def triangle(self, freq: int, /) -> None:",
"def output_triangles_at_T(tri, T, fh):\n fh.write(\"id;time;wkt;n0;n1;n2;v0;v1;v2;finite;info\\n\")\n for t in tri:\n if t is None:\n continue\n fh.write(\"{0};{6};{1};{2[0]};{2[1]};{2[2]};{3[0]};{3[1]};{3[2]};{4};{5}\\n\".format(id(t), \n t.str_at(T), \n [id(n) for n in t.neighbours], \n [id(v) for v in t.vertices], \n t.is_finite, \n t.info,\n T))",
"def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4,b-4,c-4)\n for (a,b,c) in self.triangles if a > 3 and b > 3 and c > 3]",
"def draw_triangle(vertices, shape):\n # add 0.5 to account for fact that pixel centers are at (0.5, 0.5)\n barycenters = barycentric_coords(vertices, numpy.indices(shape) + 0.5)\n return (barycenters >= 0).all(axis=0)",
"def Render(self, mode):\n\n shaders.glUseProgram(self.shader)\n try:\n self.vbo.bind()\n try:\n glEnableClientState(GL_VERTEX_ARRAY)\n GLVertexPointer(self.vbo)\n glDrawArrays(GL_TRIANGLES, 0, 9)\n finally:\n self.vbo.unbind()\n glDisableClientState(GL_VERTEX_ARRAY)\n finally:\n shaders.glUseProgram(0)",
"def draw(self):\n if len(self.__points) >= 2:\n self._total_length = 0\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n coords = self.__line_segment(p1, p2)\n if not coords is None:\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 1, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )\n coords = self.__line_cap(p2)\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 0, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )",
"def dessinerTriangle(p0, p1, p2,texture=None):\n \n \n if texture == None:\n r,v,b = 0,1,0\n glColor3f(r,v,b)\n glDisable(GL_TEXTURE_2D)\n glColor3f(1.0,0.0,0.0)\n glBegin(GL_TRIANGLES)\n glVertex3f(p0[0],p0[1],p0[2])\n glVertex3f(p1[0],p1[1],p1[2])\n glVertex3f(p2[0],p2[1],p2[2])\n glEnd()\n glEnable(GL_TEXTURE_2D)\n else:\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_TRIANGLES)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(0.0,0.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(1,1)\n glVertex3f(p2[0],p2[1],p2[2])\n glEnd()",
"def exportTriangles(self):\n # Filter out triangles with any vertex in the extended BBox\n return [(a-4, b-4, c-4)\n for (a, b, c) in self.triangles if a > 3 and b > 3 and c > 3]",
"def Triangle(self, c1=(0.,0.), c2=(0.,1.), c3=(1.,0.), npoints=10, element_type=\"tri\", equally_spaced=True):\n\n if not isinstance(c1,tuple) or not isinstance(c2,tuple) or not isinstance(c3,tuple):\n raise ValueError(\"The coordinates c1, c2 and c3 should be given in tuples of two elements each (x,y)\")\n\n npoints = int(npoints)\n\n\n npoints = npoints - 1\n if npoints < 0:\n npoints = 0\n\n c1 = np.array(c1); c2 = np.array(c2); c3 = np.array(c3)\n opoints = np.vstack((c1,c2,c3))\n oelements = np.array([[0,1,2]])\n\n if element_type==\"tri\":\n mesh = self.TriangularProjection(points=opoints, npoints=npoints, equally_spaced=equally_spaced)\n self.__update__(mesh)\n\n\n elif element_type == \"quad\":\n\n # SPLIT THE TRIANGLE INTO 3 QUADS\n omesh = Mesh()\n omesh.element_type=\"tri\"\n omesh.elements = oelements\n omesh.nelem = omesh.elements.shape[0]\n omesh.points = opoints\n omesh.GetBoundaryEdges()\n\n sys.stdout = open(os.devnull, \"w\")\n omesh.ConvertTrisToQuads()\n sys.stdout = sys.__stdout__\n\n\n npoints = int(npoints/2) + 1\n mesh = self.QuadrilateralProjection(points=omesh.points[omesh.elements[0,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n for i in range(1,omesh.nelem):\n mesh += self.QuadrilateralProjection(points=omesh.points[omesh.elements[i,:],:],\n npoints=npoints, equally_spaced=equally_spaced)\n\n self.__update__(mesh)",
"def draw_triangle_filled(x1, y1,\n x2, y2,\n x3, y3, color):\n\n first_point = [x1, y1]\n second_point = [x2, y2]\n third_point = [x3, y3]\n point_list = (first_point, second_point, third_point)\n draw_polygon_filled(point_list, color)",
"def triangle_areas(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n xy = points[triangles]\n # s1 = xy[:, 2, :] - xy[:, 1, :]\n # s2 = xy[:, 0, :] - xy[:, 2, :]\n # s3 = xy[:, 1, :] - xy[:, 0, :]\n # which can be simplified to\n # s = xy[:, [2, 0, 1]] - xy[:, [1, 2, 0]] # 3D\n s = xy[:, [2, 0]] - xy[:, [1, 2]] # 2D\n a = np.linalg.det(s)\n return a * 0.5",
"def Triangle(points=None):\n if points is None:\n points = [[0, 0, 0], [1, 0, 0], [0.5, 0.5**0.5, 0]]\n\n if len(points) != 3:\n raise TypeError('Points must be given as length 3 np.ndarray or list')\n\n check_valid_vector(points[0], 'points[0]')\n check_valid_vector(points[1], 'points[1]')\n check_valid_vector(points[2], 'points[2]')\n\n cells = np.array([[3, 0, 1, 2]])\n return wrap(pyvista.PolyData(points, cells))",
"def set_triangles(self, val=True):\n self.use_triangle = val",
"def draw(self):\n self.vertex_list.draw(pyglet.gl.GL_QUADS)\n self.label.draw()"
] | [
"0.67465776",
"0.654085",
"0.64874524",
"0.64179724",
"0.6185056",
"0.6159797",
"0.60921663",
"0.60909677",
"0.5993798",
"0.5980469",
"0.5941917",
"0.5735626",
"0.572072",
"0.5639475",
"0.55550486",
"0.5534714",
"0.5518024",
"0.5507215",
"0.5502695",
"0.5499609",
"0.5481365",
"0.54777235",
"0.546361",
"0.5463585",
"0.5449623",
"0.54382694",
"0.5435437",
"0.5434326",
"0.5415608",
"0.5405823"
] | 0.66250885 | 1 |
Find the DB2 version from the unzipped binary | def get_db2_binary_fixpack_version(STAGE_DIRECTORY):
val = ""
    path_for_base_db2_files = ""
db2_version_list = ['10.1', '10.5', '9.7', '9.5']
dir_list = glob.glob("%s/*" % (STAGE_DIRECTORY))
for elm in dir_list:
fixpack_type = os.path.split(elm)[-1]
if fixpack_type.startswith('universal'):
steplog.info("This is a universal fixpack binary : %s" % fixpack_type)
server_dirname = elm
steplog.debug("Server Directory Name : %s " % server_dirname)
steplog.debug("Stage Directory Path : %s " % STAGE_DIRECTORY)
if ostools.is_aix():
path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2', 'aix', 'FILES')
elif ostools.is_linux():
path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2', 'linuxamd64', 'FILES')
elif fixpack_type.startswith('server'):
steplog.info("This is a server fixpack binary : %s" % fixpack_type)
server_dirname = elm
steplog.debug("Server Directory Name : %s " % server_dirname)
steplog.debug("Stage Directory Path : %s " % STAGE_DIRECTORY)
if ostools.is_aix():
path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2', 'aix', 'FILES')
elif ostools.is_linux():
path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2', 'linuxamd64', 'FILES')
    if os.path.exists(path_for_base_db2_files):
steplog.info("DB2 binary is found and it is uncompressed")
try:
files = os.listdir(path_for_base_db2_files)
for item in files:
if item.startswith('INSTANCE_SETUP_SUPPORT_'):
val = item
steplog.info("Found the file to extract the fixpack version : %s" % val)
break
s1 = re.compile(r'_(\d+\.\d+)\.', re.DOTALL)
match_found = s1.search(val)
if match_found:
db2_version_from_file = match_found.group(1)
if db2_version_from_file in db2_version_list:
steplog.info("This is the valid DB2 version : %s" % db2_version_from_file)
pat = re.compile(r"INSTANCE_SETUP_SUPPORT_\d+\.\d+\.\d+\.(\d+)_")
fp_found = pat.search(val)
return fp_found.group(1)
else:
return False
except IOError:
            steplog.info('No such file exists')
else:
steplog.info("DB2 binary may have not unzipped") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_db2_binary_build_version(STAGE_DIRECTORY):\r\n val = \"\"\r\n path_for_base_db2_file = \"\"\r\n db2_version_list = ['10.1', '10.5', '9.7', '9.5']\r\n dir_list = glob.glob(\"%s/*\" % (STAGE_DIRECTORY))\r\n for elm in dir_list:\r\n fixpack_type = os.path.split(elm)[-1]\r\n if fixpack_type.startswith('universal'):\r\n steplog.info(\"This is a universal fixpack binary : %s\" % fixpack_type)\r\n server_dirname = elm\r\n steplog.debug(\"Server Directory Name : %s \" % server_dirname)\r\n steplog.debug(\"Stage Directory Path : %s \" % STAGE_DIRECTORY)\r\n if ostools.is_aix():\r\n path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2')\r\n elif ostools.is_linux():\r\n path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2')\r\n \r\n elif fixpack_type.startswith('server'):\r\n steplog.info(\"This is a server fixpack binary : %s\" % fixpack_type)\r\n server_dirname = elm\r\n steplog.debug(\"Server Directory Name : %s \" % server_dirname)\r\n steplog.debug(\"Stage Directory Path : %s \" % STAGE_DIRECTORY)\r\n if ostools.is_aix():\r\n path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2')\r\n elif ostools.is_linux():\r\n path_for_base_db2_files = os.path.join(STAGE_DIRECTORY, server_dirname, 'db2')\r\n \r\n \r\n if os.path.exists(path_for_base_db2_file):\r\n steplog.info(\"DB2 binary is found and it is uncompressed\")\r\n try:\r\n files = os.listdir(path_for_base_db2_files)\r\n for item in files:\r\n if item.startswith('spec'):\r\n val = item\r\n steplog.info(\"Found the file to extract the fixpack version : %s\" % val)\r\n break\r\n spec_file = os.path.join(path_for_base_db2_file, 'spec')\r\n if os.path.isfile(spec_file):\r\n with open(spec_file) as fh:\r\n for line in fh:\r\n if \"special_\" in line:\r\n build_level_list = line.split('=\")\r\n build_level = build_level_list[-1]\r\n steplog.info(\"Special fixpack build level : %s \" % build_level)\r\n break\r\n return build_level \r\n except IOError:\r\n steplog.info('There is no file exists')\r\n else:\r\n steplog.info(\"DB2 binary may have not unzipped\")",
"def _get_version_from_db(dbname):\n with psycopg2.connect(f\"dbname='{dbname}'\") as conn, conn.cursor() as cr:\n query = \"SELECT replace((regexp_matches(latest_version, '^\\d+\\.0|^saas~\\d+\\.\\d+|saas~\\d+'))[1], '~', '-') FROM ir_module_module WHERE name='base'\"\n cr.execute(query)\n return cr.fetchone()[0]",
"def find_version():\n regex = r\"^ATRAM_VERSION = ['\\\"]v?([^'\\\"]*)['\\\"]\"\n with open(\"./lib/db.py\", 'r') as f:\n match = re.search(regex, f.read(), re.M)\n if match:\n return match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")",
"def db_version():\n return IMPL.db_version()",
"def get_release_version():\n try:\n zipfile = glob.glob('adodb-*.zip')[0]\n except IndexError:\n print(\"ERROR: release zip file not found in '{}'\".format(release_path))\n sys.exit(1)\n\n try:\n version = re.search(\n r\"^adodb-([\\d]+\\.[\\d]+\\.[\\d]+)(-(alpha|beta|rc)\\.[\\d]+)?\\.zip$\",\n zipfile\n ).group(1)\n except AttributeError:\n print('''ERROR: unable to extract version number from '{}'\n Only 3 groups of digits separated by periods are allowed'''\n .format(zipfile))\n sys.exit(1)\n\n return version",
"def get_version(version_file='_version.py'):\n filename = os.path.join(os.path.dirname(__file__), 'mysql', 'toolkit', version_file)\n with open(filename, 'rb') as fp:\n return fp.read().decode('utf8').split('=')[1].strip(\" \\n'\")",
"def read_version():\n # code parts were taken from here https://stackoverflow.com/a/67692\n\n path2setup = os.path.dirname(__file__)\n version_file = os.path.abspath(\n os.path.join(path2setup, \"diffusion_maps\", \"version.py\"))\n\n spec = importlib.util.spec_from_file_location(\"version\", version_file)\n version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(version)\n return version.version.v_short",
"def get_version():\n\n with open('u2fval/__init__.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def get_latest_version(db_path):\n\t\t\n\t\t# create a file system and return latest version\n\t\treturn VersionedFile(db_path).get_latest_version()",
"def _get_version(self):",
"def get_version():\n import ast\n\n with open(os.path.join(\"cruzdb\", \"__init__.py\"), \"r\") as init_file:\n module = ast.parse(init_file.read())\n\n version = (ast.literal_eval(node.value) for node in ast.walk(module)\n if isinstance(node, ast.Assign)\n and node.targets[0].id == \"__version__\")\n try:\n return next(version)\n except StopIteration:\n raise ValueError(\"version could not be located\")",
"def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version",
"def version(self):\n a = re.search('(?<=_V)\\d{1,2}', self.fname)\n if a is None:\n return None\n else:\n return int(a.group())",
"def get_fw_version(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? .*? .*? .*? (.*?) \\r\\n' \n fw_version = re.findall(pattern,summary).pop()\n return fw_version",
"def detect_version(conn):\n try:\n with conn.begin():\n db_version = conn.scalar(text(\n \"SELECT version FROM configuration\"))\n except exc.ProgrammingError:\n with conn.begin():\n packages_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'packages'\")))\n with conn.begin():\n statistics_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_views \"\n \"WHERE schemaname = 'public' AND viewname = 'statistics'\")))\n with conn.begin():\n files_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'files'\")))\n if not packages_exists:\n # Database is uninitialized\n return None\n elif not files_exists:\n # Database is too ancient to upgrade\n raise RuntimeError(\"Database version older than 0.4; cannot upgrade\")\n elif not statistics_exists:\n return \"0.4\"\n else:\n return \"0.5\"\n else:\n return db_version",
"def __get_db_version_int(self):\r\n query = QtSql.QSqlQuery('PRAGMA user_version')\r\n query.first()\r\n return query.value(0).toInt()[0]",
"def whichdb(filename):\n\n import struct\n\n # Check for dbm first -- this has a .pag and a .dir file\n try:\n f = open(filename + os.extsep + \"pag\", \"rb\")\n f.close()\n f = open(filename + os.extsep + \"dir\", \"rb\")\n f.close()\n return \"dbm\"\n except IOError:\n pass\n\n # Check for dumbdbm next -- this has a .dir and and a .dat file\n try:\n # First check for presence of files\n sizes = os.stat(filename + os.extsep + \"dat\").st_size, \\\n os.stat(filename + os.extsep + \"dir\").st_size\n # dumbdbm files with no keys are empty\n if sizes == (0, 0):\n return \"dumbdbm\"\n f = open(filename + os.extsep + \"dir\", \"rb\")\n try:\n if f.read(1) in [\"'\", '\"']:\n return \"dumbdbm\"\n finally:\n f.close()\n except (OSError, IOError):\n pass\n\n # See if the file exists, return None if not\n try:\n f = open(filename, \"rb\")\n except IOError:\n return None\n\n # Read the start of the file -- the magic number\n s16 = f.read(16)\n f.close()\n s = s16[0:4]\n\n # Return \"\" if not at least 4 bytes\n if len(s) != 4:\n return \"\"\n\n # Convert to 4-byte int in native byte order -- return \"\" if impossible\n try:\n (magic,) = struct.unpack(\"=l\", s)\n except struct.error:\n return \"\"\n\n # Check for GNU dbm\n if magic == 0x13579ace:\n return \"gdbm\"\n\n # Check for BSD hash\n if magic in (0x00061561, 0x61150600):\n return \"dbhash\"\n\n # BSD hash v2 has a 12-byte NULL pad in front of the file type\n try:\n (magic,) = struct.unpack(\"=l\", s16[-4:])\n except struct.error:\n return \"\"\n\n # Check for BSD hash\n if magic in (0x00061561, 0x61150600):\n return \"dbhash\"\n\n # Unknown\n return \"\"",
"def fiwalk_installed_version(fiwalk='fiwalk'):\n from subprocess import Popen,PIPE\n import re\n for line in Popen([fiwalk,'-V'],stdout=PIPE).stdout.read().split(\"\\n\"):\n g = re.search(\"^FIWalk Version:\\s+(.*)$\",line)\n if g:\n return g.group(1)\n return None",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def read_version(self, fname):\n version = 'unknown'\n lines = open(fname).readlines()\n for line in lines:\n if \" Version\" in line:\n version = line.split()[-2]\n break\n return version",
"def get_version():\n version = \"unknown\"\n try:\n version_file = open(VERSIONFILE, \"r\")\n for line in version_file:\n if line.startswith('__version__'):\n version = line.split(\"'\")[1]\n break\n except EnvironmentError:\n pass # Okay, there is no version file.\n return version",
"def getDBReleaseVersion(dbh, jobPars):\n\n return dbh.getDBReleaseVersion(jobPars=jobPars)",
"def _get_version(self):\n if _cbc_version is None:\n return _extract_version('')\n return _cbc_version",
"def find_version():\n _locals = locals()\n src_dir = os.path.abspath(os.path.dirname(__file__))\n version_file = os.path.join(src_dir, 'loudml', '_version.py')\n with io_open(version_file, mode='r') as fd:\n exec(fd.read()) # __version__ is set in the exec call.\n return _locals['__version__']",
"def GetVersion(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetVersion(self)",
"def extract_version():\n version = ''\n directory = os.path.dirname(__file__)\n filename = os.path.join(directory, 'cube_helper', '__init__.py')\n\n with open(filename) as fd:\n for line in fd:\n line = line.strip()\n if line.startswith('__version__'):\n try:\n version = line.split('=')[1].strip(' \"\\'')\n except Exception:\n pass\n break\n\n if not version:\n print('WARNING: Unable to parse version information from '\n 'file: {}'.format(filename))\n version = '0.0.0'\n\n return version",
"def db_version(engine):\n return IMPL.db_version(engine)",
"def isDBReleaseFile(dbh, lfn):\n\n if dbh:\n return dbh.extractVersion(lfn)\n else:\n return False",
"def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'"
] | [
"0.7822165",
"0.64030504",
"0.6169352",
"0.6081289",
"0.59697837",
"0.5912215",
"0.5901315",
"0.58462954",
"0.575202",
"0.5719525",
"0.57052153",
"0.5702572",
"0.5686477",
"0.56752723",
"0.56657815",
"0.5625068",
"0.56241447",
"0.56233686",
"0.56189835",
"0.56164765",
"0.56131035",
"0.5576338",
"0.5569362",
"0.55489695",
"0.55461335",
"0.5543028",
"0.5536777",
"0.5528966",
"0.5520173",
"0.55138004"
] | 0.76075125 | 1 |
Test the open and close functionality using an identifier. | def testOpenCloseIdentifier(self):
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_TSK, inode=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._bde_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
self._TestOpenCloseIdentifier(file_object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_auto_open(self):\n # todo implement",
"def testOpenCloseInode(self):\n self._TestOpenCloseInode(self._tsk_partition_path_spec)",
"def testOpenCloseInode(self):\n self._TestOpenCloseInode(self._tsk_partition_path_spec)",
"def test_cover_open_close(self):\n with patch.dict(TYPES, {'WindowCoveringBasic': self.mock_type}):\n state = State('cover.open_window', 'open',\n {ATTR_SUPPORTED_FEATURES: 3})\n get_accessory(None, state, 2, {})",
"def test_auto_close(self):\n multi_fs = MountFS()\n m1 = MemoryFS()\n m2 = MemoryFS()\n multi_fs.mount('/m1', m1)\n multi_fs.mount('/m2', m2)\n self.assertTrue(not m1.closed)\n self.assertTrue(not m2.closed)\n multi_fs.close()\n self.assertTrue(m1.closed)\n self.assertTrue(m2.closed)",
"def test_open_and_close(get_touchmat):\n touchmat = get_touchmat\n\n connected = touchmat.is_device_connected()\n assert connected is True\n\n assert touchmat.open_count() == 1\n count = touchmat.close()\n assert isinstance(count, int)\n assert count == 0\n assert touchmat.open_count() == 0\n with pytest.raises(PySproutError) as execinfo:\n # Any call should fail\n touchmat.state({'touch' : False})\n assert 'Device is not open' in str(execinfo.value)\n count = touchmat.open()\n assert isinstance(count, int)\n assert count == 1\n assert touchmat.open_count() == 1\n # Any call should work\n touchmat.state({'touch' : False})",
"def test_open_close():\n spds = SDPS(VirtualDevice(), 'MX28')\n assert not spds.is_opened\n spds.open()\n assert spds.is_opened\n spds.open()\n #TODO: analyze caplog, there should be no new records\n assert spds.is_opened",
"def testClose(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code), stateid)]\n _replay(c, ops)",
"def test_closing_id(node_factory):\n l1, l2 = node_factory.get_nodes(2)\n\n # Close by full channel ID.\n l1.rpc.connect(l2.info['id'], 'localhost', l2.port)\n l1.fund_channel(l2, 10**6)\n cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']\n l2.rpc.close(cid)\n l1.daemon.wait_for_log(\"Forgetting remote peer .*\")\n l2.daemon.wait_for_log(\"Forgetting remote peer .*\")\n\n # Close by peer ID.\n l2.rpc.connect(l1.info['id'], 'localhost', l1.port)\n l1.daemon.wait_for_log(\"hand_back_peer .*: now local again\")\n l2.fund_channel(l1, 10**6)\n pid = l1.info['id']\n l2.rpc.close(pid)\n l1.daemon.wait_for_log(\"Forgetting remote peer .*\")\n l2.daemon.wait_for_log(\"Forgetting remote peer .*\")",
"def test_open_action(self):\n with assert_setup_component(1, 'cover'):\n assert setup.setup_component(self.hass, 'cover', {\n 'cover': {\n 'platform': 'template',\n 'covers': {\n 'test_template_cover': {\n 'position_template':\n \"{{ 0 }}\",\n 'open_cover': {\n 'service': 'test.automation',\n },\n 'close_cover': {\n 'service': 'cover.close_cover',\n 'entity_id': 'cover.test_state'\n },\n }\n }\n }\n })\n\n self.hass.start()\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test_template_cover')\n assert state.state == STATE_CLOSED\n\n self.hass.services.call(\n DOMAIN, SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True)\n self.hass.block_till_done()\n\n assert len(self.calls) == 1",
"def testOpen(self):\n # a lazy_open open() shouldn't do anything\n self.assertCallsExact(lambda: BitBangDevice(lazy_open=True), [])\n # a non-lazy_open open() should open the port...\n self.assertCalls(lambda: BitBangDevice(), \"ftdi_usb_open_desc_index\")\n # and set the bit mode\n self.assertCalls(lambda: BitBangDevice(), \"ftdi_set_bitmode\")\n # and given a device_id, it should do a open_desc\n self.assertCalls(lambda: BitBangDevice(\"bogus\"), \"ftdi_usb_open_desc_index\")",
"def test_open_fill(self):",
"def test_no_auto_close(self):\n multi_fs = MountFS(auto_close=False)\n m1 = MemoryFS()\n m2 = MemoryFS()\n multi_fs.mount('/m1', m1)\n multi_fs.mount('/m2', m2)\n self.assertTrue(not m1.closed)\n self.assertTrue(not m2.closed)\n multi_fs.close()\n self.assertTrue(not m1.closed)\n self.assertTrue(not m2.closed)",
"def testCloseFail(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code)+1, stateid)]\n _replay(c, ops, NFS4ERR_BAD_SEQID)",
"async def test_action_legacy(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}",
"def close(*args):",
"def close(*args):",
"def close(*args):",
"def close(*args):",
"def close(*args):",
"def test_template_open_and_close(self):\n with assert_setup_component(0, 'cover'):\n assert setup.setup_component(self.hass, 'cover', {\n 'cover': {\n 'platform': 'template',\n 'covers': {\n 'test_template_cover': {\n 'value_template':\n \"{{ 1 == 1 }}\",\n 'open_cover': {\n 'service': 'cover.open_cover',\n 'entity_id': 'cover.test_state'\n },\n },\n }\n }\n })\n\n self.hass.start()\n self.hass.block_till_done()\n\n assert self.hass.states.all() == []",
"def testCloseWait(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n sleeptime = c.getLeaseTime() * 2\n env.sleep(sleeptime)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code), stateid)]\n _replay(c, ops, [NFS4_OK, NFS4ERR_EXPIRED])",
"def test_open_state(testchannel):\n\n with testchannel.open() as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed",
"def test_mode_get_at_id(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n cgs = mocker.patch('pysds011.driver.SDS011.cmd_get_mode')\n runner = CliRunner()\n result = runner.invoke(main, ['--id', 'ABCD', 'mode'])\n cgs.assert_called_once_with(id=b'\\xab\\xcd')\n\n assert result.exit_code == 0",
"def test_openDialog(self):\n\n def test_uri(self, uri):\n self.setUp()\n self.setup_err()\n self.run_script('foo.openDialog(\"%s\")' % uri)\n self.assert_failed(with_warnings=True)\n\n uris = ['http://foo/bar/',\n 'https://foo/bar/',\n 'ftp://foo/bar/',\n 'data:asdf']\n for uri in uris:\n yield test_uri, self, uri",
"def test_rectangle_open(self):\n before_b = \"\"\"\\\n before\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n after\n \"\"\"\n after_b = \"\"\"\\\n before\n aaa xxxbbb\n aaa xxxbbb\n aaa xxxbbb\n aaa xxxbbb\n after\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.3\", \"5.6\"),\n after_sel=(\"2.3\", \"5.6\"),\n command_name=\"rectangle-open\",\n )",
"def test_open_alreadyopen(testchannel, state):\n\n testchannel._state = state\n with pytest.raises(ChannelOpenError):\n testchannel.open()",
"def test_open_close(self):\n zed = ZipEditor(self.azipfile)\n self.assertEqual(self.azipfile, zed.file)\n self.assertIsNone(zed.getdir())\n zed.open()\n self.assertIsNotNone(zed.tmpdir.name)\n self.assertEqual(zed.tmpdir.name, zed.getdir())\n zed.close()\n self.assertIsNone(zed.getdir())",
"def testOpenClose(self):\n file_writer = writers.FileWriter()\n\n with test_lib.TempDirectory() as temp_directory:\n filename = os.path.join(temp_directory, 'testfile')\n file_writer.Open(filename)\n\n file_writer.Close()",
"async def test_action(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_close\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"close\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_stop\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"stop\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n close_calls = async_mock_service(hass, \"cover\", \"close_cover\")\n stop_calls = async_mock_service(hass, \"cover\", \"stop_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 0\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_close\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_stop\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}\n assert close_calls[0].domain == DOMAIN\n assert close_calls[0].service == \"close_cover\"\n assert close_calls[0].data == {\"entity_id\": entry.entity_id}\n assert stop_calls[0].domain == DOMAIN\n assert stop_calls[0].service == \"stop_cover\"\n assert stop_calls[0].data == {\"entity_id\": entry.entity_id}"
] | [
"0.64999694",
"0.63243794",
"0.63243794",
"0.6233374",
"0.5917162",
"0.58881676",
"0.5855626",
"0.5776896",
"0.5713112",
"0.55846924",
"0.5579715",
"0.54898465",
"0.54159117",
"0.53447366",
"0.5338322",
"0.5319229",
"0.5319229",
"0.5319229",
"0.5319229",
"0.5319229",
"0.5286011",
"0.52745265",
"0.52525663",
"0.52505076",
"0.522567",
"0.52111006",
"0.51777035",
"0.51384467",
"0.51327986",
"0.5131605"
] | 0.7052777 | 1 |
Test the seek functionality. | def testSeek(self):
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._bde_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
self._TestSeek(file_object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testSeek(self):\n self._TestSeek(self._tsk_partition_path_spec)",
"def testSeek(self):\n self._TestSeek(self._tsk_partition_path_spec)",
"def seekable(self):\n ...",
"def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)",
"def test_seek_tell(self):\n self.default_kwargs['seek_callback'] = self._seek_callback\n self.default_kwargs['tell_callback'] = self._tell_callback\n self.encoder = StreamEncoder(**self.default_kwargs)\n test_samples = np.random.rand(DEFAULT_BLOCKSIZE, 1).astype('int16')\n self.encoder.process(test_samples)\n self.encoder.finish()\n self.assertTrue(self.write_callback_called)\n self.assertTrue(self.seek_callback_called)\n self.assertTrue(self.tell_callback_called)",
"def seekable(self):\n # Not seekable, but we do support tell...\n return False",
"def seek(self, time):\n command = 'seek ' + str(time)\n self.run_command(command)",
"def test_seek_only(self):\n self.default_kwargs['seek_callback'] = self._seek_callback\n self.encoder = StreamEncoder(**self.default_kwargs)\n with self.assertRaisesRegex(EncoderInitException, 'FLAC__STREAM_ENCODER_INIT_STATUS_INVALID_CALLBACKS'):\n self.encoder._init()",
"def seek(self, position: int, whence: int = 0) -> None:\n raise NotImplementedError(\n 'Seek operation is not supported by this object: %r' % self\n ) # pragma: no cover",
"def _seek(self, time_offset):\n if (time.time() - (self.time_start + self.time_offset)) < 0.1:\n log.info('Seek recived within 100ms of start - Assuming this is a bounceback from test_audio - applying automatic time mutator of {0}s'.format(time_offset))\n self.time_mutator = time_offset\n self.time_start = time.time() - time_offset\n log.info('seek {0}'.format(time_offset))",
"def seekable(self):\n return True",
"async def async_media_seek(self, position):\n if not self._slave_mode:\n _LOGGER.debug(\"Seek. Device: %s, DUR: %s POS: %\", self.name, self._duration, position)\n if self._duration > 0 and position >= 0 and position <= self._duration:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:seek:{0}\".format(str(position)), None)\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if value != \"OK\":\n _LOGGER.warning(\"Failed to seek. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_seek(position)",
"def testPipeFound(self):\n safeFoundHelper(self)\n self.assertCurrentState(safe.Seeking)",
"def verify_consumer_seek(c, seek_to_msg):\n\n tp = confluent_kafka.TopicPartition(seek_to_msg.topic(),\n seek_to_msg.partition(),\n seek_to_msg.offset())\n print('seek: Seeking to %s' % tp)\n c.seek(tp)\n\n while True:\n msg = c.poll()\n assert msg is not None\n if msg.error():\n print('seek: Ignoring non-message: %s' % msg)\n continue\n\n if msg.topic() != seek_to_msg.topic() or msg.partition() != seek_to_msg.partition():\n continue\n\n print('seek: message at offset %d' % msg.offset())\n assert msg.offset() == seek_to_msg.offset(), \\\n 'expected message at offset %d, not %d' % (seek_to_msg.offset(), msg.offset())\n break",
"async def seek(self, pos: int):\n pos = max(pos, 0) # Prevent seeking before start of track\n await self._bot.lavalink.ws.send(op='seek', guildId=self.guild_id, position=pos)",
"def seek(self, offset, whence=io.SEEK_SET):\n if offset != 0 and whence == io.SEEK_SET:\n # logging.debug('IterStream: trying to seek to offset {0}.'\n # .format(offset))\n if offset > self.curr_pos:\n self.readinto(bytearray(offset - self.curr_pos))\n elif offset == self.curr_pos:\n pass\n else: # need to re-create iterable\n self.reset()\n self.readinto(bytearray(offset))\n if self.curr_pos != offset:\n # logging.debug('IterStream: curr_pos {0} != offset {1}!'\n # .format(self.curr_pos, offset))\n raise RuntimeError('programming error in IterStream.tell!')\n return self.curr_pos\n elif whence == io.SEEK_END: # seek to end\n # logging.debug('IterStream: seek to end')\n if self.size is None:\n # logging.debug('IterStream: trying to seek to end but size '\n # 'unknown --> raise IOError')\n raise IOError('size unknown, cannot seek to end')\n self.at_end = True # fake jumping to the end\n self.iterable = None # cannot safely be used any more\n self.leftover = None\n return self.size\n elif whence == io.SEEK_SET: # seek to start\n # logging.debug('IterStream: seek to start')\n self.reset()\n return 0\n elif whence == io.SEEK_CUR: # e.g. called by tell()\n # logging.debug('IterStream: seek to curr pos')\n if self.at_end:\n return self.size\n return self.curr_pos\n elif whence not in (io.SEEK_SET, io.SEEK_CUR, io.SEEK_END):\n # logging.debug('Illegal 2nd argument to seek(): {0}'\n # .format(whence))\n raise IOError('Illegal 2nd argument to seek(): {0}'.format(whence))\n else:\n # logging.debug('not implemented: {0}, {1}'.format(offset, whence))\n raise NotImplementedError('seek only partially implemented. '\n 'Cannot yet seek to {0} from {1}'\n .format(offset, whence))",
"def test_basic_seek(self):\n tags = []\n for i in range(15):\n tags.append(_TagInfo(\n '1.0.' + str(i),\n 'commit' + str(i),\n ''))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags).name, '1.0.14')\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.14').name,\n '1.0.13')",
"def _seek(self, offset):\n assert offset % self.recordsize == 0\n file_number, file_offset = divmod(offset,\n self.filesize - self.header_size)\n self.open(file_number)\n self.fh_raw.seek(file_offset + self.header_size)\n self.offset = offset",
"def seeked(self):\n # type: () -> int\n return self._seeked",
"def seek(self, offset):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackSeek(offset))",
"def buildSeekIndex(self):\n runs = 0;\n while runs < 100:\n runs += 1\n bytes = self.seekfile.read(FileFlvStreamProvider.BUFFERFILL_CHUNCKSIZE)\n if len(bytes)==0:\n # either the file is finished, or the file is still rendering, either way, nothing to do\n return\n else:\n self.seekbuffer.write(bytes)\n while True:\n self.seekbuffer.transactionStart();\n try:\n pos = self.seekbuffer.bytesRead();\n chunk = self.flvseeker.readChunk()\n except util.bufferedstringstream.BufferedStringStreamInsufficientDataException:\n self.seekbuffer.transactionRollback();\n break\n self.seekbuffer.transactionCommit();\n self.maxtimestamp = max(chunk.time, self.maxtimestamp)\n if (chunk.CHUNKTYPE == util.flv.FLVVideoChunk.CHUNKTYPE and chunk.isKeyFrame) or not self.hasVideo(): # if we don't have video, every frame is a keyframe\n if len(self.aKeyFrame) == 0 or chunk.time > self.aKeyFrame[-1][\"timestamp\"]+200: #we might be replaying a part we checkout out before, make sure that the timestamps stay in order; in addition, we store at most 5 timestamps per second, to save memory\n self.aKeyFrame.append({\"timestamp\":chunk.time, \"filepos\":pos})\n if len(bytes) < FileFlvStreamProvider.BUFFERFILL_CHUNCKSIZE:\n return",
"def seekable(self):\n self._check_not_closed()\n return False",
"def seek(self, val):\n if self.p:\n self.p.set_position(val/100.0 + self.p.get_position())",
"def seek(self, offset, from_what=0):\n if from_what == 0: # From the begining\n if offset >= self.tell():\n self.seek(offset - self.tell(), from_what=1)\n else:\n raise NotImplementedError(\"Can't seek backwards\")\n elif from_what == 1: # From the cursor position\n if offset < 0:\n raise NotImplementedError(\"Can't seek backwards\")\n else:\n self.read(offset)\n else:\n raise NotImplementedError(\"Can't seek from there\")\n return self.tell()",
"def test_read_different_location(self):\n try:\n self.reader.read(self.books[1], 0, 1)\n self.fail(\"Readed book was not in the library\")\n except AssertionError:\n pass",
"def media_seek(self, position: float) -> None:\n media_controller = self._media_controller()\n media_controller.seek(position)",
"def seekable(self):\n\n return self._check_idx",
"def tell(self):\n return self._seek_pos",
"def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._tell)\n\n self._fp_left.seek(loc)\n self._fp_right.seek(loc)\n self._tell = loc\n self._buf = Buffer()",
"def test_level2_fobj(filename, use_seek):\n f = get_test_data(filename)\n if not use_seek:\n class SeeklessReader:\n \"\"\"Simulate file-like object access without seek.\"\"\"\n\n def __init__(self, f):\n self._f = f\n\n def read(self, n=None):\n \"\"\"Read bytes.\"\"\"\n return self._f.read(n)\n\n f = SeeklessReader(f)\n Level2File(f)"
] | [
"0.7948346",
"0.7948346",
"0.72462237",
"0.71158445",
"0.7113932",
"0.70292354",
"0.67135704",
"0.6571655",
"0.6559805",
"0.6557865",
"0.65500414",
"0.65049493",
"0.63947237",
"0.6227877",
"0.6180919",
"0.61479574",
"0.6112446",
"0.6111428",
"0.60112697",
"0.5997623",
"0.5995808",
"0.5989129",
"0.5980215",
"0.5908207",
"0.5906513",
"0.5887028",
"0.58826774",
"0.58733284",
"0.5862026",
"0.58493626"
] | 0.7980517 | 0 |
Test the open and close functionality using an identifier. | def testOpenCloseIdentifier(self):
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_TSK, inode=self._IDENTIFIER_PASSWORDS_TXT,
parent=self._bde_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
self._TestOpenCloseIdentifier(file_object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_auto_open(self):\n # todo implement",
"def testOpenCloseInode(self):\n self._TestOpenCloseInode(self._tsk_partition_path_spec)",
"def testOpenCloseInode(self):\n self._TestOpenCloseInode(self._tsk_partition_path_spec)",
"def test_cover_open_close(self):\n with patch.dict(TYPES, {'WindowCoveringBasic': self.mock_type}):\n state = State('cover.open_window', 'open',\n {ATTR_SUPPORTED_FEATURES: 3})\n get_accessory(None, state, 2, {})",
"def test_auto_close(self):\n multi_fs = MountFS()\n m1 = MemoryFS()\n m2 = MemoryFS()\n multi_fs.mount('/m1', m1)\n multi_fs.mount('/m2', m2)\n self.assertTrue(not m1.closed)\n self.assertTrue(not m2.closed)\n multi_fs.close()\n self.assertTrue(m1.closed)\n self.assertTrue(m2.closed)",
"def test_open_and_close(get_touchmat):\n touchmat = get_touchmat\n\n connected = touchmat.is_device_connected()\n assert connected is True\n\n assert touchmat.open_count() == 1\n count = touchmat.close()\n assert isinstance(count, int)\n assert count == 0\n assert touchmat.open_count() == 0\n with pytest.raises(PySproutError) as execinfo:\n # Any call should fail\n touchmat.state({'touch' : False})\n assert 'Device is not open' in str(execinfo.value)\n count = touchmat.open()\n assert isinstance(count, int)\n assert count == 1\n assert touchmat.open_count() == 1\n # Any call should work\n touchmat.state({'touch' : False})",
"def test_open_close():\n spds = SDPS(VirtualDevice(), 'MX28')\n assert not spds.is_opened\n spds.open()\n assert spds.is_opened\n spds.open()\n #TODO: analyze caplog, there should be no new records\n assert spds.is_opened",
"def testClose(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code), stateid)]\n _replay(c, ops)",
"def test_closing_id(node_factory):\n l1, l2 = node_factory.get_nodes(2)\n\n # Close by full channel ID.\n l1.rpc.connect(l2.info['id'], 'localhost', l2.port)\n l1.fund_channel(l2, 10**6)\n cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']\n l2.rpc.close(cid)\n l1.daemon.wait_for_log(\"Forgetting remote peer .*\")\n l2.daemon.wait_for_log(\"Forgetting remote peer .*\")\n\n # Close by peer ID.\n l2.rpc.connect(l1.info['id'], 'localhost', l1.port)\n l1.daemon.wait_for_log(\"hand_back_peer .*: now local again\")\n l2.fund_channel(l1, 10**6)\n pid = l1.info['id']\n l2.rpc.close(pid)\n l1.daemon.wait_for_log(\"Forgetting remote peer .*\")\n l2.daemon.wait_for_log(\"Forgetting remote peer .*\")",
"def test_open_action(self):\n with assert_setup_component(1, 'cover'):\n assert setup.setup_component(self.hass, 'cover', {\n 'cover': {\n 'platform': 'template',\n 'covers': {\n 'test_template_cover': {\n 'position_template':\n \"{{ 0 }}\",\n 'open_cover': {\n 'service': 'test.automation',\n },\n 'close_cover': {\n 'service': 'cover.close_cover',\n 'entity_id': 'cover.test_state'\n },\n }\n }\n }\n })\n\n self.hass.start()\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test_template_cover')\n assert state.state == STATE_CLOSED\n\n self.hass.services.call(\n DOMAIN, SERVICE_OPEN_COVER,\n {ATTR_ENTITY_ID: ENTITY_COVER}, blocking=True)\n self.hass.block_till_done()\n\n assert len(self.calls) == 1",
"def testOpen(self):\n # a lazy_open open() shouldn't do anything\n self.assertCallsExact(lambda: BitBangDevice(lazy_open=True), [])\n # a non-lazy_open open() should open the port...\n self.assertCalls(lambda: BitBangDevice(), \"ftdi_usb_open_desc_index\")\n # and set the bit mode\n self.assertCalls(lambda: BitBangDevice(), \"ftdi_set_bitmode\")\n # and given a device_id, it should do a open_desc\n self.assertCalls(lambda: BitBangDevice(\"bogus\"), \"ftdi_usb_open_desc_index\")",
"def test_open_fill(self):",
"def test_no_auto_close(self):\n multi_fs = MountFS(auto_close=False)\n m1 = MemoryFS()\n m2 = MemoryFS()\n multi_fs.mount('/m1', m1)\n multi_fs.mount('/m2', m2)\n self.assertTrue(not m1.closed)\n self.assertTrue(not m2.closed)\n multi_fs.close()\n self.assertTrue(not m1.closed)\n self.assertTrue(not m2.closed)",
"def testCloseFail(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code)+1, stateid)]\n _replay(c, ops, NFS4ERR_BAD_SEQID)",
"async def test_action_legacy(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}",
"def close(*args):",
"def close(*args):",
"def close(*args):",
"def close(*args):",
"def close(*args):",
"def test_template_open_and_close(self):\n with assert_setup_component(0, 'cover'):\n assert setup.setup_component(self.hass, 'cover', {\n 'cover': {\n 'platform': 'template',\n 'covers': {\n 'test_template_cover': {\n 'value_template':\n \"{{ 1 == 1 }}\",\n 'open_cover': {\n 'service': 'cover.open_cover',\n 'entity_id': 'cover.test_state'\n },\n },\n }\n }\n })\n\n self.hass.start()\n self.hass.block_till_done()\n\n assert self.hass.states.all() == []",
"def testCloseWait(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n sleeptime = c.getLeaseTime() * 2\n env.sleep(sleeptime)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code), stateid)]\n _replay(c, ops, [NFS4_OK, NFS4ERR_EXPIRED])",
"def test_open_state(testchannel):\n\n with testchannel.open() as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed",
"def test_mode_get_at_id(mocker):\n mocker.patch('serial.Serial.open')\n mocker.patch('serial.Serial.flushInput')\n cgs = mocker.patch('pysds011.driver.SDS011.cmd_get_mode')\n runner = CliRunner()\n result = runner.invoke(main, ['--id', 'ABCD', 'mode'])\n cgs.assert_called_once_with(id=b'\\xab\\xcd')\n\n assert result.exit_code == 0",
"def test_openDialog(self):\n\n def test_uri(self, uri):\n self.setUp()\n self.setup_err()\n self.run_script('foo.openDialog(\"%s\")' % uri)\n self.assert_failed(with_warnings=True)\n\n uris = ['http://foo/bar/',\n 'https://foo/bar/',\n 'ftp://foo/bar/',\n 'data:asdf']\n for uri in uris:\n yield test_uri, self, uri",
"def test_rectangle_open(self):\n before_b = \"\"\"\\\n before\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n after\n \"\"\"\n after_b = \"\"\"\\\n before\n aaa xxxbbb\n aaa xxxbbb\n aaa xxxbbb\n aaa xxxbbb\n after\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.3\", \"5.6\"),\n after_sel=(\"2.3\", \"5.6\"),\n command_name=\"rectangle-open\",\n )",
"def test_open_alreadyopen(testchannel, state):\n\n testchannel._state = state\n with pytest.raises(ChannelOpenError):\n testchannel.open()",
"def test_open_close(self):\n zed = ZipEditor(self.azipfile)\n self.assertEqual(self.azipfile, zed.file)\n self.assertIsNone(zed.getdir())\n zed.open()\n self.assertIsNotNone(zed.tmpdir.name)\n self.assertEqual(zed.tmpdir.name, zed.getdir())\n zed.close()\n self.assertIsNone(zed.getdir())",
"def testOpenClose(self):\n file_writer = writers.FileWriter()\n\n with test_lib.TempDirectory() as temp_directory:\n filename = os.path.join(temp_directory, 'testfile')\n file_writer.Open(filename)\n\n file_writer.Close()",
"async def test_action(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_close\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"close\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_stop\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"stop\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n close_calls = async_mock_service(hass, \"cover\", \"close_cover\")\n stop_calls = async_mock_service(hass, \"cover\", \"stop_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 0\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_close\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_stop\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}\n assert close_calls[0].domain == DOMAIN\n assert close_calls[0].service == \"close_cover\"\n assert close_calls[0].data == {\"entity_id\": entry.entity_id}\n assert stop_calls[0].domain == DOMAIN\n assert stop_calls[0].service == \"stop_cover\"\n assert stop_calls[0].data == {\"entity_id\": entry.entity_id}"
] | [
"0.650188",
"0.6326021",
"0.6326021",
"0.6236123",
"0.5919346",
"0.5891023",
"0.5856933",
"0.5779075",
"0.57124746",
"0.5586663",
"0.5581367",
"0.54916066",
"0.54180956",
"0.5346649",
"0.5339649",
"0.5322929",
"0.5322929",
"0.5322929",
"0.5322929",
"0.5322929",
"0.52880347",
"0.5276069",
"0.5255174",
"0.52496976",
"0.5225802",
"0.5212816",
"0.5180366",
"0.5141276",
"0.51356804",
"0.51326895"
] | 0.70521885 | 0 |
Test the seek functionality. | def testSeek(self):
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._bde_path_spec)
file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
self._TestSeek(file_object) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testSeek(self):\n self._TestSeek(self._tsk_partition_path_spec)",
"def testSeek(self):\n self._TestSeek(self._tsk_partition_path_spec)",
"def seekable(self):\n ...",
"def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)",
"def test_seek_tell(self):\n self.default_kwargs['seek_callback'] = self._seek_callback\n self.default_kwargs['tell_callback'] = self._tell_callback\n self.encoder = StreamEncoder(**self.default_kwargs)\n test_samples = np.random.rand(DEFAULT_BLOCKSIZE, 1).astype('int16')\n self.encoder.process(test_samples)\n self.encoder.finish()\n self.assertTrue(self.write_callback_called)\n self.assertTrue(self.seek_callback_called)\n self.assertTrue(self.tell_callback_called)",
"def seekable(self):\n # Not seekable, but we do support tell...\n return False",
"def seek(self, time):\n command = 'seek ' + str(time)\n self.run_command(command)",
"def test_seek_only(self):\n self.default_kwargs['seek_callback'] = self._seek_callback\n self.encoder = StreamEncoder(**self.default_kwargs)\n with self.assertRaisesRegex(EncoderInitException, 'FLAC__STREAM_ENCODER_INIT_STATUS_INVALID_CALLBACKS'):\n self.encoder._init()",
"def seek(self, position: int, whence: int = 0) -> None:\n raise NotImplementedError(\n 'Seek operation is not supported by this object: %r' % self\n ) # pragma: no cover",
"def _seek(self, time_offset):\n if (time.time() - (self.time_start + self.time_offset)) < 0.1:\n log.info('Seek recived within 100ms of start - Assuming this is a bounceback from test_audio - applying automatic time mutator of {0}s'.format(time_offset))\n self.time_mutator = time_offset\n self.time_start = time.time() - time_offset\n log.info('seek {0}'.format(time_offset))",
"def seekable(self):\n return True",
"async def async_media_seek(self, position):\n if not self._slave_mode:\n _LOGGER.debug(\"Seek. Device: %s, DUR: %s POS: %\", self.name, self._duration, position)\n if self._duration > 0 and position >= 0 and position <= self._duration:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:seek:{0}\".format(str(position)), None)\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if value != \"OK\":\n _LOGGER.warning(\"Failed to seek. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_seek(position)",
"def testPipeFound(self):\n safeFoundHelper(self)\n self.assertCurrentState(safe.Seeking)",
"def verify_consumer_seek(c, seek_to_msg):\n\n tp = confluent_kafka.TopicPartition(seek_to_msg.topic(),\n seek_to_msg.partition(),\n seek_to_msg.offset())\n print('seek: Seeking to %s' % tp)\n c.seek(tp)\n\n while True:\n msg = c.poll()\n assert msg is not None\n if msg.error():\n print('seek: Ignoring non-message: %s' % msg)\n continue\n\n if msg.topic() != seek_to_msg.topic() or msg.partition() != seek_to_msg.partition():\n continue\n\n print('seek: message at offset %d' % msg.offset())\n assert msg.offset() == seek_to_msg.offset(), \\\n 'expected message at offset %d, not %d' % (seek_to_msg.offset(), msg.offset())\n break",
"async def seek(self, pos: int):\n pos = max(pos, 0) # Prevent seeking before start of track\n await self._bot.lavalink.ws.send(op='seek', guildId=self.guild_id, position=pos)",
"def seek(self, offset, whence=io.SEEK_SET):\n if offset != 0 and whence == io.SEEK_SET:\n # logging.debug('IterStream: trying to seek to offset {0}.'\n # .format(offset))\n if offset > self.curr_pos:\n self.readinto(bytearray(offset - self.curr_pos))\n elif offset == self.curr_pos:\n pass\n else: # need to re-create iterable\n self.reset()\n self.readinto(bytearray(offset))\n if self.curr_pos != offset:\n # logging.debug('IterStream: curr_pos {0} != offset {1}!'\n # .format(self.curr_pos, offset))\n raise RuntimeError('programming error in IterStream.tell!')\n return self.curr_pos\n elif whence == io.SEEK_END: # seek to end\n # logging.debug('IterStream: seek to end')\n if self.size is None:\n # logging.debug('IterStream: trying to seek to end but size '\n # 'unknown --> raise IOError')\n raise IOError('size unknown, cannot seek to end')\n self.at_end = True # fake jumping to the end\n self.iterable = None # cannot safely be used any more\n self.leftover = None\n return self.size\n elif whence == io.SEEK_SET: # seek to start\n # logging.debug('IterStream: seek to start')\n self.reset()\n return 0\n elif whence == io.SEEK_CUR: # e.g. called by tell()\n # logging.debug('IterStream: seek to curr pos')\n if self.at_end:\n return self.size\n return self.curr_pos\n elif whence not in (io.SEEK_SET, io.SEEK_CUR, io.SEEK_END):\n # logging.debug('Illegal 2nd argument to seek(): {0}'\n # .format(whence))\n raise IOError('Illegal 2nd argument to seek(): {0}'.format(whence))\n else:\n # logging.debug('not implemented: {0}, {1}'.format(offset, whence))\n raise NotImplementedError('seek only partially implemented. '\n 'Cannot yet seek to {0} from {1}'\n .format(offset, whence))",
"def test_basic_seek(self):\n tags = []\n for i in range(15):\n tags.append(_TagInfo(\n '1.0.' + str(i),\n 'commit' + str(i),\n ''))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags).name, '1.0.14')\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.14').name,\n '1.0.13')",
"def _seek(self, offset):\n assert offset % self.recordsize == 0\n file_number, file_offset = divmod(offset,\n self.filesize - self.header_size)\n self.open(file_number)\n self.fh_raw.seek(file_offset + self.header_size)\n self.offset = offset",
"def seeked(self):\n # type: () -> int\n return self._seeked",
"def seek(self, offset):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackSeek(offset))",
"def buildSeekIndex(self):\n runs = 0;\n while runs < 100:\n runs += 1\n bytes = self.seekfile.read(FileFlvStreamProvider.BUFFERFILL_CHUNCKSIZE)\n if len(bytes)==0:\n # either the file is finished, or the file is still rendering, either way, nothing to do\n return\n else:\n self.seekbuffer.write(bytes)\n while True:\n self.seekbuffer.transactionStart();\n try:\n pos = self.seekbuffer.bytesRead();\n chunk = self.flvseeker.readChunk()\n except util.bufferedstringstream.BufferedStringStreamInsufficientDataException:\n self.seekbuffer.transactionRollback();\n break\n self.seekbuffer.transactionCommit();\n self.maxtimestamp = max(chunk.time, self.maxtimestamp)\n if (chunk.CHUNKTYPE == util.flv.FLVVideoChunk.CHUNKTYPE and chunk.isKeyFrame) or not self.hasVideo(): # if we don't have video, every frame is a keyframe\n if len(self.aKeyFrame) == 0 or chunk.time > self.aKeyFrame[-1][\"timestamp\"]+200: #we might be replaying a part we checkout out before, make sure that the timestamps stay in order; in addition, we store at most 5 timestamps per second, to save memory\n self.aKeyFrame.append({\"timestamp\":chunk.time, \"filepos\":pos})\n if len(bytes) < FileFlvStreamProvider.BUFFERFILL_CHUNCKSIZE:\n return",
"def seekable(self):\n self._check_not_closed()\n return False",
"def seek(self, val):\n if self.p:\n self.p.set_position(val/100.0 + self.p.get_position())",
"def seek(self, offset, from_what=0):\n if from_what == 0: # From the begining\n if offset >= self.tell():\n self.seek(offset - self.tell(), from_what=1)\n else:\n raise NotImplementedError(\"Can't seek backwards\")\n elif from_what == 1: # From the cursor position\n if offset < 0:\n raise NotImplementedError(\"Can't seek backwards\")\n else:\n self.read(offset)\n else:\n raise NotImplementedError(\"Can't seek from there\")\n return self.tell()",
"def test_read_different_location(self):\n try:\n self.reader.read(self.books[1], 0, 1)\n self.fail(\"Readed book was not in the library\")\n except AssertionError:\n pass",
"def media_seek(self, position: float) -> None:\n media_controller = self._media_controller()\n media_controller.seek(position)",
"def seekable(self):\n\n return self._check_idx",
"def tell(self):\n return self._seek_pos",
"def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._tell)\n\n self._fp_left.seek(loc)\n self._fp_right.seek(loc)\n self._tell = loc\n self._buf = Buffer()",
"def test_level2_fobj(filename, use_seek):\n f = get_test_data(filename)\n if not use_seek:\n class SeeklessReader:\n \"\"\"Simulate file-like object access without seek.\"\"\"\n\n def __init__(self, f):\n self._f = f\n\n def read(self, n=None):\n \"\"\"Read bytes.\"\"\"\n return self._f.read(n)\n\n f = SeeklessReader(f)\n Level2File(f)"
] | [
"0.7948346",
"0.7948346",
"0.72462237",
"0.71158445",
"0.7113932",
"0.70292354",
"0.67135704",
"0.6571655",
"0.6559805",
"0.6557865",
"0.65500414",
"0.65049493",
"0.63947237",
"0.6227877",
"0.6180919",
"0.61479574",
"0.6112446",
"0.6111428",
"0.60112697",
"0.5997623",
"0.5995808",
"0.5989129",
"0.5980215",
"0.5908207",
"0.5906513",
"0.5887028",
"0.58826774",
"0.58733284",
"0.5862026",
"0.58493626"
] | 0.7980517 | 1 |
Load a graph matrix from Matlab file | def load_graph(graphname,path='./data/',mname='A'):
data=sio.loadmat(path+graphname)
return data[mname] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_graph(graph_path):\n print(\"\\nTarget matrix creation started.\\n\")\n graph = nx.from_edgelist(pd.read_csv(graph_path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph",
"def load_file(filename):\n # Create matrix from csv lines\n with open(filename) as f:\n m = [list(map(int, line.split(','))) for line in f]\n # Create digraph from matrix\n graph = utils.graph.DiGraph()\n ROWS = len(m)\n COLS = len(m[0])\n for r in range(ROWS):\n for c in range(COLS):\n u = (r, c)\n # Add add to node to the right\n if c+1 < COLS:\n v = (r, c+1)\n weight = m[r][c+1]\n graph.add_edge(u, v, weight)\n # Add add to node below\n if r+1 < ROWS:\n v = (r+1, c)\n weight = m[r+1][c]\n graph.add_edge(u, v, weight)\n # Add add to node above\n if 0 <= r-1:\n v = (r-1, c)\n weight = m[r-1][c]\n graph.add_edge(u, v, weight)\n # also add a start element and create edges to first column\n start_node = 'START'\n for row in range(ROWS):\n node = (row, 0)\n weight = m[row][0]\n graph.add_edge(start_node, node, weight)\n # also add an end element and create edges to the list column\n end_node = 'END'\n c = COLS-1\n for row in range(ROWS):\n node = (row, c)\n weight = 0 # Valid?\n graph.add_edge(node, end_node, weight)\n return graph, start_node, end_node",
"def load(self, path):\n self.matrix = np.loadtxt(path, dtype=float)\n self.rank = len(self.matrix)",
"def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g",
"def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph",
"def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)",
"def loadmm(filepath):\n X = mmread(filepath)\n return fast_sparse_matrix(X)",
"def load_graph(self, path):\n if path.split('.')[-1]=='gexf':\n self.graph = nx.read_gexf(path)\n else:\n self.graph = nx.read_gpickle(path)",
"def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n n_nodes, d_edge = features.shape\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n print(edges_unordered)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n node_features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n\n # Edge matrix\n edge_features = None\n is3d = False\n if(is3d):\n indices = [[], [] , []]\n values = []\n sizes = [n_nodes, n_nodes, d_edge]\n\n i, j = adj.nonzero()\n for e in range(len(i)):\n i_idx = node_features[i[e],:].nonzero()[1]\n j_idx = node_features[j[e],:].nonzero()[1]\n for ii in i_idx:\n indices[0].append(i[e])\n indices[1].append(j[e])\n indices[2].append(ii)\n if ii in j_idx:\n values.append((node_features[i[e],:][0,ii] + node_features[j[e],:][0,ii])/2)\n else:\n values.append(node_features[i[e],:][0,ii])\n for jj in j_idx:\n if jj in j_idx:\n continue\n else:\n indices[0].append(i[e])\n indices[1].append(j[e])\n indices[2].append(jj)\n values.append(node_features[j[e],:][0,jj])\n indices = torch.LongTensor(indices)\n values = torch.FloatTensor(values)\n edge_features = torch.sparse_coo_tensor(indices, values, sizes)\n else:\n indices = [[], []]\n values = []\n sizes = [n_nodes*n_nodes, d_edge]\n\n i, j = adj.nonzero()\n for e in range(len(i)):\n i_idx = node_features[i[e],:].nonzero()[1]\n j_idx = node_features[j[e],:].nonzero()[1]\n for ii in i_idx:\n indices[0].append(i[e]+n_nodes*j[e])\n indices[1].append(ii)\n if ii in j_idx:\n values.append((node_features[i[e],:][0,ii] + node_features[j[e],:][0,ii])/2)\n else:\n values.append(node_features[i[e],:][0,ii])\n for jj in j_idx:\n if jj in j_idx:\n continue\n else:\n indices[0].append(i[e]+n_nodes*j[e])\n indices[1].append(jj)\n values.append(node_features[j[e],:][0,jj])\n indices = torch.LongTensor(indices)\n values = torch.FloatTensor(values)\n edge_features = torch.sparse_coo_tensor(indices, values, sizes)\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n node_features = torch.FloatTensor(np.array(node_features.todense()))\n\n labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, edge_features, node_features, labels, idx_train, idx_val, idx_test",
"def load_graph(graph_path):\n graph = nx.from_edgelist(pd.read_csv(graph_path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph",
"def loadDataZachary(fileName):\n\n \"Initialize a graph\"\n G = nx.Graph()\n\n \"Open file\"\n f = open(fileName)\n\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n while line:\n if(line[0]!=\"%\"):\n ls =line.split(' ')\n num,nums=int(ls[0]),int(ls[1])\n G.add_edge(num,nums)\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n\n \"Closing the file\"\n f.close()\n\n return G, 'Zachary'",
"def read_graph(Amatrix):\n\tG = nx.from_numpy_matrix(Amatrix)\n\tG = G.to_undirected()\n\treturn G",
"def load_graph(file_name, directed=True):\n G = nx.DiGraph() if directed else nx.Graph()\n with open(file_name, \"r\") as f:\n for line in f:\n tokens = line.split()\n u = int(tokens[0])\n v = int(tokens[1])\n if len(tokens) > 2:\n w = float(tokens[2])\n G.add_edge(u, v, weight=w)\n else:\n G.add_edge(u,v)\n return G",
"def load_matrix(file):\n print 'Loading matrix from', file\n\n \n # The slice [()] is for the cases where np.save has stored a\n # sparse matrix in a zero-dimensional array\n\n return np.load(file)[()]",
"def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!",
"def load_sparse_matrix(self, filename):\n with open(filename) as data_file:\n data = json.load(data_file)\n values = data[\"values\"]\n print(\"JSON\")\n print(values)",
"def load(filename):\n return sio.loadmat(filename, appendmat=False, squeeze_me=True)['data']",
"def import_matrix(fileMatrix):\n with open(fileMatrix) as fMat:\n matrix = np.zeros((3,4))\n for ligne in fMat:\n if ligne.startswith(' 1') or ligne.startswith(' 2') or ligne.startswith(' 3'):\n matrix[int(ligne.split()[0])-1,:] = float(ligne.split()[1]),float(ligne.split()[2]),float(ligne.split()[3]),float(ligne.split()[4])\n return deepcopy(matrix)",
"def load_graph(filename):\n\twith tf.gfile.FastGFile(filename, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\ttf.import_graph_def(graph_def, name='')",
"def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)",
"def load_data(path):\n data = loadmat(path)\n return data['X'], data['y']",
"def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')",
"def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')",
"def load_graph(filename):\n with tf.gfile.GFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')",
"def loadgraph(self, path):\n\n raise NotImplementedError",
"def loadh5(fname, path='/data'):\n fp = open_read(fname)\n slab = fp.get_node(path)\n mat = slab.read()\n fp.close()\n return mat",
"def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)",
"def read_graph(filename, directed=True):\n if not directed:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n with open(filename) as f:\n for line in f:\n d = line.split()\n G.add_edge(int(d[0]), int(d[1]))\n print('Read Graph')\n return G",
"def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels",
"def load_dat(file_name):\n data = loadmat(file_name)\n y = data['y']\n X = data['X']\n return X,y"
] | [
"0.722248",
"0.7186501",
"0.70386183",
"0.6996951",
"0.69011986",
"0.68942666",
"0.67839843",
"0.6767686",
"0.6647032",
"0.66407907",
"0.6560666",
"0.6553847",
"0.6541998",
"0.6532575",
"0.65297693",
"0.65271306",
"0.6526533",
"0.64736503",
"0.64715844",
"0.6471263",
"0.6467073",
"0.64622915",
"0.64622915",
"0.64581454",
"0.6449561",
"0.64450467",
"0.64392436",
"0.6398896",
"0.63906986",
"0.63358045"
] | 0.75700504 | 0 |
Returns the Clang version number given an executable. | def _getClangVersion(clangExe):
args = [getPath(clangExe), '--version']
try:
p = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
)
except EnvironmentError, e:
raise EnvironmentError(
"cake: failed to launch %s: %s\n" % (args[0], str(e))
)
stdoutText = p.stdout.readline()
p.stdout.close()
exitCode = p.wait()
if exitCode != 0:
raise EnvironmentError(
"%s: failed with exit code %i\n" % (args[0], exitCode)
)
# Parse through the line to get the version number. Examples:
# Ubuntu clang version 3.6.2-svn238746-1~exp1 (branches/release_36) (based on LLVM 3.6.2)
# clang version 3.5.0 (217039)
versionText = "version "
index = stdoutText.find(versionText)
if index == -1:
raise EnvironmentError(
"%s: version format invalid: %s\n" % (args[0], stdoutText)
)
versionString = stdoutText[index + len(versionText):]
index = versionString.find('-')
index2 = versionString.find(' ')
if index != -1:
if index2 != -1:
index = min(index, index2)
else:
if index2 != -1:
index = index2
versionString = versionString[:index].strip()
return versionString | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetClangVersion(clang_path):\n\n version_re = re.compile(r'clang version (\\d+)\\.(\\d+)')\n output = subprocess.check_output([clang_path, '--version'])\n match = version_re.search(output)\n if match:\n major = int(match.group(1))\n minor = int(match.group(2))\n else:\n print >> sys.stderr, output\n raise Exception('unexpected version string')\n\n return major, minor",
"def version(self, *args, **kwargs):\n\n stdout, stderr = self.ctx.execute((self.exe, '--version'), quieter=1)\n\n m = re.match(\n r'(?:Apple clang .* \\(based on LLVM (\\S+)\\))'\n r'|'\n r'(?:clang version (\\S+))', stdout.decode())\n if m:\n if m.group(1):\n return m.group(1)\n else:\n return m.group(2)\n\n return None",
"def parse_version(bin_path):\n version_str = subprocess.check_output([bin_path, \"--version\"\n ]).decode(\"utf-8\").strip()\n match = re.match(\"^clang-format version ([0-9.]*).*$\", version_str)\n return match.group(1) if match else None",
"def get_version():\n version_file = Path(__file__).resolve().parent / \"clinker\" / \"__init__.py\"\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read_text(), re.M\n )\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Failed to find version string\")",
"def version(self, executable):\n smack_output = self._version_from_tool(executable)\n if smack_output:\n return smack_output.split(\" \")[2]\n else:\n # old versions of SMACK used to print to stderr\n return self._version_from_tool(executable, use_stderr=True).split(\" \")[2]",
"def find_xcode_major_version():\n cmd = ['xcodebuild', '-version']\n command_trace.log(cmd)\n\n result = str(subprocess.check_output(cmd))\n version = result.split('\\n', 1)[0]\n version = re.sub(r'Xcode ', '', version)\n version = re.sub(r'\\..*', '', version)\n return int(version)",
"def get_version():\n version_dict = {}\n exec(open(\"src/chimera/version.py\").read(), version_dict)\n return version_dict['version']",
"def get_gcc_ver(exe=\"gcc\"):\n cmd = [exe, '-v']\n major = -1\n minor = -1\n patch = -1\n raw = sub.check_output(cmd, stderr=sub.STDOUT).decode('ascii').lower().split('\\n')\n for line in raw:\n if line.startswith('gcc version'):\n tokens = line.split()\n # we obtain a version string such as \"5.4.0\"\n verstr = tokens[2].strip()\n vertup = verstr.split('.')\n major = int(vertup[0])\n minor = int(vertup[1])\n patch = int(vertup[2])\n ver = major, minor, patch\n return ver",
"def CheckClangVersion():\n\n if not gyp_driver_utils.Which('clang'):\n sys.stderr.write('clang not found in PATH.\\n')\n raise Exception('clang not found')\n\n # Check that the right version of clang is in the path.\n chromium_path = os.path.join('..', '..', '..', 'external', 'chromium')\n update_script = os.path.join(chromium_path,\n 'tools', 'clang', 'scripts', 'update.sh')\n clang_bin_path = os.path.join(\n chromium_path,\n 'third_party',\n 'llvm-build',\n 'Release+Asserts',\n 'bin')\n clang_bin = os.path.join(clang_bin_path, 'clang')\n if not os.path.exists(clang_bin):\n print >> sys.stderr, 'clang not found, updating'\n subprocess.check_call(update_script)\n\n if not os.path.exists(clang_bin):\n raise Exception('clang not found')\n\n req_major, req_minor = GetClangVersion(clang_bin)\n major, minor = GetClangVersion('clang')\n\n if major < req_major or (major == req_major and minor < req_minor):\n msg = '\\nclang version %d.%d or higher required' % (req_major, req_minor)\n msg += ' (%d.%d found).\\n' % (major, minor)\n msg += 'Please ensure ' + clang_bin_path + ' is in the PATH.'\n print >> sys.stderr, msg\n raise Exception('clang version')",
"def version():\n\n version = None\n output = gitopen(['--version'])\n m = re.search(br\" version ([\\d\\.A-Za-z]+)\", output)\n if m is not None:\n version = m.group(1).decode('utf-8')\n return version",
"def version():\n return Tns.exec_command(command='--version')",
"def get_setup_version():\n if os.path.isdir(\".git\"):\n process = subprocess.Popen(COMMAND_DESCRIBE_VERSION, **SUBPROCESS_KWARGS)\n process.wait()\n version = process.communicate()[0].decode(\"utf-8\").strip()\n return re.match(re_version, version).group(1)\n else:\n return \"0.1\"",
"def get_version():\n try:\n return check_output(\n \"git describe --tags\".split(\" \")\n ).decode('utf-8').strip()\n except CalledProcessError:\n return check_output(\n \"git rev-parse --short HEAD\".split(\" \")\n ).decode('utf-8').strip()",
"def version_number() -> int:\n return 0",
"def get_version_from_executable(\n cls,\n bin_path: Union[Path, str],\n *,\n cwd: Optional[Union[Path, str]] = None,\n env: Optional[Dict[str, str]] = None,\n ) -> Optional[Version]:\n output = subprocess.check_output(\n [str(bin_path), \"-version\"], cwd=cwd, env=env\n ).decode()\n match = re.search(cls.VERSION_OUTPUT_REGEX, output)\n if not match:\n return None\n return cls.parse_version_string(output)",
"def _find_clang_format():\n required_clang_format_major = 10\n\n def parse_version(bin_path):\n \"\"\"\n Get clang-format version string. Returns None if parsing fails.\n \"\"\"\n version_str = subprocess.check_output([bin_path, \"--version\"\n ]).decode(\"utf-8\").strip()\n match = re.match(\"^clang-format version ([0-9.]*).*$\", version_str)\n return match.group(1) if match else None\n\n def parse_version_major(bin_path):\n \"\"\"\n Get clang-format major version. Returns None if parsing fails.\n \"\"\"\n version = parse_version(bin_path)\n return int(version.split(\".\")[0]) if version else None\n\n def find_bin_by_name(bin_name):\n \"\"\"\n Returns bin path if found. Otherwise, returns None.\n \"\"\"\n bin_path = shutil.which(bin_name)\n if bin_path is None:\n return None\n else:\n major = parse_version_major(bin_path)\n return bin_path if major == required_clang_format_major else None\n\n bin_path = find_bin_by_name(\"clang-format\")\n if bin_path is not None:\n bin_version = parse_version(bin_path)\n return bin_path, bin_version\n\n bin_path = find_bin_by_name(f\"clang-format-{required_clang_format_major}\")\n if bin_path is not None:\n bin_version = parse_version(bin_path)\n return bin_path, bin_version\n\n raise RuntimeError(\n f\"clang-format version {required_clang_format_major} not found. \"\n \"See http://www.open3d.org/docs/release/contribute/styleguide.html#style-guide \"\n \"for help on clang-format installation.\")",
"def gcc_version(gcc):\n\tversion = \"\"\n\ttry:\n\t\tversion = os.popen(\"%s --version\" % gcc).readline().split()[-1]\n\texcept:\n\t\tpass\n\treturn version",
"def version_from_path(cls, tools: ToolCache, java_path: str | Path) -> str:\n output = tools.subprocess.check_output(\n [\n os.fsdecode(Path(java_path) / \"bin\" / \"javac\"),\n \"-version\",\n ],\n )\n # javac's output should look like \"javac 17.0.X\\n\"\n return output.strip(\"\\n\").split(\" \")[1]",
"def parse_version_major(bin_path):\n version = parse_version(bin_path)\n return int(version.split(\".\")[0]) if version else None",
"def detect_version(self):\n\n version = self.scm_object.detect_version(self.args.__dict__).strip()\n logging.debug(\"VERSION(auto): %s\", version)\n return version",
"def version_number(path: str) -> str:\n exp = r'__version__[ ]*=[ ]*[\"|\\']([\\d]+\\.[\\d]+\\.[\\d]+[\\.dev[\\d]*]?)[\"|\\']'\n version_re = re.compile(exp)\n\n with open(path, 'r') as fqe_version:\n version = version_re.search(fqe_version.read()).group(1)\n\n return version",
"def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]",
"def get_clang_format_command(self, version):\n clang_format_name = 'clang-format-{0}'.format(version)\n return self._cmd_runner.find_executable(clang_format_name)",
"def src_get_version():\n return ffi.string(_lib.src_get_version()).decode()",
"def main() -> int:\n version: str | None = None\n\n if (path_pyproject := Path(\"pyproject.toml\")).is_file():\n with open(path_pyproject, \"rb\") as fp:\n data = tomllib.load(fp)\n\n try:\n version = data[\"project\"][\"version\"]\n except KeyError:\n pass\n\n if version is None and (path_setup_cfg := Path(\"setup.cfg\")).is_file():\n parser = configparser.ConfigParser()\n parser.read(path_setup_cfg)\n\n try:\n version = parser[\"metadata\"][\"version\"]\n except KeyError:\n pass\n\n if version is None:\n return 1\n print(version)\n return 0",
"def get_version(program: str) -> str:\n import subprocess\n cmd = \"dpkg -l | grep '{}'\".format(program)\n process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)\n (out, _) = process.communicate()\n result = out.decode()\n version = result.split()\n\n if len(version) >= 3:\n if version[1] == program:\n return version[2]\n return \"Cannot find version for '{}'\".format(program)",
"def get_version(program):\n\n return \"%s from mrtools %s\" % (program, mrtools_version)",
"def get_version():\n\n with open('yubico/yubico_version.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)",
"def __get_version_seq_typing():\n\n try:\n cli = [\"seq_typing.py\", \"--version\"]\n p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)\n stdout = p.communicate()[0]\n\n version = stdout.splitlines()[0].split()[-1].decode(\"utf8\")\n except Exception as e:\n logger.debug(e)\n version = \"undefined\"\n\n return version",
"def version():\n\n version_full = semver()\n CWD = os.path.dirname(__file__)\n got_git = os.path.exists(os.path.join(os.path.dirname(__file__), \"..\", \".git\"))\n if not got_git:\n return version_full\n try:\n # determine git binary\n git = \"git\"\n try:\n subprocess.check_output([git, \"--version\"])\n except Exception:\n git = \"/usr/bin/git\"\n try:\n subprocess.check_output([git, \"--version\"])\n except Exception as e:\n return version_full\n\n version_full = subprocess.check_output([git, \"describe\", \"--always\", \"--tags\"], cwd=CWD).strip().decode(\"ascii\")\n version_full = version_full.replace(\"-\", \"+\", 1).replace(\"-\", \".\") # Make this consistent with PEP440\n\n except Exception as e:\n print(\"Could not determine PODPAC version from git repo.\\n\" + str(e))\n\n return version_full"
] | [
"0.7412703",
"0.7406646",
"0.698086",
"0.6378129",
"0.62712646",
"0.61881644",
"0.6167377",
"0.60677725",
"0.59315634",
"0.58684695",
"0.5847262",
"0.5826577",
"0.57785463",
"0.577731",
"0.5769445",
"0.5765052",
"0.5748158",
"0.57396495",
"0.57374156",
"0.5718005",
"0.57160175",
"0.5705256",
"0.57021224",
"0.5688219",
"0.5687424",
"0.56836385",
"0.56763244",
"0.56654996",
"0.56414783",
"0.55747837"
] | 0.7883222 | 0 |
Seeks all numbers in the dict objects and squares them | def seekNumbersAndSquare(dictionary):
print("Squaring objects with integers")
for key in dictionary:
if isinstance(dictionary[key], (int, float)):
dictionary[key] = math.pow(dictionary[key], 2)
return dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def seekNumbersAndSquareInLists(dictionary):\n for key in dictionary:\n if isinstance(dictionary[key], list):\n dictionary[key] = [math.pow(item, 2) if isinstance(item, (int, float)) else item for item in dictionary[key] ]\n \n return dictionary",
"def fill_given_numbers(square, row, col, sq_nr, dicts, squares_coords):\n rm, cm, sm = dicts\n sq = squares_coords\n for row_idx, sr in enumerate(square):\n for col_idx, sv in enumerate(sr):\n coord = (row + row_idx, col + col_idx)\n if sv == 0:\n sq[coord] = sq_nr\n continue\n rm[coord[0]].append(sv)\n cm[coord[1]].append(sv)\n sm[sq_nr].append(sv)\n return dicts, sq",
"def square_nums(number_list):",
"def dict_fun(obj):\n dict_memory_sum = 0 # calculates the total memory used by fields in a dictionary\n for each_key in obj.keys():\n dict_obj_val = obj[each_key]\n if type(dict_obj_val) == list:\n dict_memory_sum = dict_memory_sum + list_fun(dict_obj_val)\n elif type(dict_obj_val) == dict:\n dict_memory_sum = dict_memory_sum + dict_fun(obj[each_key])\n else:\n dict_memory_sum = dict_memory_sum + norm_fun(obj[each_key])\n return dict_memory_sum + list_fun(obj.keys())",
"def test_dict_same_occurrence_all_number(self):\n\n argument = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 10, 12: 10, 13: 10}\n actual = file_io.top_ten(argument)\n expected = [[10, 13], [10, 12], [10, 11], [10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4]]\n self.assertEqual(actual, expected)",
"def data_processing(data_dic: Dict[str, int]):\n\n sum_0 = 0\n for key, value in data_dic.items():\n if int(list(key)[0]) + int(list(key)[1]) == 0:\n sum_0 += value\n return sum_0 / shots",
"def iter_nums():\n saved = dict()\n\n def get_or_zero(x, y):\n \"\"\" Get the value at (x, y) in the cache, or return 0 \"\"\"\n coord = (x, y)\n if coord in saved:\n return saved[coord]\n else:\n return 0\n\n for coord in iter_coords():\n x, y = coord\n if coord == (0, 0):\n val = 1\n else:\n val = 0\n val += get_or_zero(x-1, y-1)\n val += get_or_zero(x, y-1)\n val += get_or_zero(x+1, y-1)\n val += get_or_zero(x-1, y)\n val += get_or_zero(x+1, y)\n val += get_or_zero(x-1, y+1)\n val += get_or_zero(x, y+1)\n val += get_or_zero(x+1, y+1)\n\n saved[coord] = val\n\n yield val",
"def all_reduce(self):\n return {k: reduce_number(v) for k, v in self.items()}",
"def print_dict(dictionary):\r\n for key in sorted(dictionary.keys()):\r\n value = dictionary[key]\r\n if type(value) == float:\r\n value = round(value, 8)\r\n print (key, value)",
"def populate_dicts(m, square_sides, dicts):\n sq_nr = 0\n squares_coords = {}\n for row in range(0, square_sides ** 2, square_sides):\n for col in range(0, square_sides ** 2, square_sides):\n sq_nr += 1\n square = [islice(m[i], col, square_sides + col) for i in range(row, row + square_sides)]\n dicts, square_coords = fill_given_numbers(square, row, col, sq_nr, dicts, squares_coords)\n return dicts, square_coords",
"def print_dict(dictionary):\n for key in sorted(dictionary.keys()):\n value = dictionary[key]\n if type(value) == float:\n value = round(value, 8)\n print key, value",
"def __iter__(self):\n for value in dict.__iter__(self):\n for count in range(self[value]):\n yield value",
"def calcdice(indict, intoks): # type: ({}, []) -> {}\n\n if 'Tokens' not in indict:\n return indict\n\n dictset = set(indict['Tokens'])\n inset = set(intoks)\n\n dicescore = (2.0 * len(dictset.intersection(inset))) / (len(dictset) + len(inset))\n indict['Dice'] = dicescore\n\n return indict",
"def summarize(allowances):\n total_allowances = 0\n if isinstance(allowances, dict):\n for key, value in allowances.items():\n total_allowances = total_allowances + int(value)\n #end for\n else:\n total_allowances = allowances\n return total_allowances",
"def sum_of_squares(n, k, zeros=False):\n yield from power_representation(n, 2, k, zeros)",
"def test_dict_size_ten_all_number(self):\n argument = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10}\n actual = file_io.top_ten(argument)\n expected = [[10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]\n self.assertEqual(actual, expected)",
"def square_factor(a):\n f = a if isinstance(a, dict) else factorint(a)\n return Mul(*[p**(e//2) for p, e in f.items()])",
"def find_multiple(self, num):\n result = dict()\n for n in range(1, num+1):\n temp = self.find_prime_divisors(n)\n result.update({k:v for k,v in temp.items() if k not in result or result[k] < v})\n return reduce(operator.mul, (pow(k, v) for k,v in result.items()))",
"def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)",
"def grid_vals(grid):\n\tletters = list(grid)\n\t#print \"---------------------------------\\n-------------------\"\n\t#print letters\n\t#print \"----------------------------------\\n-------------------\"\n\tassert len(letters) == 81\n\ttempdict = zip(squares, letters)\n\treturn dict(tempdict)",
"def normalize(dictionary, num):\n for key in dictionary.keys():\n dictionary[key] = float(dictionary[key])/num\n return dictionary",
"def squares(s):\n\n \"*** YOUR CODE HERE ***\"\n return [int(x**(1/2)) for x in s if x**(1/2) == round(x**(1/2))]",
"def normalize(self):\n total = float(sum(self.values()))\n for key in self:\n self[key] /= total",
"def divide_dict(src_dict, num):\n return {key:(value/num) for key, value in src_dict.items()}",
"def __init__(self):\n self.square_size = 3 # large squares on a side\n self.size = self.square_size**2 # squares on a side\n numbers = self.numbers = tuple(range(1, self.size + 1))\n rows = self.rows = range(self.size)\n cols = self.cols = range(self.size)\n self.values = {(r,c): numbers for r in rows for c in cols}\n self.number_strings = '.' + ''.join(str(x) for x in self.numbers)",
"def multiplication_total_of(num_list):",
"def __init__(self):\n self.map = dict()\n self.nums = []",
"def add_numprocs(value):\n return_list = []\n for l in value:\n _tmp_dict = l\n _tmp_dict[\"NP\"] = l[\"totsize\"] / l[\"subdimsize\"]\n return_list.append(_tmp_dict)\n return sorted(return_list, key=lambda i: i[\"NP\"])",
"def test_reduce_sum_keys(self):\n dictionary = {\n 1: { 2: {}, 3: {} },\n 2: { 4: {}, 6: {} },\n }\n\n \n # filter out the odd elements\n actual = dicttools.reduce(lambda acc, key, _: acc + key, dictionary, 0)\n self.assertEquals(18, actual, msg=\"%s != %s\" % (actual, 18))",
"def square_numbers_2(nums):\n for i in nums:\n yield(i*i)"
] | [
"0.66149116",
"0.58740765",
"0.5765753",
"0.5512213",
"0.5443256",
"0.54291236",
"0.541231",
"0.53813",
"0.53510463",
"0.5307061",
"0.53039116",
"0.5298743",
"0.52881813",
"0.5285586",
"0.5282342",
"0.52787817",
"0.5258328",
"0.5250214",
"0.52262485",
"0.5218104",
"0.521059",
"0.52095467",
"0.52066135",
"0.5200936",
"0.5171526",
"0.51539606",
"0.513827",
"0.5093631",
"0.50769407",
"0.5065149"
] | 0.7583046 | 0 |
Seeks all lists in the dict objects then squares numbers in those dictionaries | def seekNumbersAndSquareInLists(dictionary):
for key in dictionary:
if isinstance(dictionary[key], list):
dictionary[key] = [math.pow(item, 2) if isinstance(item, (int, float)) else item for item in dictionary[key] ]
return dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def seekNumbersAndSquare(dictionary):\n print(\"Squaring objects with integers\")\n for key in dictionary:\n if isinstance(dictionary[key], (int, float)):\n dictionary[key] = math.pow(dictionary[key], 2)\n \n return dictionary",
"def dict_fun(obj):\n dict_memory_sum = 0 # calculates the total memory used by fields in a dictionary\n for each_key in obj.keys():\n dict_obj_val = obj[each_key]\n if type(dict_obj_val) == list:\n dict_memory_sum = dict_memory_sum + list_fun(dict_obj_val)\n elif type(dict_obj_val) == dict:\n dict_memory_sum = dict_memory_sum + dict_fun(obj[each_key])\n else:\n dict_memory_sum = dict_memory_sum + norm_fun(obj[each_key])\n return dict_memory_sum + list_fun(obj.keys())",
"def list_fun(obj):\n list_memory_sum = 0 # used to calculate total memory occupied by list elements\n for item in obj:\n if type(item) != dict:\n list_memory_sum = list_memory_sum + norm_fun(item)\n else:\n list_memory_sum = list_memory_sum + dict_fun(item)\n return list_memory_sum",
"def fill_given_numbers(square, row, col, sq_nr, dicts, squares_coords):\n rm, cm, sm = dicts\n sq = squares_coords\n for row_idx, sr in enumerate(square):\n for col_idx, sv in enumerate(sr):\n coord = (row + row_idx, col + col_idx)\n if sv == 0:\n sq[coord] = sq_nr\n continue\n rm[coord[0]].append(sv)\n cm[coord[1]].append(sv)\n sm[sq_nr].append(sv)\n return dicts, sq",
"def populate_dicts(m, square_sides, dicts):\n sq_nr = 0\n squares_coords = {}\n for row in range(0, square_sides ** 2, square_sides):\n for col in range(0, square_sides ** 2, square_sides):\n sq_nr += 1\n square = [islice(m[i], col, square_sides + col) for i in range(row, row + square_sides)]\n dicts, square_coords = fill_given_numbers(square, row, col, sq_nr, dicts, squares_coords)\n return dicts, square_coords",
"def square_nums(number_list):",
"def second_round_output(self, ram_dict):\n\t\tresult = {}\n\t\tfor key in ram_dict:\n\t\t\tresult[key] = [len(ram_dict[key]), ram_dict[key]]\n\t\treturn result",
"def summarize(allowances):\n total_allowances = 0\n if isinstance(allowances, dict):\n for key, value in allowances.items():\n total_allowances = total_allowances + int(value)\n #end for\n else:\n total_allowances = allowances\n return total_allowances",
"def add_numprocs(value):\n return_list = []\n for l in value:\n _tmp_dict = l\n _tmp_dict[\"NP\"] = l[\"totsize\"] / l[\"subdimsize\"]\n return_list.append(_tmp_dict)\n return sorted(return_list, key=lambda i: i[\"NP\"])",
"def dict_species_sums(mech):\n if mech == \"racm_esrl_vcp\":\n sum_dict = {}\n # Arrays for different gasses and pm groupings\n sum_dict.update(\n {\n \"noy_gas\": [\n \"hno3\",\n \"no\",\n \"no2\",\n \"no3\",\n \"pan\",\n \"tpan\",\n \"hono\",\n \"hno4\",\n \"onit\",\n \"n2o5\",\n \"ison\",\n \"nald\",\n \"mpan\",\n ]\n }\n )\n sum_dict.update({\"noy_gas_weight\": [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]})\n sum_dict.update(\n {\"noy_aer\": [\"no3ai\", \"no3aj\"]}\n ) # Need to confirm here if there is a size cutoff for noy obs?\n sum_dict.update({\"nox\": [\"no\", \"no2\"]})\n sum_dict.update({\"pm25_cl\": [\"clai\", \"claj\"]})\n sum_dict.update({\"pm25_cl_weight\": [1, 1]})\n sum_dict.update({\"pm25_ec\": [\"eci\", \"ecj\"]})\n sum_dict.update({\"pm25_ec_weight\": [1, 1]})\n sum_dict.update({\"pm25_na\": [\"naai\", \"naaj\"]})\n sum_dict.update({\"pm25_na_weight\": [1, 1]})\n sum_dict.update({\"pm25_nh4\": [\"nh4ai\", \"nh4aj\"]})\n sum_dict.update({\"pm25_nh4_weight\": [1, 1]})\n sum_dict.update({\"pm25_no3\": [\"no3ai\", \"no3aj\"]})\n sum_dict.update({\"pm25_no3_weight\": [1, 1]})\n sum_dict.update({\"pm25_so4\": [\"so4ai\", \"so4aj\"]})\n sum_dict.update({\"pm25_so4_weight\": [1, 1]})\n sum_dict.update(\n {\n \"pm25_om\": [\n \"asoa1i\",\n \"asoa1j\",\n \"asoa2i\",\n \"asoa2j\",\n \"asoa3i\",\n \"asoa3j\",\n \"asoa4i\",\n \"asoa4j\",\n \"bsoa1i\",\n \"bsoa1j\",\n \"bsoa2i\",\n \"bsoa2j\",\n \"bsoa3i\",\n \"bsoa3j\",\n \"bsoa4i\",\n \"bsoa4j\",\n \"orgpai\",\n \"orgpaj\",\n ]\n }\n )\n elif mech == \"redhc\":\n sum_dict = {}\n # Arrays for different gasses and pm groupings\n sum_dict.update({\"noy_gas\": [\"hno3\", \"no\", \"no2\", \"no3\", \"pan\", \"ho2no2\", \"onit\", \"n2o5\"]})\n sum_dict.update({\"noy_gas_weight\": [1, 1, 1, 1, 1, 1, 1, 2]})\n sum_dict.update(\n {\"noy_aer\": [\"no3ai\", \"no3aj\"]}\n ) # Need to confirm here if there is a size cutoff for noy obs?\n sum_dict.update({\"nox\": [\"no\", \"no2\"]})\n sum_dict.update({\"pm25_cl\": [\"clai\", \"claj\"]})\n sum_dict.update({\"pm25_cl_weight\": [1, 1]})\n sum_dict.update({\"pm25_ec\": [\"eci\", \"ecj\"]})\n sum_dict.update({\"pm25_ec_weight\": [1, 1]})\n sum_dict.update({\"pm25_na\": [\"naai\", \"naaj\"]})\n sum_dict.update({\"pm25_na_weight\": [1, 1]})\n sum_dict.update({\"pm25_nh4\": [\"nh4ai\", \"nh4aj\"]})\n sum_dict.update({\"pm25_nh4_weight\": [1, 1]})\n sum_dict.update({\"pm25_no3\": [\"no3ai\", \"no3aj\"]})\n sum_dict.update({\"pm25_no3_weight\": [1, 1]})\n sum_dict.update({\"pm25_so4\": [\"so4ai\", \"so4aj\"]})\n sum_dict.update({\"pm25_so4_weight\": [1, 1]})\n sum_dict.update(\n {\n \"pm25_om\": [\n \"asoa0j\",\n \"asoa0i\",\n \"asoa1i\",\n \"asoa1j\",\n \"asoa2i\",\n \"asoa2j\",\n \"asoa3i\",\n \"asoa3j\",\n \"bsoa1i\",\n \"bsoa1j\",\n \"bsoa2i\",\n \"bsoa2j\",\n \"bsoa3i\",\n \"bsoa3j\",\n \"poa0j\",\n \"poa0i\",\n \"poa1j\",\n \"poa1i\",\n \"poa2j\",\n \"poa2i\",\n \"poa3j\",\n \"poa3i\",\n ]\n }\n )\n\n else:\n raise NotImplementedError(\"Mechanism not supported, update _wrfchem_mm.py file in MONETIO\")\n\n return sum_dict",
"def calcdice(indict, intoks): # type: ({}, []) -> {}\n\n if 'Tokens' not in indict:\n return indict\n\n dictset = set(indict['Tokens'])\n inset = set(intoks)\n\n dicescore = (2.0 * len(dictset.intersection(inset))) / (len(dictset) + len(inset))\n indict['Dice'] = dicescore\n\n return indict",
"def grid_vals(grid):\n\tletters = list(grid)\n\t#print \"---------------------------------\\n-------------------\"\n\t#print letters\n\t#print \"----------------------------------\\n-------------------\"\n\tassert len(letters) == 81\n\ttempdict = zip(squares, letters)\n\treturn dict(tempdict)",
"def _sum_over_dicts(total_n_grams: Dict[int, Tensor], n_grams: Dict[int, Tensor]) ->Dict[int, Tensor]:\n for n in n_grams:\n total_n_grams[n] += n_grams[n]\n return total_n_grams",
"def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)",
"def count_dict(self, lst):\n nos = list(self.digits)\n digit_count = dict([(digit, 0) for digit in nos])\n for item in lst:\n for num in item:\n digit_count[num] += 1\n return digit_count",
"def test_fn_call_with_dict():\n l = [1, 2, 3, 4, 5]\n ds = [defaultdict(int), defaultdict(int), defaultdict(int)]\n for d in ds:\n for fn in [s7.div, s7.mul, s7.add, \"abcd\", 1234]:\n try:\n f = s7.count_fn_called_with_dict(dict_=d, fn=fn)\n for i in range(0, random.randint(2, 10)):\n f(*l)\n assert fn in d.keys() and d[fn] == (i + 1)\n except Exception as e:\n assert e.__class__.__name__ == TypeError.__name__",
"def test_dict_same_occurrence_all_number(self):\n\n argument = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 10, 12: 10, 13: 10}\n actual = file_io.top_ten(argument)\n expected = [[10, 13], [10, 12], [10, 11], [10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4]]\n self.assertEqual(actual, expected)",
"def multDic(dic, x):\n pass",
"def evaluate(out_dict, n):\n out = dict()\n for key, entry in out_dict.items():\n out[key] = dict()\n for it_count, data in entry.items():\n total = 0.\n count = 0\n for x_list in data.values():\n total += analytic_value_VaR(x_list[-1])\n count += 1\n out[key][it_count] = total / count\n np.save('normal_out_all_cvar_%d.npy' % n, out)\n print(out)",
"def printValues():\r\n grand_prod_cost = []\r\n grand_album_sales = []\r\n for x in d:\r\n print(\"----------------------------------------------\")\r\n print(\"Statistics for Band '\" + x + \"'\")\r\n thisDict = d[x]\r\n print(\"1)What is the total production cost of the album? :\", round(sum(thisDict[ProdCost]), 2))\r\n print(\"2)What is the total sales of the album? :\", round(sum(thisDict[AlbumSales]), 2))\r\n print(\"3)What is the average production cost of the album?:\", round(mean(thisDict[ProdCost]), 2))\r\n print(\"4)What is the average of the album sale? :\", round(mean(thisDict[AlbumSales]), 2))\r\n print(\"5)Net Profit/Loss :\", round(sum(thisDict[AlbumSales]) - sum(thisDict[ProdCost]), 2))\r\n\r\n grand_prod_cost +=thisDict[ProdCost]\r\n grand_album_sales +=(thisDict[AlbumSales])\r\n\r\n print('**********************************************************************************')\r\n print('Statistics of all albums')\r\n print('1)What is the total production cost of all albums? :', round(sum(grand_prod_cost), 2))\r\n print('2)What is the total sales of all albums? :', round(sum(grand_album_sales), 2))\r\n print('3)What is the average production cost of all albums?:', round(mean(grand_prod_cost),2))\r\n print('4)What is the average of all album sales? :', round(mean((grand_album_sales)),2))\r\n print('5)Net Profit/Loss :', round((sum(grand_album_sales) - sum(grand_prod_cost)),2))\r\n print('**********************************************************************************')",
"def multiplication_total_of(num_list):",
"def create_dicts_for_results(dict_all_embeddings, dict_mission, our_initial, n):\r\n keys_ours, keys_state_of_the_art = divide_to_keys(dict_all_embeddings)\r\n keys = list(dict_all_embeddings.keys())\r\n\r\n list_dicts = []\r\n\r\n for key in keys:\r\n if key in keys_ours:\r\n embd_algo = dict_all_embeddings[key][1]\r\n regression = dict_all_embeddings[key][0]\r\n initial = our_initial\r\n else:\r\n embd_algo = key\r\n regression = \"\"\r\n initial = [n]\r\n t = round(dict_all_embeddings[key][2], 3)\r\n dict_results_by_arr = dict_mission[key]\r\n ratio_arr = list(dict_results_by_arr.keys())\r\n for r in ratio_arr:\r\n all_micro = dict_results_by_arr[r][0]\r\n all_macro = dict_results_by_arr[r][1]\r\n all_auc = dict_results_by_arr[r][3]\r\n for i in range(len(initial)):\r\n std_micro, std_macro, std_auc = calculate_std(r, i, dict_mission, keys_ours, keys_state_of_the_art)\r\n if key in keys_ours:\r\n t = round(dict_all_embeddings[key][8][i])\r\n initial_size = initial[i]\r\n test_ratio = r\r\n micro_f1 = float(round(all_micro[i], 3))\r\n macro_f1 = float(round(all_macro[i], 3))\r\n auc = float(round(all_auc[i], 3))\r\n if key in keys_state_of_the_art:\r\n initial_size = \"\"\r\n dict_results = {\"initial size\": initial_size, \"embed algo\": embd_algo, \"regression\": regression,\r\n \"test\": test_ratio, \"micro-f1\": str(micro_f1)+\"+-\"+std_micro,\r\n \"macro-f1\": str(macro_f1)+\"+-\"+std_macro, \"auc\": str(auc)+\"+-\"+std_auc, \"time\": t}\r\n list_dicts.append(dict_results)\r\n return list_dicts",
"def calculate_vals(self):\n for pp in self.powerplants:\n pp[\"vals\"] = self.possible_vals(pp)\n pp[\"index\"] = 0",
"def third(self, typedic, typeSimdic):\r\n for key_a in typedic:\r\n for key_b in typedic:\r\n if key_a != key_b:\r\n for pro_a in typedic[key_a]:\r\n if pro_a in typedic[key_b]:\r\n for pro_b in typedic[key_b]:\r\n if pro_a != pro_b:\r\n if key_a in self.thirdRes and pro_b in self.thirdRes[key_a]:\r\n self.thirdRes[key_a][pro_b] += float(\r\n self.getScores(1, 1, 1))\r\n else:\r\n self.addtwodimdict(\r\n self.thirdRes, key_a, pro_b, float(self.getScores(1, 1, 1)))",
"def __iter__(self):\n for value in dict.__iter__(self):\n for count in range(self[value]):\n yield value",
"def test_dict_size_ten_all_number(self):\n argument = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10}\n actual = file_io.top_ten(argument)\n expected = [[10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]\n self.assertEqual(actual, expected)",
"def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)",
"def _process(proc_data: List[Dict]) -> List[Dict]:\n for entry in proc_data:\n for key in entry:\n try:\n entry[key] = int(entry[key])\n except Exception:\n pass\n\n return proc_data",
"def measurements(levels_results, measure_levels, simple_results, measure_simple, list_results, measure_list,\n sizes, boxes, outer_box, tries):\n for size in sizes:\n # there may be too few boxes for this size of sample, so skip it\n if len(boxes) < size:\n print(\"Not enough boxes in dictionary for size: \" + str(size))\n continue\n\n boxes_sized = boxes[:size]\n\n if measure_levels is True:\n t = timeit.Timer(lambda: levels_algorithm(outer_box, boxes_sized))\n result = t.timeit(number=tries) / tries\n levels_results.append(result)\n\n if measure_simple is True:\n t = timeit.Timer(lambda: simple_algorithm(outer_box, boxes_sized))\n result = t.timeit(number=tries) / tries\n simple_results.append(result)\n\n if measure_list is True:\n t = timeit.Timer(lambda: list_algorithm(outer_box, boxes_sized))\n result = t.timeit(number=tries) / tries\n list_results.append(result)",
"def remove_updated_from_dicts(fit, dicts, squares_coords):\n row, col, n = fit\n rm, cm, sm = dicts\n sq = squares_coords\n rm[row].remove(n)\n cm[col].remove(n)\n sm[squares_coords[row, col]].remove(n)\n del sq[(row, col)]\n return dicts"
] | [
"0.704333",
"0.61906844",
"0.59104836",
"0.59045947",
"0.5815408",
"0.5467958",
"0.5410325",
"0.5372194",
"0.53586155",
"0.5355751",
"0.5331904",
"0.5262356",
"0.5239842",
"0.52345526",
"0.522166",
"0.5198905",
"0.5185901",
"0.51802784",
"0.5179829",
"0.50994056",
"0.5074193",
"0.5071771",
"0.505183",
"0.5044612",
"0.5029939",
"0.5029218",
"0.5021522",
"0.50203925",
"0.50124913",
"0.50103766"
] | 0.71875894 | 0 |
Remove nodes whose associated component sequence is shorter than min_len. | def filter_by_seq_len(self, min_len):
if not isinstance(min_len, numbers.Number):
raise TypeError("min_len must be a number")
# Iterate over the nodes and remove any nodes shorter than min_len
old_nodes = set(self.nodes)
for n in old_nodes:
comp_name = n[:-2]
if self.get_component_len(comp_name) < min_len:
self.remove_node(n) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_small_strings(self, min_string_length):\n\n self.__corpora = [string for string in self.__corpora if len(string) >= min_string_length]",
"def remove_shorts(word_list, minimum_length):\n\tworking_word_list = []\n\tfor word in word_list:\n\t\tif len(word) >= minimum_length:\n\t\t\tworking_word_list.append(word)\n\treturn working_word_list",
"def trim_texts_by_count(self, min_count=100):\n\n for tid, text in self.graph.nodes(data=True):\n if text['count'] < min_count:\n self.graph.remove_node(tid)",
"def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break",
"def _remove_short_sequences(self, file_path, min_sequence_length, file_format=\"fasta\"):\n assert self.validate_file(file_path)\n assert isinstance(min_sequence_length, int), \"Expected natural digit\"\n assert isinstance(file_format, str), \"Expected file format 'fasta'\"\n\n file_path_output = tempfile.mktemp(dir=self._tmp_dir)\n with open(file_path) as stream_input, open(file_path_output, 'w') as stream_output:\n total_base_pairs = self._stream_sequences_of_min_length(\n stream_input, stream_output,\n sequence_min_length=min_sequence_length,\n file_format=file_format\n )\n if total_base_pairs == 0:\n msg = \"No valid sequences > {} found!\".format(min_sequence_length)\n self._logger.error(msg)\n raise IOError(msg)\n return file_path_output",
"def filter_by_minimum_length(\n self, min_length: int, output_file: Path = None, point_to_new_file: bool = True\n ) -> None:\n if output_file is None:\n output_file = (\n Path(self._input_file.parent)\n / f\"{self._input_file.stem}_minlength{self._input_file.suffix}\"\n )\n else:\n output_file = Path(output_file)\n fasta = pyfastx.Fasta(\n self.file_path.as_posix(), build_index=False, full_name=True\n )\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n for record_name, record_seq in fasta:\n if len(record_seq) >= min_length:\n outfile.write(f\">{record_name}\\n{record_seq}\\n\")\n if point_to_new_file:\n self.file_path = output_file",
"def filter_by_min_length(self, nchars):\n \n self.docs = self._filter_by_length(nchars, 'min')\n return self",
"def prune_short_lines(lines, min_length): \n pruned_lines = [line for line in lines] # converts MultiLineString to list\n to_prune = []\n \n for i, line in enumerate(pruned_lines):\n if line.length < min_length:\n to_prune.append(i)\n for n in neighbors(pruned_lines, line):\n contact_point = line.intersection(pruned_lines[n])\n pruned_lines[n] = bend_towards(pruned_lines[n], \n where=contact_point,\n to=line.centroid)\n \n return [line for i, line in enumerate(pruned_lines) if i not in to_prune]",
"def filter_min_length(self, string):\n newstring = string\n length = len(newstring)\n min_length = 3\n num_to_add = min_length - length\n while num_to_add > 0:\n newstring = newstring + \"x\"\n num_to_add = num_to_add - 1\n\n return newstring",
"def delete_small_trajectories(trajectories, best_parameters):\n print('Filtering small trajectories...', end = ' ')\n size = best_parameters['min_size']\n pop_ind =[]\n for k, trajectory in enumerate(trajectories):\n traj = vis.get_points(trajectory)\n if len(np.unique(traj, axis = 0))<=size:\n pop_ind.append(k)\n for index in sorted(pop_ind, reverse = True):\n del trajectories[index]\n print('Done.')",
"def _filter_len(self):\n if max([len(x) for x in self._word_set]) == len(self._to_word):\n return self._word_set\n else:\n new_words = set()\n for word in self._word_set:\n if len(self._from_word) == len(word):\n new_words.add(word)\n\n return new_words",
"def merge_sentences_min_len(text: List[str], min_len: int) -> List[str]:\n\n def reducer(acc, x):\n if acc and (sum(map(len, acc[-1])) < min_len):\n acc[-1].append(x)\n return acc\n else:\n return acc + [[x]]\n\n new_text = ['. '.join(sents) for sents in reduce(reducer, text, [])]\n\n return new_text",
"def prune(pybel_list, min_RMSD):\n #Set up OBAling object\n align = openbabel.OBAlign()\n #Loop\n i = 0\n total_removed = 0\n while i < len(pybel_list):\n referens = pybel_list[i].OBMol #reference\n align.SetRefMol(referens)\n j = i + 1\n while j < len(pybel_list):\n target = pybel_list[j].OBMol #target\n align.SetTargetMol(target)\n #Align and ret rmsd\n if align.Align():\n rmsd = align.GetRMSD()\n if rmsd < min_RMSD:\n pybel_list.pop(j) #remove from both lists\n total_removed += 1\n else:\n j = j + 1\n else:\n print \"Couldn't align\"\n raise Exception()\n #end of inner loop\n i = i + 1\n #end of outer loop\n print \"finished deleting, total number of \\\n removed conformers is\", total_removed\n return pybel_list",
"def small_word_filter(words, min_=1):\n new_words = []\n for w in words:\n if(len(w) > min_):\n new_words += [w]\n return new_words",
"def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)",
"def filter_by_length(genes, transcripts, min_length):\n filtered_transcripts = {}\n filtered_genes = {}\n\n for transcript_id in transcripts:\n curr_transcript = transcripts[transcript_id]\n length = curr_transcript.get_length()\n\n if length >= min_length:\n filtered_transcripts[transcript_id] = curr_transcript\n gene_id = curr_transcript.gene_id\n if gene_id in genes:\n filtered_genes[gene_id] = genes[gene_id]\n\n return filtered_genes, filtered_transcripts",
"def remove_below_lower_length_limit(self) -> None:\n for column_name in self.data:\n threshold_executor = TrimUtils.remove_text_below_lower_length_threshold(\n self.config[f'{column_name}_lower_length_limit']\n )\n self.data = self.data[self.data[column_name].map(threshold_executor)]\n self.data.reset_index(drop=True, inplace=True)",
"def enforce_node_consistency(self):\n for node in self.domains:\n #creates a list of words per node to remove since we cannot remove the elements in a set while it is iterating\n words_to_remove= []\n\n for word in self.domains[node]:\n if len(word) != node.length:\n words_to_remove.append(word)\n\n for word in words_to_remove:\n self.domains[node].remove(word)",
"def dereplication_fulllength(amplicon_file, minseqlen, mincount):\n\n seq_list = []\n for seq in read_fasta(amplicon_file, minseqlen):\n seq_list.append(seq)\n\n for o in Counter(seq_list).most_common():\n if o[1] > mincount:\n yield o",
"def removeLowFreqWords(self, words, minFreq):\n\t\tfrequency = defaultdict(int)\n\t\tfor word in words:\n\t\t\tfrequency[word] += 1\n\t\tremoved = [word for word in words if frequency[word] > minFreq]\t\t\n\t\treturn removed",
"def chimera_removal(amplicon_file, minseqlen, mincount, chunk_size, kmer_size):\n # Sequence\n sequences = []\n occ = []\n for de_rep in dereplication_fulllength(amplicon_file, minseqlen, mincount):\n sequences.append(de_rep[0])\n occ.append(de_rep[1])\n\n # Séparation en segment de taille chunk_size + génération du dictionnaire de kmer\n segments, kmer_dico = [], {}\n for i in range(len(sequences)):\n segments.append(get_chunks(sequences[i], chunk_size))\n kmer_dico = get_unique_kmer(kmer_dico, sequences[i], i, kmer_size)\n\n # Génération des best_mates pour un segment donné\n best_mates = []\n for sequence_chunks in segments:\n for each_chunk in sequence_chunks:\n best_mates.append(search_mates(kmer_dico, each_chunk, kmer_size))\n\n # Recherche de séquences parentes - séquences présentes dans toutes les listes\n seq_parentes = common(best_mates[0], best_mates[1])\n\n # Déterminer si une séquence est une chimère\n chimera_id = []\n chunk_seq_list = [get_chunks(sequences[seq_parentes[0]], chunk_size)]\n chunk_seq_list += [get_chunks(sequences[seq_parentes[1]], chunk_size)]\n for i in range(len(sequences)):\n if not i in seq_parentes:\n chunk_chim = get_chunks(sequences[i], chunk_size)\n\n perc_identity_matrix = [[] for c in range(len(chunk_chim))]\n for j in range(len(chunk_seq_list)):\n for l,chunk in enumerate(chunk_chim):\n perc_identity_matrix[l].append(\n get_identity(nw.global_align(chunk, chunk_seq_list[j][l], gap_open=-1, gap_extend=-1, matrix=os.path.abspath(os.path.join(os.path.dirname(__file__), '../agc')) + \"/MATCH\")))\n\n if detect_chimera(perc_identity_matrix):\n chimera_id.append(i)\n\n\n for i in range(len(sequences)):\n if not i in chimera_id:\n yield [sequences[i], occ[i]]",
"def truncate_reads(tmp_dir, infile, unaligned_set, n, min_len):\n\n outfile = \"{0}/truncated.fastq\".format(tmp_dir)\n with ps.FastxFile(infile, \"r\") as inf, open(outfile, \"w\") as outf:\n for entry in inf:\n if entry.name in unaligned_set or n == min_len:\n entry.sequence = entry.sequence[:n]\n entry.quality = entry.quality[:n]\n outf.write(str(entry) + \"\\n\")\n return outfile",
"def del_min(self):\n min_idx = self.__pq[1]\n self.__swap(1, self.__n)\n self.__n -= 1\n self.__sink(1)\n self.__keys[self.__pq[self.__n + 1]] = None\n self.__qp[self.__pq[self.__n + 1]] = -1\n return min_idx",
"def dereplication_fulllength(amplicon_file, minseqlen, mincount):\n occ = {}\n for seq in read_fasta(amplicon_file, minseqlen):\n if not seq in occ:\n occ[seq] = 0\n occ[seq] += 1\n\n # Sort occ dictionary by value - descending order\n new_occ = {\n k: v for k, v in sorted(occ.items(), key=lambda item: item[1], reverse=True)\n }\n\n for seq, count in new_occ.items():\n if count >= mincount:\n try:\n yield [seq, count]\n except StopIteration:\n return",
"def within_length(flowgram, minlength=0, maxlength=400):\r\n seq = flowgram.toSeq()\r\n l = len(seq)\r\n return (l >= minlength and l <= maxlength)",
"def remove_min(self):\n raise NotImplementedError('must be implemented by subclass')",
"def remove_min(self):\n raise NotImplementedError('must be implemented by subclass')",
"def remove(self, pos, length):\n if pos in self.removals:\n self.removals[pos] += length\n else:\n self.removals[pos] = length",
"def filter_contigs(contig_file, min_length):\n basename_original = os.path.splitext(contig_file)[0] \n basename_filtered = basename_original + \"_min_len_\" + str(min_length)\n contigs_filtered = basename_filtered + \".fasta\"\n num_reads = fasta_filter.seq_length_greater(contig_file, contigs_filtered, min_length)\n if num_reads == 0:\n print \"All of the contigs are smaller than \" + str(min_length) + \" nucleotide.\"\n print \"NOTHING is going to be annotated. (Decrease length or drop the argument for annotation.)\"\n sys.exit(0)\n return (basename_filtered, contigs_filtered)",
"def removeExcessBMElem(l, correct_length):\n l.ensure_lookup_table()\n length = len(l)\n i = length - correct_length\n while i > 0:\n l.remove(l[correct_length + i - 1])\n l.ensure_lookup_table()\n i -= 1"
] | [
"0.6422194",
"0.61645573",
"0.5984004",
"0.5932776",
"0.5850423",
"0.58373356",
"0.5711098",
"0.56887233",
"0.55182254",
"0.5453946",
"0.54460245",
"0.5416913",
"0.53723514",
"0.533372",
"0.5279013",
"0.5275511",
"0.5242233",
"0.52177817",
"0.5207964",
"0.52019596",
"0.51788336",
"0.5159253",
"0.5132838",
"0.5106093",
"0.5100256",
"0.50624615",
"0.50624615",
"0.5052962",
"0.5052814",
"0.50295436"
] | 0.9105574 | 0 |
Remove edges with weight less than w. | def filter_by_weight(self, w):
G = nx.Graph()
for u, v in self.edges:
if self.graph[u][v]["weight"] >= w:
# Add the nodes first in case they have data
G.add_node(u, **self.nodes(data=True)[u])
G.add_node(v, **self.nodes(data=True)[v])
G.add_edge(u, v, **self.graph[u][v])
self.graph = G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_heavier_than(self, w):\n G = nx.DiGraph()\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] <= w:\n G.add_edge(u, v, weight=self.graph[u][v][\"weight\"], alignment=self.graph[u][v][\"alignment\"])\n self.graph = G",
"def trim_edges(g, weight=1):\n g2 = nx.Graph()\n for fnode, tonode, edgedata in g.edges(data=True):\n if edgedata[\"weight\"] > weight:\n g2.add_edge(fnode, tonode, **edgedata)\n return g2",
"def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]",
"def trim(self):\n while np.any(self.vertex_valance <= 1):\n edge_to_keep = np.all(self.vertex_valance[self.edges] > 1,\n axis=1).tolist();\n self.raw_wires.filter_edges(edge_to_keep);\n vertex_to_keep = [len(self.get_vertex_neighbors(i)) > 0 for i in\n range(self.num_vertices)];\n self.raw_wires.filter_vertices(vertex_to_keep);\n\n self.__initialize_wires();\n if len(self.vertices) == 0:\n raise RuntimeError(\"Zero vertices left after trimming.\");",
"def filter_edges_by_weight_range(self, min_weight=None, max_weight=None):\n def weight_filter():\n for g1, g2, w in self.gen:\n ok_min = ok_max = True\n if min_weight is not None and w < min_weight:\n ok_min = False\n #else:\n # print(\"min ok: {} >= {}\".format(w, min_weight))\n if max_weight is not None and w > max_weight:\n ok_max = False\n #else:\n # print(\"max ok: {} <= {}\".format(w, max_weight))\n if ok_min and ok_max:\n yield g1, g2, w\n return self.filter(weight_filter())",
"def filter_edges(self, to_keep):\n self.raw_wires.filter_edges(to_keep);\n self.__initialize_wires();",
"def weights_clipping(self):\n max_weigth = np.amax(np.abs(self._weights))\n self._weights = self._clipping*self._weights/max_weigth",
"def trim(self):\n\n # identify edges outside window\n\n min_time = self.latest_time - self.t_window\n edges_to_trim = self.graph.es.select(time_lt=min_time)\n if self.verbose: print(\"Edges to trim: \"+str(edges_to_trim))\n\n # remove edges outside of t_window\n self.graph.delete_edges(edges_to_trim)\n\n # identify vertices with 0 degree to delete\n vertices_to_trim = self.graph.vs.select(_degree=0)\n if self.verbose: print(\"Vertices to trim: \"+str(vertices_to_trim._name_index))\n self.graph.delete_vertices(vertices_to_trim)",
"def cut_ppl_off(self, G):\r\n for pre, node in list(G.edges):\r\n ew = G.edges[pre, node]['weight']\r\n if ew <= -.95:\r\n G.remove_edge(pre, node)\r\n elif ew >= 1:\r\n G.edges[pre, node]['weight'] = 1.0\r\n else:\r\n continue\r\n return G",
"def filter_non_one(self):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] == 1:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def trim_edges(self, keep=0.5):\n\n for tid1, tid2 in self.graph.edges():\n if random.random() > keep:\n self.graph.remove_edge(tid1, tid2)",
"def filter_log_by_weight(directly_follows_graph, edge_weight):\n edges_filtered = dict()\n for (key, value) in directly_follows_graph.items():\n if value > edge_weight:\n edges_filtered[key] = value\n return edges_filtered",
"def filter_vertices(self, to_keep):\n self.raw_wires.filter_vertices(to_keep);\n self.__initialize_wires();",
"def __filterEdges(self):",
"def get_edges_weighted(self):\n edges = []\n for v in self.vertices.values():\n for w in v.neighbors:\n edges.append((v.name, w.name, v.neighbors[w]))\n return edges",
"def remove_weights(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.ArcMap(self.fst[0], result.fst, openfst.RmTropicalWeightMapper())\n return result",
"def prune_weights(model, fraction):\n weights = model.get_weights()\n\n def prune_weight_matrix(weight_matrix):\n # Copy the weights so we don't modify the original network.\n weight_matrix = np.copy(weight_matrix)\n flat_weight_matrix = np.reshape(weight_matrix, (-1,))\n kth = int(len(flat_weight_matrix) * fraction)\n # Determine the k least relevant weights using np.argpartition.\n indices = np.argpartition(np.abs(flat_weight_matrix), kth)\n # Prune them.\n flat_weight_matrix[indices[:kth]] = 0\n weight_matrix = np.reshape(flat_weight_matrix, weight_matrix.shape)\n return weight_matrix\n\n weights[:-1] = list(map(prune_weight_matrix, weights[:-1]))\n\n (_, n_classes) = weights[-1].shape\n # Create a pruned model.\n return create_model(\n LAYER_SIZES,\n n_classes,\n layer_fn=Sparse,\n layer_kwargs_fn=sparse_kwargs,\n weights=weights,\n )",
"def test_bad_weights(self, dim, graph):\r\n s = [0, 1]\r\n w = np.ones(dim - 1)\r\n with pytest.raises(ValueError, match=\"Number of node weights must match number of nodes\"):\r\n clique.shrink(s, graph, node_select=w)",
"def clean_edges(self):",
"def remove_edge(self, edge: Edge) -> Edge:",
"def truncate_weights(self, max_weight):\n S = sum(self.weights)\n to_trunc = (self.weights > S*max_weight)\n n_to_trunc = sum(to_trunc)\n if n_to_trunc == 0:\n S = sum(self.weights)\n if not S > 0.:\n raise ValueError(f'Sum of weights is {S} but should be positive')\n self.weights /= S\n return S\n \n print(f\"Truncating {n_to_trunc:d} weights\")\n to_not_trunc = torch.logical_not(to_trunc)\n sum_untrunc = sum(self.weights[to_not_trunc])\n if sum_untrunc == 0:\n # Impossible to truncate further!\n S = sum(self.weights)\n if not S > 0.:\n raise ValueError(f'Sum of weights is {S} but should be positive')\n self.weights /= S\n return S\n trunc_to = max_weight * sum_untrunc / (1. - max_weight * n_to_trunc)\n max_untrunc = torch.max(self.weights[to_not_trunc])\n ## trunc_to calculation is done so that\n ## after w[to_trunc]=trunc_to\n ## w[to_trunc] / sum(w) all equal max_weight\n ## **But** we don't want to truncate below next smallest weight\n if trunc_to >= max_untrunc:\n self.weights[to_trunc] = trunc_to\n S = sum(self.weights)\n if not S > 0.:\n raise ValueError(f'Sum of weights is {S} but should be positive')\n self.weights /= S\n return S\n else:\n self.weights[to_trunc] = max_untrunc\n return self.truncate_weights(max_weight)",
"def truncate_weights(weights, min_weight=0.01, rescale=True):\n if not isinstance(weights, pd.Series):\n raise ValueError(\"Weight vector is not a Series\")\n\n adj_weights = weights[:]\n adj_weights[adj_weights.abs() < min_weight] = 0.0\n\n if rescale:\n if not adj_weights.sum():\n raise ValueError(\"Cannot rescale weight vector as sum is not finite\")\n\n adj_weights /= adj_weights.sum()\n\n return adj_weights",
"def RemoveWeights(frame, zero_nans=False):\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if not frame[\"T\"].weighted:\n return frame\n ValidateMaps(frame)\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.remove_weights(tmap, qmap, umap, wmap, zero_nans=zero_nans)\n else:\n wmap = frame[\"Wunpol\"]\n maps.remove_weights_t(tmap, wmap, zero_nans=zero_nans)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame",
"def update_edge_weight(self, G):\r\n for node in G.nodes:\r\n n1 = G.nodes[node]['belief_strength'] #gets a node's belief strength\r\n for pre in G.predecessors(node):\r\n n2 = G.nodes[pre]['belief_strength'] #gets the node's predecessors' belief strength\r\n dif = abs(n1-n2)\r\n if n1*n2> 0:\r\n G.edges[pre, node]['weight'] += (dif/2000) #clean\r\n else:\r\n G.edges[pre, node]['weight'] -= (dif/2000)\r\n return G",
"def cutoff_graph( g, simi_cutoff ) :\n g = copy.deepcopy( g )\n edges_to_be_deleted = []\n for e in g.edges() :\n if (g[e[0]][e[1]][\"similarity\"] < simi_cutoff) :\n edges_to_be_deleted.append( e )\n g.remove_edges_from( edges_to_be_deleted )\n return g",
"def graph_no_edges():\n from weighted_graph import Weighted\n example_graph = Weighted()\n example_graph.add_node('BB')\n example_graph.add_node(82)\n example_graph.add_node(99)\n example_graph.add_node('AA')\n return example_graph",
"def plotWeights(w):\n w = w[:,:,0,:]\n # rescale w to 0.0 - 1.0\n mincode = np.amin(w)\n maxcode = np.amax(w)\n w = (w - mincode) / (maxcode - mincode)\n\n out = np.zeros((15, 15))\n for x in range(0,4):\n for y in range(0,4):\n c = x*4+y\n out[x*4:x*4+3, y*4:y*4+3] = w[:,:,c]\n return out",
"def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)",
"def goemans_williamson_weighted(graph):\n adjacency = nx.linalg.adjacency_matrix(graph)\n adjacency = adjacency.toarray()\n solution = _solve_cut_vector_program(adjacency)\n sides = _recover_cut(solution)\n\n nodes = list(graph.nodes)\n left = {vertex for side, vertex in zip(sides, nodes) if side < 0}\n right = {vertex for side, vertex in zip(sides, nodes) if side >= 0}\n return Cut(left, right)",
"def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n"
] | [
"0.8140378",
"0.74055135",
"0.62548697",
"0.6137488",
"0.61238205",
"0.60734946",
"0.6054999",
"0.60355353",
"0.6000572",
"0.5942194",
"0.5858607",
"0.58000296",
"0.57271606",
"0.5666711",
"0.5632245",
"0.56165063",
"0.5598718",
"0.55903673",
"0.55457175",
"0.5539666",
"0.5533667",
"0.5531768",
"0.55297387",
"0.5521981",
"0.5500923",
"0.5494044",
"0.5474488",
"0.54705775",
"0.5468242",
"0.5456312"
] | 0.7758672 | 1 |
Remove edges where the weight is not equal to one. | def filter_non_one(self):
G = nx.Graph()
for u, v in self.edges:
if self.graph[u][v]["weight"] == 1:
# Add the nodes first in case they have data
G.add_node(u, **self.nodes(data=True)[u])
G.add_node(v, **self.nodes(data=True)[v])
G.add_edge(u, v, **self.graph[u][v])
self.graph = G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trim_edges(g, weight=1):\n g2 = nx.Graph()\n for fnode, tonode, edgedata in g.edges(data=True):\n if edgedata[\"weight\"] > weight:\n g2.add_edge(fnode, tonode, **edgedata)\n return g2",
"def remove_heavier_than(self, w):\n G = nx.DiGraph()\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] <= w:\n G.add_edge(u, v, weight=self.graph[u][v][\"weight\"], alignment=self.graph[u][v][\"alignment\"])\n self.graph = G",
"def trim_edges(self, keep=0.5):\n\n for tid1, tid2 in self.graph.edges():\n if random.random() > keep:\n self.graph.remove_edge(tid1, tid2)",
"def filter_by_weight(self, w):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] >= w:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def remove_trivial_edges(self):\n if self.E > 0:\n valid = self.edges[:, 0] != self.edges[:, 1]\n self.edges = self.edges[valid]\n self.weights = self.weights[valid]\n self.E = np.sum(valid)\n return self.E",
"def filter_edges(self, to_keep):\n self.raw_wires.filter_edges(to_keep);\n self.__initialize_wires();",
"def trim(self):\n while np.any(self.vertex_valance <= 1):\n edge_to_keep = np.all(self.vertex_valance[self.edges] > 1,\n axis=1).tolist();\n self.raw_wires.filter_edges(edge_to_keep);\n vertex_to_keep = [len(self.get_vertex_neighbors(i)) > 0 for i in\n range(self.num_vertices)];\n self.raw_wires.filter_vertices(vertex_to_keep);\n\n self.__initialize_wires();\n if len(self.vertices) == 0:\n raise RuntimeError(\"Zero vertices left after trimming.\");",
"def graph_no_edges():\n from weighted_graph import Weighted\n example_graph = Weighted()\n example_graph.add_node('BB')\n example_graph.add_node(82)\n example_graph.add_node(99)\n example_graph.add_node('AA')\n return example_graph",
"def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)",
"def cut_ppl_off(self, G):\r\n for pre, node in list(G.edges):\r\n ew = G.edges[pre, node]['weight']\r\n if ew <= -.95:\r\n G.remove_edge(pre, node)\r\n elif ew >= 1:\r\n G.edges[pre, node]['weight'] = 1.0\r\n else:\r\n continue\r\n return G",
"def trim(self):\n\n # identify edges outside window\n\n min_time = self.latest_time - self.t_window\n edges_to_trim = self.graph.es.select(time_lt=min_time)\n if self.verbose: print(\"Edges to trim: \"+str(edges_to_trim))\n\n # remove edges outside of t_window\n self.graph.delete_edges(edges_to_trim)\n\n # identify vertices with 0 degree to delete\n vertices_to_trim = self.graph.vs.select(_degree=0)\n if self.verbose: print(\"Vertices to trim: \"+str(vertices_to_trim._name_index))\n self.graph.delete_vertices(vertices_to_trim)",
"def __filterEdges(self):",
"def remove_edges(self, valid):\n if np.size(valid) != self.E:\n raise ValueError(\"the input vector does not have the correct size\")\n valid = np.reshape(valid, np.size(valid))\n self.E = int(valid.sum())\n self.edges = self.edges[valid != 0]\n self.weights = self.weights[valid != 0]",
"def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]",
"def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)",
"def clean_edges(self):",
"def remove_edges(self, node: NodeKey) -> Edge:",
"def remove_edge(self, edge: Edge) -> Edge:",
"def disconnect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n # `discard` ignores non-existing elements (unlike `remove`)\n app.edges[src_id].discard(trg_id)\n self.mark_as_unsaved()\n self.update()",
"def cutoff_graph( g, simi_cutoff ) :\n g = copy.deepcopy( g )\n edges_to_be_deleted = []\n for e in g.edges() :\n if (g[e[0]][e[1]][\"similarity\"] < simi_cutoff) :\n edges_to_be_deleted.append( e )\n g.remove_edges_from( edges_to_be_deleted )\n return g",
"def filter_vertices(self, to_keep):\n self.raw_wires.filter_vertices(to_keep);\n self.__initialize_wires();",
"def delete_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.delete_edge(i, j)",
"def edges_without_adjacencies(self):\n edges = dict(self.eligible_edges_with_indexes)\n for adj in self.adjacencies.values():\n for edge_info in adj:\n if edge_info.self_edge_index in edges:\n edges[edge_info.self_edge_index] = None\n return list(filter(lambda x: x is not None, edges.values()))",
"def edge_filter(edge):\n # <= because self loops\n idx, jdx = edge\n return ((not graph.has_edge(idx, jdx) or is_multigraph) and\n (idx <= jdx or is_directed) and\n (idx != jdx or self_loops))",
"def remove_edge(self, v1, v2):\n verts = self.vertices\n if v1 in verts and v2 in verts[v1].adj:\n del verts[v1].adj[v2]\n if v2 in verts and v1 in verts[v2].adj:\n del verts[v2].adj[v1]",
"def _remove_edge(self, actor, target):\n nodes = (actor, target)\n for i in (0, 1):\n try:\n self._vertices[nodes[i]]\n except KeyError:\n continue\n\n self._vertices[nodes[i]].remove_neighbor(nodes[(i + 1) % 2])\n\n if self._vertices[nodes[i]].degree == 0:\n del self._vertices[nodes[i]]",
"def trim_adjacency_list(adj):\n old_list = adj.copy()\n for key in adj.keys():\n if links_to(old_list, key) == []:\n del(adj[key])\n return adj",
"def exclude_nodes(self, nodes):",
"def filter_graph(self, sorted_node, ploidy):\n \n for node in sorted_node:\n \n # while number of prefix edge > ploidy level\n while len(self.prefix[node]) > ploidy:\n min_weight_node = min(self.prefix[node], key=self.prefix[node].get)\n self.remove_edge(min_weight_node, node)\n \n # while number of suffix edge > ploidy level\n while len(self.suffix[node]) > ploidy:\n min_weight_node = min(self.suffix[node], key=self.suffix[node].get)\n self.remove_edge(node, min_weight_node)\n \n print(\"Graph is reduced to best overlap graph.\")",
"def prune_weights(model, fraction):\n weights = model.get_weights()\n\n def prune_weight_matrix(weight_matrix):\n # Copy the weights so we don't modify the original network.\n weight_matrix = np.copy(weight_matrix)\n flat_weight_matrix = np.reshape(weight_matrix, (-1,))\n kth = int(len(flat_weight_matrix) * fraction)\n # Determine the k least relevant weights using np.argpartition.\n indices = np.argpartition(np.abs(flat_weight_matrix), kth)\n # Prune them.\n flat_weight_matrix[indices[:kth]] = 0\n weight_matrix = np.reshape(flat_weight_matrix, weight_matrix.shape)\n return weight_matrix\n\n weights[:-1] = list(map(prune_weight_matrix, weights[:-1]))\n\n (_, n_classes) = weights[-1].shape\n # Create a pruned model.\n return create_model(\n LAYER_SIZES,\n n_classes,\n layer_fn=Sparse,\n layer_kwargs_fn=sparse_kwargs,\n weights=weights,\n )"
] | [
"0.7693412",
"0.6992114",
"0.6946491",
"0.69120675",
"0.65813303",
"0.6574783",
"0.6481754",
"0.64104885",
"0.6400494",
"0.6344421",
"0.6318638",
"0.6294974",
"0.6269559",
"0.62638175",
"0.62178195",
"0.6197531",
"0.6176533",
"0.61634064",
"0.59876657",
"0.5881332",
"0.5871327",
"0.58483326",
"0.5825783",
"0.5818328",
"0.58172566",
"0.5785025",
"0.5763832",
"0.57301974",
"0.572676",
"0.5725733"
] | 0.75805086 | 1 |
Replace weights with matching weights from a new scaffold graph. All other edge data remains. | def steal_weights_from(self, in_sg):
G = ScaffoldGraph(self.components_fasta_fname)
for u, v in self.edges:
if in_sg.has_edge(u, v):
# Make a copy of the edge data and remove the weight
new_edge_data = dict(self.graph[u][v])
new_edge_data.pop("weight")
G.add_edge(u, v, weight=in_sg[u][v]["weight"], **new_edge_data)
return G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]",
"def reassignWeights(self,weights):\n\t\n\t\tbranches = self.collectAllBranches()\n\n\t\tfor i in range(self.nBranches):\n\n\t\t\tbranches[i].weight = weights[i]",
"def reset_weights(self):\n self.head.reset_weights()",
"def update_weights(self):\n\t\tpass",
"def reset_edges(self):\n super().reset_edges()\n\n # If we're in default state, notheing to rest\n if self._modified_weighted_adj_matrices is None:\n return\n\n # Degrees are reset, so we need to reset the original weight scaling\n if self.scale_weights and not self.scaling_skipped:\n self._scale_weights_to_degree()\n self._generate_weighted_adj_matrices()\n else:\n # No weight scaling so just load prev values from cache\n self.weighted_adj_matrices = {**self.weighted_adj_matrices, **self._modified_weighted_adj_matrices}\n self._modified_weighted_adj_matrices = None",
"def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()",
"def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()",
"def update_edge_weight(self, G):\r\n for node in G.nodes:\r\n n1 = G.nodes[node]['belief_strength'] #gets a node's belief strength\r\n for pre in G.predecessors(node):\r\n n2 = G.nodes[pre]['belief_strength'] #gets the node's predecessors' belief strength\r\n dif = abs(n1-n2)\r\n if n1*n2> 0:\r\n G.edges[pre, node]['weight'] += (dif/2000) #clean\r\n else:\r\n G.edges[pre, node]['weight'] -= (dif/2000)\r\n return G",
"def max_weight_matching(self):\n undirected_sg = nx.Graph()\n for u, v in self.edges:\n undirected_sg.add_edge(u, v, weight=self.graph[u][v][\"weight\"])\n matching = nx.max_weight_matching(G=undirected_sg)\n\n # Remove any cycles that would result from adding intra-sequence edges\n # Make a new graph\n cover_graph = nx.Graph()\n cover_graph.add_nodes_from(undirected_sg.nodes(data=True))\n\n # Add edges connecting contig ends\n node_base_set = set([i[:-2] for i in list(cover_graph.nodes)])\n for node in node_base_set:\n cover_graph.add_edge(node + \"_b\", node + \"_e\", weight=np.inf)\n\n # Add the edges that form the matching\n for u, v in matching:\n cover_graph.add_edge(u, v, **undirected_sg[u][v])\n\n # Remove any potential cycles\n edges_to_delete = []\n for cc in nx.connected_components(G=cover_graph):\n cc = cover_graph.subgraph(cc).copy()\n if cc.number_of_nodes() == cc.number_of_edges():\n assembly_edges = cc.edges(data=True)\n edge_to_delete = min(assembly_edges, key=lambda entry: entry[2][\"weight\"])\n edges_to_delete.append((edge_to_delete[0], edge_to_delete[1]))\n\n # Add the edges that form the matching into a new PatchScaffoldGraph\n new_psg = PatchScaffoldGraph(self.components_fn)\n for u, v in matching:\n if (u, v) not in edges_to_delete and (v, u) not in edges_to_delete:\n new_psg.add_edge(u, v, self.graph[u][v][\"alignment\"])\n new_psg.add_edge(v, u, self.graph[v][u][\"alignment\"])\n\n return new_psg",
"def reset_weights(self):\r\n self._weights = deepcopy(self._tmp_weights)\r\n self._tmp_weights = None",
"def scale_edge_weights(graph):\n g = graph.copy()\n original_weights = []\n for edge in g.edges():\n original_weights.append(g.edges()[edge]['weight'])\n scaler = MinMaxScaler(feature_range=(.5,12))\n new_weights = scaler.fit_transform(np.array(original_weights).reshape(-1,1)).flatten()\n for i,edge in enumerate(g.edges()):\n g.edges()[edge]['weight'] = new_weights[i]\n return g",
"def set_weights(graph):\n vecs = graph.graph['tfidf']\n for n1, n2 in graph.edges:\n v1 = vecs[:,list(graph.nodes).index(n1)].transpose()\n v2 = vecs[:,list(graph.nodes).index(n2)].transpose()\n graph[n1][n2]['weight'] = smp.cosine_similarity(X=v1, Y=v2)[0,0]",
"def update_disc_copy(self):\n source = self.discriminator\n dest = self.discriminator_copy\n\n assert len(source.layers) == len(dest.layers)\n for dest_layer, source_layer in zip(dest.layers, source.layers):\n dest_layer.set_weights(source_layer.get_weights())",
"def update_disc_copy(self):\n source = self.discriminator\n dest = self.discriminator_copy\n\n assert len(source.layers) == len(dest.layers)\n for dest_layer, source_layer in zip(dest.layers, source.layers):\n dest_layer.set_weights(source_layer.get_weights())",
"def update_edge_weights(self):\n # set all weights to 0\n for arc in self.arc_info.keys():\n self.arc_info[arc][\"weight\"] = 0\n # iterate through all paths and add weights to arcs\n for (path, weight) in zip(self.paths, self.weights):\n for arc in path:\n # Count this path's flow toward the arc's total\n self.arc_info[arc][\"weight\"] = self.arc_info[arc][\"weight\"] + \\\n weight",
"def update_weights(self):\r\n\r\n inedges=self.in_edges\r\n for edge in inedges:\r\n weight=edge.weight+self.learning_rate*self.delta*(edge.source.activation)\r\n edge.change_weight(weight)",
"def change_weight(self, new_weight_arr):\n self.weights = new_weight_arr",
"def sample(self, graph: nx.classes.graph.Graph) -> nx.classes.graph.Graph:\n self._nodes = set()\n self._edges = set()\n self._check_graph(graph)\n self._graph = graph\n self._create_initial_seed_set()\n while len(self._nodes) < self.number_of_nodes:\n self._reweight()\n self._do_update()\n new_graph = nx.from_edgelist(self._edges)\n return new_graph",
"def updateWeights(self,weightUpdate):\n\t\n\t\tbranches = self.collectAllBranches()\n\n\t\tfor i in range(self.nBranches):\n\n\t\t\tbranches[i].weight -= weightUpdate[i]",
"def filter_by_weight(self, w):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] >= w:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def change_weight(self, new_weight):\r\n self.old_weight = self.weight\r\n self.weight = new_weight",
"def resetWeights(T):\n T.children = [(t,0) for t in T.children]\n for t,w in T.children:\n resetWeights(t)",
"def reset_weights(self):\n np.random.seed(self.seed)\n self.node_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n self.context_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)",
"def update_weight(graph: list, vertex: int, mst_set: set, weights: list, parent: list):\n for i in range(len(graph)):\n if 0 < graph[vertex][i] < weights[i] and i not in mst_set:\n weights[i] = graph[vertex][i]\n parent[i] = vertex",
"def prepare_weights(self, hs, negative, wv, docvecs, update=False):\n # set initial input/projection and hidden weights\n if not update:\n self.reset_weights(hs, negative, wv, docvecs)\n else:\n self.update_weights(hs, negative, wv)",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value",
"def reset_from(self, other_model):\n self.wv.vocab = other_model.wv.vocab\n self.wv.index2word = other_model.wv.index2word\n self.vocabulary.cum_table = other_model.vocabulary.cum_table\n self.corpus_count = other_model.corpus_count\n self.docvecs.count = other_model.docvecs.count\n self.docvecs.doctags = other_model.docvecs.doctags\n self.docvecs.offset2doctag = other_model.docvecs.offset2doctag\n self.trainables.reset_weights(self.hs, self.negative, self.wv, self.docvecs)",
"def reset_graph(self):\n raise NotImplementedError",
"def add_random_weights(G):\n for (_,_,d) in G.edges(data=True):\n d[\"weight\"] = random.random()"
] | [
"0.6404683",
"0.6398367",
"0.631176",
"0.6254147",
"0.62163275",
"0.6184205",
"0.60976106",
"0.6038473",
"0.60075223",
"0.59709936",
"0.58987814",
"0.58407134",
"0.58398473",
"0.5817509",
"0.5817509",
"0.58084476",
"0.5761213",
"0.5685151",
"0.5646441",
"0.56451553",
"0.5620068",
"0.5616164",
"0.55889827",
"0.55540824",
"0.5547866",
"0.5531312",
"0.5520177",
"0.5487627",
"0.5487006",
"0.54738045"
] | 0.6553183 | 0 |
Combine edges connecting the same nodes. Return a new SG graph. | def merge(self):
G = ScaffoldGraph(self.components_fasta_fname)
# TODO implement an add_nodes_from wrapper so direct access is unnecessary
G.graph.add_nodes_from(self.nodes(data=True))
for u, v, c in self.graph.edges:
edge_data_list = [self.graph[u][v][i] for i in self.graph[u][v]]
G.add_edge(u, v, **self._merge_edge_dicts(*edge_data_list))
return G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_edges(self):\n for u in self.G.nodes():\n for v in self.G.nodes():\n if u != v and u != \"Sink\" and v != \"Source\":\n self.G.add_edge(\n u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n )",
"def concatenate_graphs(G1, G2):\n V = G1.V + G2.V\n edges = np.vstack((G1.edges, G1.V + G2.edges))\n weights = np.hstack((G1.weights, G2.weights))\n G = WeightedGraph(V, edges, weights)\n return G",
"def getMultipleEdgesBetweenSameNodesGraph(self):\n return create_quadEdgeCross(self)",
"def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self",
"def _build_graph2(self, g1):\n g2 = g1.copy()\n for source, target, weight in self._remaining_edges:\n if weight == -1:\n self._gt_edges.append((source, target))\n if g2.has_edge(source, target):\n g2.remove_edge(source, target)\n return g2",
"def __toNetworkX(self):\n G = nx.Graph()\n G.add_nodes_from(range(self.n))\n for u in range(self.n):\n for v in range(self.n):\n if self.adjacent(u, v):\n G.add_edge(u, v)\n\n return G",
"def duplicate(self):\r\n graph = DistanceGraph(self.size)\r\n for node in self.edges:\r\n for edge in self.edges[node]:\r\n graph.edges[node][edge] = self.edges[node][edge]\r\n return graph",
"def make_graph(self):\n # update the neighbors in the graph\n self.update_neighbors()\n\n # Go through each node and get their neighbors\n self.edges = []\n for node_name in self.nodes:\n\n # get the neighbors\n node_neighbors = self.nodes[node_name].neighbors\n\n # go through neighbors\n for neighbor_name in node_neighbors:\n\n # Make the edge key\n edge_key = \"-\".join(sorted([node_name, neighbor_name]))\n\n # Add it to the edge list if it is not already present\n if edge_key not in self.edges:\n\n self.edges.append(edge_key)\n\n return self.edges",
"def graph_with_edges():\n from weighted_graph import Weighted\n new_graph = Weighted()\n new_graph.add_node('A')\n new_graph.add_node('B')\n new_graph.add_node('C')\n new_graph.add_node('D')\n new_graph.add_node('E')\n new_graph.add_node('F')\n new_graph.add_edge('A', 'B')\n new_graph.add_edge('A', 'C')\n new_graph.add_edge('B', 'D')\n new_graph.add_edge('B', 'E')\n new_graph.add_edge('C', 'B')\n new_graph.add_edge('F', 'A')\n new_graph.add_edge('C', 'F')\n return new_graph",
"def add_all_edges(self):\n for n1 in self.vertices():\n for n2 in self.vertices():\n if n1 != n2:\n self.add_edge((n1, n2))",
"def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()",
"def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G",
"def all_pairs(self):\n return chain(self.nx_graph.edges(), nx.non_edges(self.nx_graph))",
"def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g",
"def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g",
"def permuteEdges(self):\n\t\tpermuted_graph = copy.copy(self)\n\t\t# swap about half the edges\n\t\ti = len(self.graph)/2\n\t\twhile i > 0:\n\t\t\t# swap edge targets\n\t\t\tsourceA, targetA = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeA, emA = permuted_graph.graph[(sourceA, targetA)]\n\t\t\tsourceB, targetB = random.choice(permuted_graph.graph.keys())\n\t\t\tiTypeB, emB = permuted_graph.graph[(sourceB, targetB)]\n\n\t\t\t# can't be the same random choice, obviously...\n\t\t\tif sourceA == sourceB or targetA == targetB:\n\t\t\t\tcontinue\n\n\t\t\t# add edges\n\t\t\tpermuted_graph.graph[(sourceA, targetB)] = (iTypeA, emA)\n\t\t\tpermuted_graph.graph[(sourceB, targetA)] = (iTypeB, emB)\n\n\t\t\tdel permuted_graph.graph[(sourceA, targetA)]\n\t\t\tdel permuted_graph.graph[(sourceB, targetB)]\n\n\t\t\ti -= 1\n\n\t\t# return a new graph object\t\t\n\t\treturn permuted_graph",
"def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges",
"def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g",
"def sub_graph_merging(self):",
"def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)",
"def _bayes_net_graph(nodes: List[str], edges: List[Tuple[str, str]]):\n sources_and_target = [[target] for target in range(len(nodes))]\n\n for source_node, target_node in edges:\n source = nodes.index(source_node)\n target = nodes.index(target_node)\n sources_and_target[target].insert(0, source)\n\n return [\n tuple(st for st in sts) if len(sts) > 1 else sts[0]\n for sts in sources_and_target\n ]",
"def create_graph_from_edges(edges):\n G = nx.Graph()\n for e in edges:\n p1 = e[0]\n p2 = e[1]\n dist = LA.norm(np.array(p2) - np.array(p1))\n G.add_edge(p1, p2, weight=dist)\n return G",
"def connect_all(graph, nodeset):\n for element in nodeset:\n graph.add_node(element)\n for element1 in nodeset:\n for element2 in nodeset:\n if not element1 == element2:\n graph.add_edge(element1, element2)\n return graph",
"def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. \n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)",
"def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2",
"def ordering_graph(self):\n\n g = nx.DiGraph()\n\n # add times\n for t in self.nodes_iter():\n g.add_node(t)\n\n # add existing edges\n for t1, t2 in self.edges_iter():\n g.add_edge(t1, t2)\n\n # connect every pair of anchored times\n anchored = sorted(self.anchored())\n for t1, t2 in itertools.combinations(anchored, 2):\n g.add_edge(t1, t2)\n\n # connect every time with its sucessors\n _g = g.copy()\n for t1 in _g:\n for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):\n g.add_edge(t1, t2)\n\n return g",
"def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result",
"def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)",
"def get_graph(self):\n graph = copy.deepcopy(self.G)\n for source, dests in graph.items():\n for dest in dests:\n constraint = graph[source][dest]['constraint']\n new_constraint = self.preprocess_constraint(constraint)\n graph[source][dest]['constraint'] = new_constraint\n return graph",
"def graph(self):\n graph = nx.DiGraph()\n for name, joint in self.joints.items():\n graph.add_edge(*joint.connects, joint=name)\n return graph"
] | [
"0.6441835",
"0.6406052",
"0.63969254",
"0.63897246",
"0.6340736",
"0.6339551",
"0.63006055",
"0.62655014",
"0.62605125",
"0.6246468",
"0.62380224",
"0.62217516",
"0.6177223",
"0.61563164",
"0.6108449",
"0.60815537",
"0.6053574",
"0.6048477",
"0.60336834",
"0.60213846",
"0.6005166",
"0.59877723",
"0.5986485",
"0.5984307",
"0.5974015",
"0.5970474",
"0.59591115",
"0.59568566",
"0.5956072",
"0.59558064"
] | 0.67115784 | 0 |
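To make the record above easier to follow, here is a minimal networkx sketch of the same multi-edge merge, assuming a plain MultiGraph and a simple sum-the-weights combine step (the real _merge_edge_dicts and ScaffoldGraph constructor are not shown in the record, so both are stand-ins).

import networkx as nx

def merge_parallel_edges(mg):
    # Collapse each set of parallel edges in a MultiGraph into one edge of a
    # simple Graph, here by summing weights (stand-in for _merge_edge_dicts).
    g = nx.Graph()
    g.add_nodes_from(mg.nodes(data=True))
    for u, v in set(mg.edges()):
        parallel = [mg[u][v][k] for k in mg[u][v]]
        g.add_edge(u, v, weight=sum(d.get("weight", 1) for d in parallel))
    return g

mg = nx.MultiGraph()
mg.add_edge("a", "b", weight=2)
mg.add_edge("a", "b", weight=3)
print(merge_parallel_edges(mg)["a"]["b"]["weight"])  # 5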
Update the graph given AGP files. | def add_agps(self, in_agps, in_weights=None, exclusion_set=None):
if exclusion_set is None:
exclusion_set = set()
if in_weights is None:
in_weights = [1] * len(in_agps)
else:
if len(in_agps) != len(in_weights):
raise ValueError("Must assign one weight per AGP file, or none at all.")
# Iterate through the AGP files to update the graph
for agp, weight in zip(in_agps, in_weights):
agp = os.path.abspath(agp)
if agp in self.agps:
raise ValueError("%s appears more than once." % agp)
# Add this AGP and its weight to the master list
self.agps.append(agp)
self.weights.append(weight)
for ap in self._get_assembly_points(agp, weight):
for u, v, w in ap.get_realizations():
if u[:-2] not in exclusion_set and v[:-2] not in exclusion_set:
u_base, v_base = u[:-2], v[:-2]
u_len = self.get_component_len(u_base)
self.add_edge(
u,
v,
weight=w,
source_fname=ap.source_agp_fname,
is_known_gap_size=ap.is_known_gap_size,
gap_size=ap.gap_size,
gap_type=ap.gap_type,
linkage=ap.linkage,
linkage_evidence=ap.linkage_evidence,
seqs=[u_base, v_base],
pos=[u_len, 0]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, paths):\n raise NotImplementedError",
"def update_graph(graph):\n\n if not isinstance(graph, WeightedGraph):\n raise TypeError('update_graph(graph): graph must be a WeightedGraph object')\n\n # check if graph has been already updated\n if graph.updated >= 1:\n return\n else:\n graph.updated = 1\n\n # update every vertice of the graph\n for vertice in graph.Vertices:\n update_adj_list(vertice)",
"def update(self):\r\n self.g = self.create_graph()",
"def update(self, edges) -> None:\n for v1, v2 in edges:\n self.add(v1, v2)",
"def force_update_graph(self):\n self.updated_data = 1\n self.update_graph()",
"def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))",
"def update(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.update, params)",
"def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)",
"def update():\n\n # load the OPML file and update any feeds\n for o in oercloud.Session().query(oercloud.Feed).filter_by(\n feed_type=oercloud.feed.OPML):\n \n aggregator.LOG.info(\"Loading OPML from %s\" % o.url)\n update_feed_list(opml.parse(o.url))\n\n # check each feed and see if it should be polled\n check_feeds()",
"def update_graph(self):\n if self.update_callback:\n self.update_callback()",
"def readdata(self, filepaths):\n pass",
"def update(src):",
"def update(self, src, labels): # real signature unknown; restored from __doc__\n pass",
"def update(self, attributes, ifAll=False, forceGraphInfo=False):\n for key in attributes:\n k = key.lower().replace('', '')\n try:\n if k in self.headersKeys:\n if attributes[key] != '':\n self.headers.update({k: attributes[key]})\n elif k in self.headers:\n del self.headers[k]\n elif (k in self.graphInfoKeys or forceGraphInfo\n or k.startswith('subplots')):\n if attributes[key] != '':\n self.graphInfo.update({k: attributes[key]})\n elif k in self.graphInfo:\n del self.graphInfo[k]\n # by default nothing in sampleInfo, everything in curves\n else:\n if ifAll:\n for i in range(self.length()):\n self.curve(i).update({k: attributes[key]})\n else:\n self.curve(-1).update({k: attributes[key]})\n except Exception as e:\n print('Error Graph.update: key', key, ' attributes',\n attributes, 'exception', e)",
"def update_chicago_graph(path=\"chicago.xml\"):\n\n\tChicago = download_chicago_graph()\n\tsave_chicago_graph(Chicago, path)",
"def test_watch_graph_changes(self):\n self.make_files(foo='foo', bar='bar')\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n watcher = pike.watch_graph(graph)\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'bar'])\n self.make_files(foo='foo', bar='foo')\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'foo'])",
"def updateFileInfo(self, data, pid):\n self.db.updateLinkInfo(data)\n self.evm.dispatchEvent(\"packageUpdated\", pid)",
"def update_graph(self, name, owner_email=None, graph=None, is_public=None):\n\t\tif graph is not None:\n\t\t\tdata = {\n\t\t\t\t'name': graph.get_name(),\n\t\t\t\t'is_public': 0 if is_public is None else is_public,\n\t\t\t\t'graph_json': graph.compute_graph_json(),\n\t\t\t\t'style_json': graph.get_style_json()\n\t\t\t}\n\t\telse:\n\t\t\tdata = {\n\t\t\t\t'is_public': 0 if is_public is None else is_public,\n\t\t\t}\n\n\t\tgraph = self.get_graph(name, owner_email=owner_email)\n\t\tif graph is None or 'id' not in graph:\n\t\t\traise Exception('Graph with name `%s` doesnt exist for user `%s`!' % (name, self.username))\n\t\telse:\n\t\t\treturn self._make_request(\"PUT\", '/api/v1/graphs/' + str(graph['id']), data=data).json()",
"def _update_cfg_from_files(self, files):\n\t\tfor file in files:\n\t\t\twith open(self.SettingsFolder + file) as f:\n\t\t\t\tself._add_cfg_to_list(file[:-4], yaml.load(f))",
"def update(self, gppkg_filename):\n run_command(\"gppkg --update %s\" % gppkg_filename)\n self.assertTrue(self.check_install(gppkg_filename))",
"def update_graph(q):\n global GRAPHS\n try:\n r = requests.get(url=WIKIDATA_ENTITY_BASE + q, headers={'Accept': 'text/turtle'})\n g_new = Graph()\n g_new.parse(data=r.text, format='turtle')\n except:\n print(\"Update for %s failed\" % (q))\n return\n if q in GRAPHS:\n g_old = GRAPHS[q]\n print(\"%s: old, %d -> new, %d triples\" % (q, len(g_old), len(g_new)))\n in_both, in_old, in_new = graph_diff(g_old, g_new)\n print(\"%s: < %d, == %d, > %d\" % (q, len(in_old), len(in_both), len(in_new)))\n for s, p, o in in_old:\n print(\"< %s %s %s\" % (str(s), str(p), str(o)))\n for s, p, o in in_new:\n print(\"> %s %s %s\" % (str(s), str(p), str(o)))\n else:\n print(\"%s: new, %d triples\" % (q, len(g_new)))\n GRAPHS[q] = g_new",
"def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)",
"def gbf_pub_update():\r\n LOG.info(\"Start: Update datasets in RLIDGeo warehouse.\")\r\n month_stamps = [\r\n datetime.date.today().strftime(\"%Y_%m\"),\r\n (\r\n datetime.date.today().replace(day=1)\r\n - datetime.timedelta(days=1)\r\n ).strftime(\"%Y_%m\"),\r\n ]\r\n for month_stamp in month_stamps:\r\n snapshot_db_path = SNAPSHOT_DB_PATH.format(month_stamp)\r\n if not os.path.exists(snapshot_db_path):\r\n LOG.warning(\"Snapshot database %s does not exist.\", snapshot_db_path)\r\n continue\r\n\r\n for _dataset in DATASETS:\r\n arcetl.features.update_from_dicts(\r\n dataset_path=_dataset.path(\"pub\"),\r\n update_features=source_rows(snapshot_db_path, _dataset.path(\"source\")),\r\n id_field_names=_dataset.id_field_names,\r\n field_names=_dataset.field_names,\r\n delete_missing_features=False,\r\n use_edit_session=False,\r\n )\r\n LOG.info(\"End: Update.\")",
"def draw_graph(file_name, graph):\n g_out = pgv.AGraph(strict=False, directed=True)\n for i in graph:\n g_out.add_edge(i[0], i[1], color='black')\n edge = g_out.get_edge(i[0], i[1])\n\n if len(i) > 2:\n edge.attr['label'] = i[2]\n\n g_out.layout(prog='dot')\n g_out.draw(path=\"{file_name}.svg\".format(**locals()))",
"def load_graph(context, files):\n import os.path\n import glob\n import re\n\n log.debug(\"Loading %s graph.\" % context)\n for f in files:\n if(f[-5:] == \".rdfa\"):\n format = \"rdfa\"\n elif(f[-7:] == \".jsonld\"):\n format = \"json-ld\"\n else:\n log.info(\"Unrecognised file format: %s\" % f) \n return \n if(format == \"rdfa\"):\n uri = getNss(context)\n g = STORE.graph(URIRef(uri))\n g.parse(file=open(full_path(f),\"r\"),format=format)\n STORE.bind(context,uri)\n elif(format == \"json-ld\"):\n STORE.parse(file=open(full_path(f),\"r\"),format=format, context=context_data)\n\n QUERYGRAPH = None #In case we have loaded graphs since the last time QUERYGRAPH was set",
"def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)",
"def refresh(self):\n self.update_from_file()\n self.update_from_env()",
"def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)",
"def update(*args):",
"def update(self):\n \n dbpath, config = self._start()\n \n self.config.obo = check_file(config.obo, dbpath, \"obo\") \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=True) \n phen_file = check_file(config.model_phenotypes, dbpath,\n \"model_phenotypes\", allow_none=True)\n \n summary = self._update(desc_file, phen_file) \n if len(summary[\"incorrect_ids\"]) == 0 and not config.skip_compute:\n self._compute(models=summary[\"new_phenotypes\"])\n \n self._end()"
] | [
"0.58074886",
"0.56377",
"0.55851614",
"0.55292803",
"0.54849255",
"0.5454546",
"0.54253596",
"0.5414197",
"0.5399051",
"0.5384302",
"0.5313532",
"0.52659905",
"0.5249226",
"0.524616",
"0.52403075",
"0.5229307",
"0.5228529",
"0.5216199",
"0.5182244",
"0.51628125",
"0.5146706",
"0.5111691",
"0.5104927",
"0.50704616",
"0.50688046",
"0.5014902",
"0.501035",
"0.4983091",
"0.49519154",
"0.49420434"
] | 0.5825942 | 0 |
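A hedged sketch of the per-file weighting pattern in the record above: one weight per AGP file (or none at all), absolute-path duplicate detection, and every edge tagged with the weight and name of the file it came from. parse_joins is a hypothetical stand-in for a real AGP parser; it yields a fixed pair so the snippet runs without any input files.

import os

def parse_joins(agp_path):
    # Stand-in for a real AGP parser; a real version would read agp_path and
    # yield one (contig_end, contig_end) join per assembly point.
    yield ("ctg1_e", "ctg2_b")

def collect_evidence(in_agps, in_weights=None):
    if in_weights is None:
        in_weights = [1] * len(in_agps)
    if len(in_agps) != len(in_weights):
        raise ValueError("Must assign one weight per AGP file, or none at all.")
    edges, seen = [], set()
    for agp, weight in zip(in_agps, in_weights):
        agp = os.path.abspath(agp)
        if agp in seen:
            raise ValueError("%s appears more than once." % agp)
        seen.add(agp)
        for u, v in parse_joins(agp):
            edges.append((u, v, {"weight": weight, "source_fname": agp}))
    return edges

print(len(collect_evidence(["a.agp", "b.agp"], [2, 1])))  # 2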
Remove edges with weight > w | def remove_heavier_than(self, w):
G = nx.DiGraph()
for u, v in self.edges:
if self.graph[u][v]["weight"] <= w:
G.add_edge(u, v, weight=self.graph[u][v]["weight"], alignment=self.graph[u][v]["alignment"])
self.graph = G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trim_edges(g, weight=1):\n g2 = nx.Graph()\n for fnode, tonode, edgedata in g.edges(data=True):\n if edgedata[\"weight\"] > weight:\n g2.add_edge(fnode, tonode, **edgedata)\n return g2",
"def filter_by_weight(self, w):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] >= w:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def __filterEdges(self):",
"def filter_edges(self, to_keep):\n self.raw_wires.filter_edges(to_keep);\n self.__initialize_wires();",
"def cut_ppl_off(self, G):\r\n for pre, node in list(G.edges):\r\n ew = G.edges[pre, node]['weight']\r\n if ew <= -.95:\r\n G.remove_edge(pre, node)\r\n elif ew >= 1:\r\n G.edges[pre, node]['weight'] = 1.0\r\n else:\r\n continue\r\n return G",
"def filter_non_one(self):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] == 1:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def clean_edges(self):",
"def trim_edges(self, keep=0.5):\n\n for tid1, tid2 in self.graph.edges():\n if random.random() > keep:\n self.graph.remove_edge(tid1, tid2)",
"def filter_graph(self, sorted_node, ploidy):\n \n for node in sorted_node:\n \n # while number of prefix edge > ploidy level\n while len(self.prefix[node]) > ploidy:\n min_weight_node = min(self.prefix[node], key=self.prefix[node].get)\n self.remove_edge(min_weight_node, node)\n \n # while number of suffix edge > ploidy level\n while len(self.suffix[node]) > ploidy:\n min_weight_node = min(self.suffix[node], key=self.suffix[node].get)\n self.remove_edge(node, min_weight_node)\n \n print(\"Graph is reduced to best overlap graph.\")",
"def cutoff_graph( g, simi_cutoff ) :\n g = copy.deepcopy( g )\n edges_to_be_deleted = []\n for e in g.edges() :\n if (g[e[0]][e[1]][\"similarity\"] < simi_cutoff) :\n edges_to_be_deleted.append( e )\n g.remove_edges_from( edges_to_be_deleted )\n return g",
"def filter_edges_by_weight_range(self, min_weight=None, max_weight=None):\n def weight_filter():\n for g1, g2, w in self.gen:\n ok_min = ok_max = True\n if min_weight is not None and w < min_weight:\n ok_min = False\n #else:\n # print(\"min ok: {} >= {}\".format(w, min_weight))\n if max_weight is not None and w > max_weight:\n ok_max = False\n #else:\n # print(\"max ok: {} <= {}\".format(w, max_weight))\n if ok_min and ok_max:\n yield g1, g2, w\n return self.filter(weight_filter())",
"def filter_log_by_weight(directly_follows_graph, edge_weight):\n edges_filtered = dict()\n for (key, value) in directly_follows_graph.items():\n if value > edge_weight:\n edges_filtered[key] = value\n return edges_filtered",
"def remove_edges(self, node: NodeKey) -> Edge:",
"def trim(self):\n while np.any(self.vertex_valance <= 1):\n edge_to_keep = np.all(self.vertex_valance[self.edges] > 1,\n axis=1).tolist();\n self.raw_wires.filter_edges(edge_to_keep);\n vertex_to_keep = [len(self.get_vertex_neighbors(i)) > 0 for i in\n range(self.num_vertices)];\n self.raw_wires.filter_vertices(vertex_to_keep);\n\n self.__initialize_wires();\n if len(self.vertices) == 0:\n raise RuntimeError(\"Zero vertices left after trimming.\");",
"def trim(self):\n\n # identify edges outside window\n\n min_time = self.latest_time - self.t_window\n edges_to_trim = self.graph.es.select(time_lt=min_time)\n if self.verbose: print(\"Edges to trim: \"+str(edges_to_trim))\n\n # remove edges outside of t_window\n self.graph.delete_edges(edges_to_trim)\n\n # identify vertices with 0 degree to delete\n vertices_to_trim = self.graph.vs.select(_degree=0)\n if self.verbose: print(\"Vertices to trim: \"+str(vertices_to_trim._name_index))\n self.graph.delete_vertices(vertices_to_trim)",
"def graph_no_edges():\n from weighted_graph import Weighted\n example_graph = Weighted()\n example_graph.add_node('BB')\n example_graph.add_node(82)\n example_graph.add_node(99)\n example_graph.add_node('AA')\n return example_graph",
"def weights_clipping(self):\n max_weigth = np.amax(np.abs(self._weights))\n self._weights = self._clipping*self._weights/max_weigth",
"def filter_vertices(self, to_keep):\n self.raw_wires.filter_vertices(to_keep);\n self.__initialize_wires();",
"def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]",
"def remove_edge(self, edge: Edge) -> Edge:",
"def remove_trivial_edges(self):\n if self.E > 0:\n valid = self.edges[:, 0] != self.edges[:, 1]\n self.edges = self.edges[valid]\n self.weights = self.weights[valid]\n self.E = np.sum(valid)\n return self.E",
"def update_edge_weight(self, G):\r\n for node in G.nodes:\r\n n1 = G.nodes[node]['belief_strength'] #gets a node's belief strength\r\n for pre in G.predecessors(node):\r\n n2 = G.nodes[pre]['belief_strength'] #gets the node's predecessors' belief strength\r\n dif = abs(n1-n2)\r\n if n1*n2> 0:\r\n G.edges[pre, node]['weight'] += (dif/2000) #clean\r\n else:\r\n G.edges[pre, node]['weight'] -= (dif/2000)\r\n return G",
"def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)",
"def goemans_williamson_weighted(graph):\n adjacency = nx.linalg.adjacency_matrix(graph)\n adjacency = adjacency.toarray()\n solution = _solve_cut_vector_program(adjacency)\n sides = _recover_cut(solution)\n\n nodes = list(graph.nodes)\n left = {vertex for side, vertex in zip(sides, nodes) if side < 0}\n right = {vertex for side, vertex in zip(sides, nodes) if side >= 0}\n return Cut(left, right)",
"def remove_edges(self, valid):\n if np.size(valid) != self.E:\n raise ValueError(\"the input vector does not have the correct size\")\n valid = np.reshape(valid, np.size(valid))\n self.E = int(valid.sum())\n self.edges = self.edges[valid != 0]\n self.weights = self.weights[valid != 0]",
"def test_bad_weights(self, dim, graph):\r\n s = [0, 1]\r\n w = np.ones(dim - 1)\r\n with pytest.raises(ValueError, match=\"Number of node weights must match number of nodes\"):\r\n clique.shrink(s, graph, node_select=w)",
"def prune_weights(model, fraction):\n weights = model.get_weights()\n\n def prune_weight_matrix(weight_matrix):\n # Copy the weights so we don't modify the original network.\n weight_matrix = np.copy(weight_matrix)\n flat_weight_matrix = np.reshape(weight_matrix, (-1,))\n kth = int(len(flat_weight_matrix) * fraction)\n # Determine the k least relevant weights using np.argpartition.\n indices = np.argpartition(np.abs(flat_weight_matrix), kth)\n # Prune them.\n flat_weight_matrix[indices[:kth]] = 0\n weight_matrix = np.reshape(flat_weight_matrix, weight_matrix.shape)\n return weight_matrix\n\n weights[:-1] = list(map(prune_weight_matrix, weights[:-1]))\n\n (_, n_classes) = weights[-1].shape\n # Create a pruned model.\n return create_model(\n LAYER_SIZES,\n n_classes,\n layer_fn=Sparse,\n layer_kwargs_fn=sparse_kwargs,\n weights=weights,\n )",
"def get_edges_weighted(self):\n edges = []\n for v in self.vertices.values():\n for w in v.neighbors:\n edges.append((v.name, w.name, v.neighbors[w]))\n return edges",
"def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)",
"def remove_weights(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.ArcMap(self.fst[0], result.fst, openfst.RmTropicalWeightMapper())\n return result"
] | [
"0.7666064",
"0.7618665",
"0.6603214",
"0.6586793",
"0.6484665",
"0.6472489",
"0.64522976",
"0.6375308",
"0.62455815",
"0.6229252",
"0.61846286",
"0.61768115",
"0.61122054",
"0.6103859",
"0.6095987",
"0.60443485",
"0.6027359",
"0.6025895",
"0.60210896",
"0.5985605",
"0.59797263",
"0.5942704",
"0.5900592",
"0.5873173",
"0.5872755",
"0.5869488",
"0.5833879",
"0.5812022",
"0.5794513",
"0.5685421"
] | 0.78451276 | 0 |
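A minimal sketch of the same weight-threshold filter, assuming networkx. Unlike the record's version it copies node data up front, so nodes left without edges survive the filter — a small deliberate variant, not the original behavior.

import networkx as nx

def remove_heavier_than(g, w):
    # Keep only edges whose weight is <= w, preserving node attributes.
    kept = nx.DiGraph()
    kept.add_nodes_from(g.nodes(data=True))
    for u, v, data in g.edges(data=True):
        if data["weight"] <= w:
            kept.add_edge(u, v, **data)
    return kept

g = nx.DiGraph()
g.add_edge("a", "b", weight=1.0)
g.add_edge("b", "c", weight=5.0)
print(list(remove_heavier_than(g, 2.0).edges()))  # [('a', 'b')]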
Return a new PatchScaffoldGraph containing a maximum-weight matching of this graph. | def max_weight_matching(self):
undirected_sg = nx.Graph()
for u, v in self.edges:
undirected_sg.add_edge(u, v, weight=self.graph[u][v]["weight"])
matching = nx.max_weight_matching(G=undirected_sg)
# Remove any cycles that would result from adding intra-sequence edges
# Make a new graph
cover_graph = nx.Graph()
cover_graph.add_nodes_from(undirected_sg.nodes(data=True))
# Add edges connecting contig ends
node_base_set = set([i[:-2] for i in list(cover_graph.nodes)])
for node in node_base_set:
cover_graph.add_edge(node + "_b", node + "_e", weight=np.inf)
# Add the edges that form the matching
for u, v in matching:
cover_graph.add_edge(u, v, **undirected_sg[u][v])
# Remove any potential cycles
edges_to_delete = []
for cc in nx.connected_components(G=cover_graph):
cc = cover_graph.subgraph(cc).copy()
if cc.number_of_nodes() == cc.number_of_edges():
assembly_edges = cc.edges(data=True)
edge_to_delete = min(assembly_edges, key=lambda entry: entry[2]["weight"])
edges_to_delete.append((edge_to_delete[0], edge_to_delete[1]))
# Add the edges that form the matching into a new PatchScaffoldGraph
new_psg = PatchScaffoldGraph(self.components_fn)
for u, v in matching:
if (u, v) not in edges_to_delete and (v, u) not in edges_to_delete:
new_psg.add_edge(u, v, self.graph[u][v]["alignment"])
new_psg.add_edge(v, u, self.graph[v][u]["alignment"])
return new_psg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_matching(self):\n verts, plaqs, d_verts, d_plaqs = self.get_stabs()\n\n # def get_matching(anyons, d_anyons):\n # edges = self.get_edges(anyons)\n # for i0, i1, weight in edges:\n # nxgraph.add_edge(i0, i1, weight=-weight)\n # output = nx.algorithms.matching.max_weight_matching(nxgraph, maxcardinality=True)\n # return [[d_anyons[i0], d_anyons[i1]] for i0, i1 in output]\n\n def get_matching(anyons, d_anyons):\n output = pm.getMatching(len(anyons), self.get_edges(anyons))\n return [[d_anyons[i0], d_anyons[i1], anyons[i0], anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]\n\n self.matching = []\n if verts:\n self.matching += get_matching(verts, d_verts)\n if plaqs:\n self.matching += get_matching(plaqs, d_plaqs)",
"def sub_graph_merging(self):",
"def annotate(self):\n logger.debug(f\"found ckt:{self.hier_graph_dict}\")\n\n names = list(self.hier_graph_dict)\n\n for name in names:\n circuit_name= name\n G1 = self.hier_graph_dict[name][\"graph\"]\n self._group_block_const(G1,circuit_name)\n self._group_cap_const(G1,circuit_name)\n\n for circuit_name in list(self.hier_graph_dict.keys()):\n logger.debug(f\"START MATCHING in circuit: {circuit_name}\")\n circuit = self.hier_graph_dict[circuit_name]\n G1 = circuit[\"graph\"]\n # map and reduce graph to dictionary\n mapped_graph_list = self._mapped_graph_list(G1, circuit_name, self.pg )\n const_list = self.hier_graph_dict[circuit_name]['constraints']\n self.hier_graph_dict[circuit_name][\"graph\"] = self._reduce_graph(G1, circuit_name, mapped_graph_list, const_list)\n \n for const in list(const_list):\n self._check_const_length(self.hier_graph_dict[circuit_name].constraints,const)\n check_nodes(self.hier_graph_dict)\n logger.debug(f\"Grest ckt is {circuit['graph'].nodes(data=True)}\")\n if circuit_name not in self.no_array:\n symmetry_blocks = FindSymmetry(circuit[\"graph\"], circuit[\"ports\"], circuit[\"ports_weight\"], self.stop_points)\n for symm_blocks in symmetry_blocks.values():\n logger.debug(f\"generated constraints: {pprint.pformat(symm_blocks, indent=4)}\")\n if isinstance(symm_blocks, dict) and \"graph\" in symm_blocks.keys():\n logger.debug(f\"added new hierarchy: {symm_blocks['name']} {symm_blocks['graph'].nodes()}\")\n self.hier_graph_dict[symm_blocks['name']] = symm_blocks\n assert False, \"Don't understand what's being deleted here\"\n del self.hier_graph_dict[symm_blocks['name']]['name']\n\n self.lib_names = [lib_ele['name'] for lib_ele in self.lib]\n for ckt_name, circuit in self.hier_graph_dict.items():\n if 'id' in self.hier_graph_dict[ckt_name] and len(self.hier_graph_dict[ckt_name]['id']) > 1:\n copies = len(self.hier_graph_dict[ckt_name]['id'])\n self.lib_names += [ckt_name + '_type' + str(n) for n in range(copies)]\n return self.lib_names",
"def _mapped_graph_list(self,G1, sname, POWER=None):\n logger.debug(f\"Matching circuit Graph nodes: {G1.nodes} edges:{G1.edges(data=True)}\")\n mapped_graph_list = {}\n for lib_ele in self.lib:\n block_name = lib_ele['name']\n if block_name==sname:\n continue\n G2 = lib_ele['graph']\n\n # Digital instances only transistors:\n if self._is_digital(G2,sname):\n continue\n if not self._is_small(G1, G2):\n continue\n\n if len(G2.nodes)<=len(G1.nodes):\n logger.debug(f\"Matching: {block_name} : {G2.nodes} {G2.edges(data=True)}\")\n GM = isomorphism.GraphMatcher(\n G1, G2,\n node_match = isomorphism.categorical_node_match(['inst_type'],\n ['nmos']),\n edge_match = isomorphism.categorical_edge_match(['weight'], [1]))\n if GM.subgraph_is_isomorphic():\n logger.debug(f\"ISOMORPHIC : {block_name}\")\n map_list = []\n\n for Gsub in GM.subgraph_isomorphisms_iter():\n\n all_nd = [key for key in Gsub.keys() if 'net' not in G1.nodes[key][\"inst_type\"]]\n logger.debug(f\"matched inst: {all_nd}\")\n if len(all_nd)>1 and self._is_clk(Gsub) :\n logger.debug(f\"Discarding match due to clock {Gsub}\")\n continue\n elif len(all_nd)>1 and self._is_do_not_identify(Gsub,sname):\n logger.debug(f\"Discarding match due to user constraint {Gsub}\")\n continue\n \n if block_name.startswith('DP') or block_name.startswith('CMC'):\n if G1.nodes[all_nd[0]]['values'] == G1.nodes[all_nd[1]]['values'] and \\\n compare_balanced_tree(G1,get_key(Gsub,'DA'),get_key(Gsub,'DB'),[all_nd[0]],[all_nd[1]]) :\n if 'SA' in Gsub.values() and \\\n compare_balanced_tree(G1,get_key(Gsub,'SA'),get_key(Gsub,'SB'),[all_nd[0]],[all_nd[1]]):\n map_list.append(Gsub)\n logger.debug(f\"Matched Lib: {' '.join(Gsub.values())}\")\n logger.debug(f\"Matched Circuit: {' '.join(Gsub)}\")\n # remove pseudo diff pair\n elif block_name.startswith('DP') and POWER is not None and get_key(Gsub,'S') in POWER:\n logger.debug(f\"skipping pseudo DP {POWER}: {' '.join(Gsub)}\")\n else:\n map_list.append(Gsub)\n logger.debug(f\"Matched Lib: {' '.join(Gsub.values())}\")\n logger.debug(f\"Matched Circuit: {' '.join(Gsub)} power:{POWER}\")\n else:\n logger.debug(f\"Discarding match {block_name} due to non matching branches\")\n elif block_name.startswith('SCM') and G1.nodes[all_nd[0]]['values'] != G1.nodes[all_nd[1]]['values']:\n logger.debug(f\"Discarding match {block_name} due to value mismatch\")\n\n else:\n map_list.append(Gsub)\n logger.debug(f\"Matched Lib: {' '.join(Gsub.values())}\")\n logger.debug(f\"Matched Circuit: {' '.join(Gsub)}\")\n if len(map_list)>1:\n fix_order_for_multimatch(G1,map_list,map_list[-1])\n mapped_graph_list[block_name] = map_list\n\n return mapped_graph_list",
"def add_patch(self, pset, patch):\n car = patch.pop()\n if car in pset:\n sel = [ x for x in pset[car] if patch.path == x.path ]\n if sel:\n sel[0].combine(patch)\n else:\n pset[car].append(patch)\n else:\n pset[car] = [patch]",
"def duplicate(self):\n\t\treturn Graph(self.vertices[:], self.edges[:])",
"def matched_sub_graph_instances(self, graph: Graph):\n if self.replacement_desc.match_kind == 'points': # instance is specified with lists of start/end nodes\n match = self._match_sub_graph_for_points(graph)\n if match is not None:\n yield match\n elif self.replacement_desc.match_kind == 'scope': # instance is specified with a node name pattern\n for instance in self.replacement_desc.sub_graph_instances():\n match = self._match_sub_graph_for_scope(graph, instance)\n if match is not None:\n yield match\n else:\n raise Error('Unsupported match kind \"{}\". Match kinds \"points\" or \"scope\" are supported only. '.format(\n self.replacement_desc.match_kind) +\n refer_to_faq_msg(35))",
"def duplicate(self):\r\n graph = DistanceGraph(self.size)\r\n for node in self.edges:\r\n for edge in self.edges[node]:\r\n graph.edges[node][edge] = self.edges[node][edge]\r\n return graph",
"def build_csls_subgraphs(self):\n # Graph for calculating only score\n scores_s2t = self.build_eval_graph(\n \"ScoreS2T\", self.src_ten, self.tgt_ten, en_knn=False, en_mean=False\n )\n scores_t2s = self.build_eval_graph(\n \"ScoreT2S\", self.tgt_ten, self.src_ten, en_knn=False, en_mean=False\n )\n # Graphs for calculating average between source and target\n avg1_s2t = self.build_eval_graph(\"Avg1\", self.src_ten, self.tgt_ten, knn=10)\n avg2_s2t = self.build_eval_graph(\"Avg2\", self.tgt_ten, self.src_ten, knn=10)\n # Graph for selecting top 100 elements\n top100_matches = tf.nn.top_k(self.score_ph, 100)\n # Graph for selecting top 2 elements\n top2_matches = tf.nn.top_k(self.score_ph, 2)\n # Graph for calculating similarity\n csls_mean_score = self.build_sim_graph(\"SimGraph\")\n\n # Dictionary\n csls_graphs = {\n \"ScoreGraph\": scores_s2t,\n \"ScoreG_T2S\": scores_t2s,\n \"Avg1S2T\": avg1_s2t,\n \"Avg2S2T\": avg2_s2t,\n \"Top100\": top100_matches,\n \"Top2\": top2_matches,\n \"CSLS_Criteria\": csls_mean_score,\n }\n return csls_graphs",
"def get_patches(self):\n self.get_source_patch_masks()\n self.get_target_patch_masks()\n self.get_source_patches()",
"def match(self, other):\n matches = match_descriptors(self.base_view.descriptors, other.descriptors,\n cross_check=True)\n matches = pd.Series({m[0]: m[1] for m in matches}).reindex(\n self._match_table.index)\n self._match_table[other.position.id] = matches",
"def intersect(self, match):\n intersection = set()\n for m in self.matches:\n intersection.add(m.intersect(match))\n return FlowSpace(intersection)",
"def merge(self):\n G = ScaffoldGraph(self.components_fasta_fname)\n\n # TODO implement an add_nodes_from wrapper so direct access is unnecessary\n G.graph.add_nodes_from(self.nodes(data=True))\n for u, v, c in self.graph.edges:\n edge_data_list = [self.graph[u][v][i] for i in self.graph[u][v]]\n G.add_edge(u, v, **self._merge_edge_dicts(*edge_data_list))\n\n return G",
"def clone(self):\n return _libsbml.LayoutSpeciesReferencePlugin_clone(self)",
"def master_spec_graph(master_spec):\n\tif not isinstance(master_spec, spec_pb2.MasterSpec):\n\t\traise TypeError(\"master_spec_graph() expects a MasterSpec input.\")\n\tgraph = pygraphviz.AGraph(directed=True)\n\tgraph.node_attr.update(shape=\"box\", style=\"filled\", fillcolor=\"white\", fontname=\"roboto, helvetica, arial\", fontsize=11)\n\tgraph.edge_attr.update(fontname=\"roboto, helvetica, arial\", fontsize=11)\n\tfor component in master_spec.component:\n\t\tgraph.add_node(component.name, label=_component_contents(component))\n\tfor component in master_spec.component:\n\t\tfor linked_feature in component.linked_feature:\n\t\t\tgraph.add_edge(linked_feature.source_component, component.name, label=_linked_feature_label(linked_feature))\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter(\"ignore\")\n\t\treturn graph.draw(format=\"svg\", prog=\"dot\")",
"def generate(self):\n self.graph_repl = self.master.graph_repl",
"def make_cocktail(self):\n\t\t#steps for visualization\n\t\tsteps = []\n\t\tcount = 0\n\t\tmatches_count = 0\n\t\tfor ingredient in self.ingredients:\n\t\t\tif ingredient not in self.compatible_ingredients:\n\t\t\t\tprint(\"can't find {} in any recipe - skipping\".format(ingredient))\n\t\t\telse:\n\t\t\t\tingredients_tried = []\n\t\t\t\t#initialize dfs stack with current input ingredient\n\t\t\t\tingredients_to_try = [ingredient]\n\t\t\t\t#set of ingredients to check for valid ingredient combinations\n\t\t\t\tcocktail_recipe = set()\n\t\t\t\twhile ingredients_to_try:\n\t\t\t\t\tcurrent = ingredients_to_try.pop()\n\t\t\t\t\tcount += 1\n\t\t\t\t\tsteps.append({\"node\": current, \"color\": \"possible\"})\n\t\t\t\t\tcocktail_recipe.add(current)\n\t\t\t\t\tingredients_tried.append(current)\n\t\t\t\t\t#get neighbors\n\t\t\t\t\tpotential_ingredients = self.ingredients_pointing_to(current)\n\t\t\t\t\tfor ing in potential_ingredients:\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t#make sure it's an ingredient we have\n\t\t\t\t\t\tif ing in self.ingredients:\n\t\t\t\t\t\t\tcocktail_recipe.add(ing)\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\tsteps.append({\"node\": ing, \"color\": 'possible'})\n\t\t\t\t\t\t\tif self.valid_cocktail(cocktail_recipe):\n\t\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\t\tfound_match = self.matching_cocktails(cocktail_recipe)\n\t\t\t\t\t\t\t\t#for visualization\n\t\t\t\t\t\t\t\tif found_match:\n\t\t\t\t\t\t\t\t\tmatches_count += 1\n\t\t\t\t\t\t\t\t\tfor ingredient in cocktail_recipe:\n\t\t\t\t\t\t\t\t\t\tsteps.append({\"node\": ingredient, \"color\": 'match'})\n\t\t\t\t\t\t\t\t\tif matches_count > 2:\n\t\t\t\t\t\t\t\t\t\tif self.visualization:\n\t\t\t\t\t\t\t\t\t\t\tself.animation_steps = steps\n\t\t\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t#add to stack\n\t\t\t\t\t\t\t\tif ing not in ingredients_tried and ing not in ingredients_to_try:\n\t\t\t\t\t\t\t\t\tingredients_to_try.append(ing)\n\t\t\t\t\t\t\tsteps.append({\"node\": ing, \"color\": 'no_match'})\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\tcocktail_recipe.remove(ing)\n\t\t\t\t\tsteps.append({\"node\": current, \"color\": \"no_match\"})\n\t\tself.animation_steps = steps\n\t\treturn",
"def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph",
"def sub_graph_merging(self):\n raise NotImplementedError()",
"def get_graph(self):\n graph = copy.deepcopy(self.G)\n for source, dests in graph.items():\n for dest in dests:\n constraint = graph[source][dest]['constraint']\n new_constraint = self.preprocess_constraint(constraint)\n graph[source][dest]['constraint'] = new_constraint\n return graph",
"def clone(self):\n sc=copy.copy(self)\n sc.farms=list()\n for f in self.farms:\n sc.farms.append(f.clone(f.name, f.size))\n sc.airborne=list()\n for a in self.airborne:\n sc.airborne.append(a.clone(a.farma, a.farmb, a.distance))\n return sc",
"def assignPatches(stars, visit, nPatches=16, radiusFoV=1.8):\n maxx, maxy = gnomonic_project_toxy(0., np.radians(radiusFoV), 0., 0.)\n nsides = nPatches**0.5\n\n # This should move all coords to 0 < x < nsides-1\n px = np.floor((stars['x'] + maxy)/(2.*maxy)*nsides)\n py = np.floor((stars['y'] + maxy)/(2.*maxy)*nsides)\n\n stars['subPatch'] = px + py*nsides\n stars['patchID'] = stars['subPatch'] + visit['visitID']*nPatches\n return stars",
"def generatePatch(self):\n\n image_processor = ImageProcessor()\n\n # Load the network______________________________________________________________________________________________\n # - g_input: Input to the generator\n # - g_output_patch_only: Patch generated\n # - surrounding_region: Region surrounding the masked image to be merged with the generated patch\n # - training: Whether the model is training or not. When invoking the model, False should be passed in\n\n network = Network()\n d_input, g_input, g_output, g_output_patch_only, d_optimizer, g_optimizer, surrounding_region, \\\n patch_ground_truth, d_cost_fake, d_cost_real, g_cost, training = network.network(batch_size)\n\n\n # Create a new TensorFlow session\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n\n # Get the paths of all the files within the test dataset location and shuffle the images\n file_paths = np.array(glob.glob(self.test_dataset_location))\n number_of_instances = len(file_paths)\n indexes = np.random.permutation(number_of_instances)\n file_paths = file_paths[indexes]\n\n\n # Load learnt model\n mi.load_checkpoint(sess)\n\n\n # Iterate through each batch of images\n for i in range(number_of_instances // batch_size):\n\n # Retrieve batch of training images\n batch_file_paths = file_paths[i * batch_size: i * batch_size + batch_size]\n _, g_batch, image_full, surrounding_region_batch, _ = image_processor.create_batch(batch_file_paths)\n\n # Generate patches for the batch of images\n generated_patches = sess.run(g_output_patch_only, feed_dict={g_input: g_batch,\n surrounding_region: surrounding_region_batch, training: False})\n\n # Save the completed images. Both the ground truth (1) and images with the generated patch using unsharp\n # intensities of the default 2.5 and 0.4 are saved\n for k in range(0, batch_size):\n img_id = batch_size * i + k\n\n image_processor.save_image(image_full[k], img_id, 1)\n\n generated_patch = generated_patches[k]\n\n sharpened_patch = image_processor.unsharp_mask(generated_patch)\n sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],\n patch_startX, patch_startY)\n image_processor.save_image(sharpened_image, img_id, 2)\n\n sharpened_patch = image_processor.unsharp_mask(generated_patch, 0.5)\n sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],\n patch_startX, patch_startY)\n image_processor.save_image(sharpened_image, img_id, 3)\n\n print(i * batch_size)",
"def get_matches(self, update_flow=True):\n if update_flow:\n self.update_flow()\n flow = (self.capacity-self.residual).toarray()\n\n matches = self.fixed_matches.copy()\n for p_idx in range(self.pod_num):\n for m_idx in range(self.mentor_num):\n for s_idx, u, v in self.shared_slots[p_idx, m_idx]:\n if flow[u, v]>0:\n if self.stage==1:\n matches.append(\n (s_idx, self.pod_info['name'][p_idx], self.mentor_info['email'][m_idx])\n )\n if self.stage==2:\n matches.append(\n (s_idx%SLOT_NUM, self.pod_info['name'][p_idx], self.mentor_info['email'][m_idx])\n )\n return matches",
"def match_fill(self,MatchMake_inst):\n if type(MatchMake_inst) is not MatchMake:\n raise TypeError(\"Wrong datatype: MatchMake_inst has to be of custom type MatchMake!\")\n\n res_dict = {}\n for match in MatchMake_inst._score_list:\n try:\n res_dict[match.match_score].append(match.match_target)\n \n except KeyError:\n res_dict[match.match_score] = [match.match_target]\n \n\n self.add(MatchMake_inst._source) # add source in network\n parent = [MatchMake_inst._source]\n for score in sorted(res_dict.keys(),reverse=True):\n for target in res_dict[score]:\n self.add(target,parent,score)\n parent = res_dict[score]",
"def match(*args, current=None):\n # current is the original edge and clone is the change\n # this function should only be getting nodes with the same edges\n # if I change this to assume nodes of the same edge attr then I can\n # send this function \"equivalent edges\"\n scores = []\n for clone in args:\n if current.edge_attribute == clone.edge_attribute:\n source_condit = (\n clone.source.original_id == current.source.id\n or clone.source.id == current.source.id\n )\n target_condit = (\n clone.target.original_id == current.target.id\n or clone.target.id == current.target.id\n )\n if source_condit and target_condit:\n scores.append(2)\n return scores\n elif source_condit or target_condit:\n\n scores.append(1)\n else:\n # TODO: check subgraph/call is_similar\n # if subgraph is isomorphic then return 2\n scores.append(0)\n elif len(current.edge_attribute) > len(clone.edge_attribute):\n scores.append(-1)\n else: # edge attribute of current is shorter than of clone\n scores.append(-2)\n return scores",
"def get_patch_from_image(cls):\n path, _, x1, y1, x2, y2, height, width = get_candidate_row_from_df(cls)\n img = cv2.imread(path)\n patch = img[y1: y2, x1: x2]\n return patch",
"def findPattern(someStan, targetPattern):\n pat = getattr(someStan, 'pattern', None)\n if pat == targetPattern:\n return someStan.cloneNode()\n for child in getattr(someStan, 'children', []):\n result = findPattern(child, targetPattern)\n if result is not None:\n return result.cloneNode()",
"def clone(self):\n return _libsbml.ListOfSpeciesReferenceGlyphs_clone(self)",
"def _subgraph_isomorphism_matcher(digraph, nxpattern, node_pred, edge_pred):\n graph_matcher = iso.DiGraphMatcher(digraph, nxpattern, node_match=node_pred, edge_match=edge_pred)\n yield from graph_matcher.subgraph_isomorphisms_iter()"
] | [
"0.5528811",
"0.5522304",
"0.5502465",
"0.5262912",
"0.51598346",
"0.5142724",
"0.5116374",
"0.49745867",
"0.4946554",
"0.49250278",
"0.49031502",
"0.48919278",
"0.48642075",
"0.48015532",
"0.48003212",
"0.4798948",
"0.47783658",
"0.47769517",
"0.47430834",
"0.47409213",
"0.47366053",
"0.47161302",
"0.47125044",
"0.4710881",
"0.46773225",
"0.46569553",
"0.46526116",
"0.4643157",
"0.4637347",
"0.46342617"
] | 0.6056853 | 0 |
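To make the matching-plus-cycle-breaking step above concrete, here is a small networkx sketch with invented contig names and weights: contig ends are tied together with infinite-weight edges, the matched joins are added on top, and any connected component with as many edges as nodes is a cycle, which is linearized by dropping its lightest join.

import networkx as nx

g = nx.Graph()
g.add_edge("c1_e", "c2_b", weight=3.0)
g.add_edge("c2_e", "c1_b", weight=1.0)
matching = nx.max_weight_matching(g)

cover = nx.Graph()
for contig in {n[:-2] for n in g.nodes}:
    # The two ends of a contig must stay together, so join them with an
    # edge that can never be the one removed.
    cover.add_edge(contig + "_b", contig + "_e", weight=float("inf"))
for u, v in matching:
    cover.add_edge(u, v, weight=g[u][v]["weight"])

for cc in nx.connected_components(cover):
    sub = cover.subgraph(cc)
    if sub.number_of_nodes() == sub.number_of_edges():  # a cycle
        u, v, _ = min(sub.edges(data=True), key=lambda e: e[2]["weight"])
        print("cycle broken by dropping", (u, v))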
A convolution is compatible with a CheckpointedHistory only if it contains a history that is compatible for each suboptimizer. | def compatible(self, history):
return (isinstance(history, CheckpointedMultipleHistory) and
np.array([self.mapHistory(s, history) is not None
for s in self.subs]).all()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_conv_consistency(self) -> None:\n x = Input(\n 'const1',\n [1, 3, 3, 3],\n Float32(),\n )\n w = Constant(\n 'weight',\n Float32(),\n np.zeros([1, 2, 2, 3])\n )\n input_ops = {'X': cast(Operator, x), 'W': cast(Operator, w)}\n\n Conv(\n 'conv_under_test',\n [1, 3, 3, 3],\n Float32(),\n input_ops,\n pads=[1, 2, 1, 2],\n strides=[2, 2]\n )\n\n print(\"Consistency test for conv operator passed!\")",
"def check_conv(extract):\n call = extract\n clip_found = False\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n clip_found = True\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d\":\n call = call.args[0]\n\n attrs, args = call.attrs, call.args\n if attrs.data_layout != \"NCHW\":\n return False\n\n if (\n (not clip_found)\n and (attrs.kernel_size[0] == 3)\n and (attrs.dilation[0] != 1)\n and (attrs.groups != 1)\n and (attrs.channels == attrs.groups)\n ):\n return False\n\n data_typ = args[0].checked_type\n kernel_typ = args[1].checked_type\n is_depthwise = is_depthwise_conv2d(\n data_typ.shape,\n attrs[\"data_layout\"],\n kernel_typ.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n )\n if attrs.groups != 1 and not is_depthwise:\n return False\n return True",
"def _cross_layer_equalize(head_conv, tail_conv):\n head_weights, tail_weights = [], []\n # Get head conv weights and bias\n if head_conv.layer['class_name'] == 'Conv2D':\n w = head_conv.weights['kernel:0']\n oc = w.shape[3] # k * k * ic * oc for Conv2D\n head_weights.append(w.reshape(-1, oc))\n if head_conv.layer['config']['use_bias']:\n b = head_conv.weights['bias:0']\n head_weights.append(b.reshape(1, -1))\n else:\n w = head_conv.weights['depthwise_kernel:0']\n oc = w.shape[2] # k * k * ic * 1 for DepthwiseConv2D\n head_weights.append(w.reshape(-1, oc))\n if head_conv.layer['config']['use_bias']:\n b = head_conv.weights['bias:0']\n head_weights.append(b.reshape(1, -1))\n\n # Get tail conv weights and bias\n if tail_conv.layer['class_name'] == 'Conv2D':\n w = tail_conv.weights['kernel:0']\n ic = w.shape[2] # k * k * ic * oc for Conv2D\n w = w.transpose(0, 1, 3, 2)\n tail_weights.append(w.reshape(-1, ic))\n else:\n w = tail_conv.weights['depthwise_kernel:0']\n ic = w.shape[2] # k * k * ic * 1 for DepthwiseConv2D\n tail_weights.append(w.reshape(-1, ic))\n\n head_weights = np.abs(np.concatenate(head_weights, axis=0))\n tail_weights = np.abs(np.concatenate(tail_weights, axis=0))\n\n # Calculate scale\n scale = _calc_scale(head_weights, tail_weights)\n\n # print('Equalize: {} and {}.'.format(head_conv.layer['config']['name'],\n # tail_conv.layer['config']['name']))\n # Scale head conv weights and bias\n if head_conv.layer['class_name'] == 'Conv2D':\n head_conv.weights['kernel:0'] *= scale.reshape(1, 1, 1, -1)\n if head_conv.layer['config']['use_bias']:\n head_conv.weights['bias:0'] *= scale\n else:\n head_conv.weights['depthwise_kernel:0'] *= scale.reshape(1, 1, -1, 1)\n if head_conv.layer['config']['use_bias']:\n head_conv.weights['bias:0'] *= scale\n\n # Scale tail conv weights and bias\n if tail_conv.layer['class_name'] == 'Conv2D':\n tail_conv.weights['kernel:0'] /= scale.reshape(1, 1, -1, 1)\n else:\n tail_conv.weights['depthwise_kernel:0'] /= scale.reshape(1, 1, -1, 1)",
"def _convs_unoptimized(args, filter_size, num_features, bias, bias_start=0.0, convtype='convolution'):\n\n # Calculate the total size of arguments on dimension 1\n\n total_arg_size_depth = 0\n shapes = [a.get_shape().as_list() for a in args]\n shape_length = len(shapes[0])\n for shape in shapes:\n if len(shape) not in [3, 4, 5]:\n raise ValueError(\"Conv Linear expects 3D, 4D or 5D arguments: %s\" % str(shapes))\n if len(shape) != len(shapes[0]):\n raise ValueError(\"Conv Linear expects all args to be of same Dimension: %s\" % str(shapes))\n else:\n total_arg_size_depth += shape[-1]\n dtype = [a.dtype for a in args][0]\n\n if shape_length != 4 and convtype == \"separable\":\n print ('[ERROR] separable convLSTM is only implemented for conv2D')\n raise NotImplementedError \n\n if len(args) != 2:\n print ('LSTM is only implemented with len(args) = 2!')\n raise NotImplementedError\n\n # Determine correct conv operation\n\n c_i = shapes[0][-1] # number of input channels per tensor in args\n c_o = num_features//4 # number of output channels per gate and cell state\n\n if convtype == 'separable': \n if shape_length == 3:\n conv_op = tf.nn.separable_conv1d # ? does not exist\n strides = 1\n elif shape_length == 4:\n conv_op = tf.nn.separable_conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = tf.nn.separable_conv3d # ? does not exist\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n channel_multiplier = 1\n elif convtype == 'depthwise': \n if shape_length == 3:\n conv_op = tf.nn.depthwise_conv1d # ? does not exist\n strides = 1\n elif shape_length == 4:\n conv_op = tf.nn.depthwise_conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = tf.nn.depthwise_conv3d # ? does not exist\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n channel_multiplier = 1\n else: # Normal CONV and spatially separable CONV\n if shape_length == 3:\n conv_op = nn_ops.conv1d\n strides = 1\n elif shape_length == 4:\n conv_op = nn_ops.conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = nn_ops.conv3d\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n\n # Now the computation\n\n if convtype == 'spatial':\n # Get kernels\n\n kernel_h = vs.get_variable(\"kernel_h\", [filter_size[0], 1, total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_h: ', [filter_size[0], 1, total_arg_size_depth, num_features])\n kernel_w = vs.get_variable(\"kernel_w\", [1, filter_size[1], total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_w: ', [1, filter_size[1], total_arg_size_depth, num_features])\n\n W_ix_h = kernel_h[..., 0:c_i, 0:1*c_o] # Name pattern: W(eights) for i(nput gate) for h(eight) CONV with x\n W_ih_h = kernel_h[..., c_i:2*c_i, 0:1*c_o]\n W_cx_h = kernel_h[..., 0:c_i, 1*c_o:2*c_o]\n W_ch_h = kernel_h[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx_h = kernel_h[..., 0:c_i, 2*c_o:3*c_o]\n W_fh_h = kernel_h[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox_h = kernel_h[..., 0:c_i, 3*c_o:4*c_o]\n W_oh_h = kernel_h[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n W_ix_w = kernel_w[..., 0:c_i, 0:1*c_o]\n W_ih_w = kernel_w[..., c_i:2*c_i, 0:1*c_o]\n W_cx_w = kernel_w[..., 0:c_i, 1*c_o:2*c_o]\n W_ch_w = kernel_w[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx_w = kernel_w[..., 0:c_i, 2*c_o:3*c_o]\n W_fh_w = kernel_w[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox_w = kernel_w[..., 0:c_i, 3*c_o:4*c_o]\n W_oh_w = kernel_w[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x_h = conv_op(args[0], W_ix_h, strides, padding=\"SAME\")\n i_x = conv_op(i_x_h, W_ix_w, 
strides, padding=\"SAME\")\n i_h_h = conv_op(args[1], W_ih_h, strides, padding=\"SAME\")\n i_h = conv_op(i_h_h, W_ih_w, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x_h = conv_op(args[0], W_cx_h, strides, padding=\"SAME\")\n c_x = conv_op(c_x_h, W_cx_w, strides, padding=\"SAME\")\n c_h_h = conv_op(args[1], W_ch_h, strides, padding=\"SAME\")\n c_h = conv_op(c_h_h, W_ch_w, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x_h = conv_op(args[0], W_fx_h, strides, padding=\"SAME\")\n f_x = conv_op(f_x_h, W_fx_w, strides, padding=\"SAME\")\n f_h_h = conv_op(args[1], W_fh_h, strides, padding=\"SAME\")\n f_h = conv_op(f_h_h, W_fh_w, strides, padding=\"SAME\")\n\n # output gate\n\n o_x_h = conv_op(args[0], W_ox_h, strides, padding=\"SAME\")\n o_x = conv_op(o_x_h, W_ox_w, strides, padding=\"SAME\")\n o_h_h = conv_op(args[1], W_oh_h, strides, padding=\"SAME\")\n o_h = conv_op(o_h_h, W_oh_w, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n elif convtype == 'depthwise':\n # Get kernels\n\n kernel_depth = vs.get_variable(\"kernel_depth\", filter_size + [total_arg_size_depth, 4*channel_multiplier],\n dtype=dtype)\n print('kernel_depth: ', filter_size + [total_arg_size_depth, 4*channel_multiplier])\n\n W_ix = kernel_depth[..., 0:c_i, 0:1*channel_multiplier]\n W_ih = kernel_depth[..., c_i:2*c_i, 0:1*channel_multiplier]\n W_cx = kernel_depth[..., 0:c_i, 1*channel_multiplier:2*channel_multiplier]\n W_ch = kernel_depth[..., c_i:2*c_i, 1*channel_multiplier:2*channel_multiplier]\n W_fx = kernel_depth[..., 0:c_i, 2*channel_multiplier:3*channel_multiplier]\n W_fh = kernel_depth[..., c_i:2*c_i, 2*channel_multiplier:3*channel_multiplier]\n W_ox = kernel_depth[..., 0:c_i, 3*channel_multiplier:4*channel_multiplier]\n W_oh = kernel_depth[..., c_i:2*c_i, 3*channel_multiplier:4*channel_multiplier]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n elif convtype == 'separable':\n # Get kernels\n\n kernel_depth = vs.get_variable(\"kernel_depth\", filter_size + [total_arg_size_depth, 4*channel_multiplier],\n dtype=dtype)\n print('kernel_depth: ', filter_size + [total_arg_size_depth, 4*channel_multiplier])\n kernel_sep = vs.get_variable(\"kernel_sep\", [1, 1, total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_sep: ', [1, 1, total_arg_size_depth, num_features])\n\n W_ix = kernel_depth[..., 0:c_i, 0:1*channel_multiplier]\n W_ih = kernel_depth[..., c_i:2*c_i, 0:1*channel_multiplier]\n W_cx = kernel_depth[..., 0:c_i, 1*channel_multiplier:2*channel_multiplier]\n W_ch = kernel_depth[..., c_i:2*c_i, 1*channel_multiplier:2*channel_multiplier]\n W_fx = 
kernel_depth[..., 0:c_i, 2*channel_multiplier:3*channel_multiplier]\n W_fh = kernel_depth[..., c_i:2*c_i, 2*channel_multiplier:3*channel_multiplier]\n W_ox = kernel_depth[..., 0:c_i, 3*channel_multiplier:4*channel_multiplier]\n W_oh = kernel_depth[..., c_i:2*c_i, 3*channel_multiplier:4*channel_multiplier]\n\n Wsep_ix = kernel_sep[..., 0:c_i, 0:1*c_o]\n Wsep_ih = kernel_sep[..., c_i:2*c_i, 0:1*c_o]\n Wsep_cx = kernel_sep[..., 0:c_i, 1*c_o:2*c_o]\n Wsep_ch = kernel_sep[..., c_i:2*c_i, 1*c_o:2*c_o]\n Wsep_fx = kernel_sep[..., 0:c_i, 2*c_o:3*c_o]\n Wsep_fh = kernel_sep[..., c_i:2*c_i, 2*c_o:3*c_o]\n Wsep_ox = kernel_sep[..., 0:c_i, 3*c_o:4*c_o]\n Wsep_oh = kernel_sep[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, Wsep_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, Wsep_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, Wsep_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, Wsep_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, Wsep_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, Wsep_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, Wsep_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, Wsep_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n else: # normal CONV\n # Get kernel\n\n kernel = vs.get_variable(\"kernel\", filter_size + [total_arg_size_depth, 4*c_o], dtype=dtype)\n print('kernel: ', filter_size + [total_arg_size_depth, 4*c_o])\n\n W_ix = kernel[..., 0:c_i, 0:1*c_o]\n W_ih = kernel[..., c_i:2*c_i, 0:1*c_o]\n W_cx = kernel[..., 0:c_i, 1*c_o:2*c_o]\n W_ch = kernel[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx = kernel[..., 0:c_i, 2*c_o:3*c_o]\n W_fh = kernel[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox = kernel[..., 0:c_i, 3*c_o:4*c_o]\n W_oh = kernel[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n \n if not bias:\n return res\n bias_term = vs.get_variable(\"biases\", [num_features], dtype=dtype,\n initializer=init_ops.constant_initializer(bias_start, dtype=dtype))\n return res + bias_term",
"def _register_conv_hook(self):\n\n def _record_gradients(module, grad_in, grad_out):\n self.gradients = grad_in[0]\n\n for _, module in self.model.named_modules():\n if isinstance(module, nn.modules.conv.Conv2d) and module.in_channels == 3:\n backward_handle = module.register_backward_hook(_record_gradients)\n self.handle.append(backward_handle)",
"def check_conv_config(self):\n conv_config = self.conv_config\n config = self.base_model.config\n\n new_params_1 = [k for k in conv_config.keys() if not k.endswith(\"_rate_multiplier\")]\n new_params_2 = []\n\n for k, v in conv_config.items():\n if isinstance(v, dict):\n for p in v[\"params\"]:\n if p not in new_params_2:\n new_params_2.append(p)\n\n # order doesn't matter\n assert set(new_params_1) == set(new_params_2), \"New parameters are not consistent, double check conv_config...\"\n\n # also check if new params are in config or not\n missing_params = \"\"\n for new_p in new_params_1:\n if not config.get(new_p, False):\n missing_params += f\"{new_p}, \"\n\n # remove the period if there are missing parameters..\n if missing_params != \"\":\n missing_params = missing_params[:-2]\n\n assert missing_params == \"\", f\"{missing_params} are missing in the config\"",
"def all_views_conv_layer(input_layer,network_type, layer_name, number_of_filters=32, filter_size=(3, 3), stride=(1, 1),\r\n padding='VALID', biases_initializer=tf.zeros_initializer()):\r\n if network_type == \"CC\":\r\n\r\n\r\n input_l_cc, input_r_cc = input_layer\r\n\r\n #with tf.variable_scope(layer_name + \"_CC\") as cc_cope:\r\n h_l_cc = tf.contrib.layers.convolution2d(inputs=input_l_cc, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n h_r_cc = tf.contrib.layers.convolution2d(inputs=input_r_cc, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding, reuse=False,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n\r\n\r\n h = (h_l_cc, h_r_cc)\r\n\r\n return h\r\n\r\n else:\r\n input_l_mlo, input_r_mlo = input_layer\r\n\r\n # with tf.variable_scope(layer_name + \"_CC\") as cc_cope:\r\n h_l_mlo = tf.contrib.layers.convolution2d(inputs=input_l_mlo, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n biases_initializer=biases_initializer)\r\n h_r_mlo = tf.contrib.layers.convolution2d(inputs=input_r_mlo, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding, reuse=False,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n biases_initializer=biases_initializer)\r\n\r\n h = (h_l_mlo, h_r_mlo)\r\n\r\n return h",
"def rconv2(a,b, ctr = 0):\n\n if (a.get_shape().as_list()[1] >= b.get_shape().as_list()[0]) and (a.get_shape().as_list()[2] >= b.get_shape().as_list()[1]):\n large, small = a, b\n elif (a.get_shape().as_list()[1] <= b.get_shape().as_list()[0]) and (a.get_shape().as_list()[2] <= b.get_shape().as_list()[1]):\n large, small = b, a\n else:\n raise Exception(\"one arg must be larger than the other in both dimensions!\")\n \n ly = large.get_shape().as_list()[1]\n lx = large.get_shape().as_list()[2]\n sy = small.get_shape().as_list()[0]\n sx = small.get_shape().as_list()[1]\n \n sy2 = math.floor((sy+ctr-1)/2)\n sx2 = math.floor((sx+ctr-1)/2)\n \n ty_top = np.arange(sy-sy2-1,0,-1)\n tx_top = np.arange(sx-sx2-1,0,-1)\n ty_bottom = np.arange(ly-2,ly-sy2-2,-1)\n tx_bottom = np.arange(lx-2,lx-sx2-2,-1)\n \n paddings = tf.constant([[0,0], [sy-sy2-1,sy2], [sx-sx2-1, sx2], [0,0]])\n clarge = tf.pad(large, paddings, mode = 'REFLECT')\n \n #clarge = tf.reshape(clarge,[clarge.get_shape().as_list()[0], clarge.get_shape().as_list()[1], clarge.get_shape().as_list()[2]])\n small = tf.reshape(small, [small.get_shape().as_list()[0], small.get_shape().as_list()[1], 1, 1])\n c = tf.nn.conv2d(clarge, small, padding=\"VALID\", strides = [1, 1, 1, 1])\n return c",
"def optimize(self, graph: Graph) -> Tuple[Graph, bool]:\n # this optimization is always applied (since backends do not implement padding)\n flag_changed = False\n\n for tail_layer in [Convolution2D, MaxPooling2D, AveragePooling2D]:\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])\n while len(matches) > 0:\n match = matches[0]\n a1: ZeroPadding2D = match[0]\n a2: Union[Convolution2D, MaxPooling2D, AveragePooling2D] = match[2]\n\n zero_pad = a1.parameters[\"padding\"]\n conv_pad = a2.parameters[\"padding\"]\n a2.parameters[\"padding\"] = (zero_pad[0] + conv_pad[0], zero_pad[1] + conv_pad[1])\n\n x1 = a1.inputs[\"x\"]\n x2 = a2.inputs[\"x\"]\n\n a1.remove_all()\n # replace_input checks if the shape of x1 and x2 are same, but this restriction does not hold.\n a2.remove_input(x2)\n a2.append_input(\"x\", x1)\n\n flag_changed = True\n matches = search_sub_structure(graph, [ZeroPadding2D, Variable, tail_layer])\n\n return graph, flag_changed",
"def all_conv_ops(self):\n pass",
"def _check_second_cat(cat: Operator) -> bool:\n if len(cat._attrs[\"outputs\"]) != 1:\n return False\n # Similar to the first cat, make sure the second cat's input_accessors\n # do not carry any strided information.\n if not all(\n accessor.actual_shapes is None for accessor in cat._attrs[\"input_accessors\"]\n ):\n return False\n if not all(cat._attrs[\"input_masks\"]):\n return False\n return True",
"def test_checkpoints_are_equal():\n model1, X, y, Xval, yval = make_small_model(num_hidden_layers=1)\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model1.compile(loss=loss)\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model1.save(\"fit.tf\")\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS * 2, batch_size=20)\n model1.save(\"refit.tf\")\n\n # same arch, different weights\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"refit.tf\")\n assert same is False, msg\n\n # should be same\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit.tf\")\n print(msg)\n assert same is True, msg\n\n # different architecture\n model2, X, y, Xval, yval = make_small_model(num_hidden_layers=3)\n model2.compile(loss=loss)\n model2.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model2.save(\"fit2.tf\")\n\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit2.tf\")\n print(msg)\n assert same is False, msg\n\n # coping with trashed files\n cleanup_file(\"fit.tf/saved_model.pb\")\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit2.tf\")\n assert same is False, msg\n same, msg = safekeras.check_checkpoint_equality(\"fit2.tf\", \"fit.tf\")\n assert same is False, msg\n\n same, msg = safekeras.check_checkpoint_equality(\"hello\", \"fit2.tf\")\n assert same is False\n assert \"Error re-loading model from\" in msg\n\n same, msg = safekeras.check_checkpoint_equality(\"fit2.tf\", \"hello\")\n assert same is False\n assert \"Error re-loading model from\" in msg\n\n for name in (\"fit.tf\", \"fit2.tf\", \"refit.tf\"):\n cleanup_file(name)",
"def __init__(self, activation_function, input_size=(1, 1), output_size=(1, 1),\n learning_batch_size=1, filter_size=(1, 1), input_feature_maps=1,\n output_feature_maps=1, convolution_mode='valid', step=1):\n super(ConvolutionalLayer, self).__init__(activation_function, input_size=1, output_size=1,\n learning_batch_size=learning_batch_size)\n self._filter_size = filter_size\n self._input_feature_maps = input_feature_maps\n self._output_feature_maps = output_feature_maps\n self._step = step # Laisser à 1 pour l'instant\n self._convolution_mode = convolution_mode\n self._weights = np.random.randn(self._output_feature_maps, self._input_feature_maps,\n self._filter_size[0], self._filter_size[1])\n self._bias = np.zeros(self._output_feature_maps)\n self._input_size = input_size\n self._output_size = output_size\n if self._convolution_mode == 'full':\n self._output_size = (self._input_size[0] + (self._filter_size[0]-1),\n self._input_size[1] + (self._filter_size[1]-1))\n self._reverse_convolution_mode = 'valid'\n # elif self._convolution_mode == 'same':\n # self._output_size = self._input_size\n # self._reverse_convolution_mode = 'same'\n elif self._convolution_mode == 'valid':\n self._output_size = (self._input_size[0] - (self._filter_size[0]-1),\n self._input_size[1] - (self._filter_size[1]-1))\n self._reverse_convolution_mode = 'full'\n else:\n raise Exception(\"Invalid convolution mode\")\n self.input = np.zeros((self._learning_batch_size, self._input_feature_maps,\n self._input_size[0], self._input_size[1]))\n self.activation_levels = np.zeros((self._learning_batch_size, self._output_feature_maps,\n self._output_size[0], self._output_size[1]))\n self.output = np.zeros((self._learning_batch_size, self._output_feature_maps,\n self._output_size[0], self._output_size[1]))",
"def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1) + b_conv1)\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1)\n\n # second conv. layer \n # 5x5 filter, 32 input channel, 64 output channels\n W_conv2 = nn.weight_variable([5, 5, 32, 64])\n b_conv2 = nn.bias_variable([64])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2) + b_conv2)\n\n # second pooling layer (2x2) \n h_pool2 = nn.max_pool_2x2(h_conv2)\n\n # reshape (flatten) output\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([7 * 7 * 64, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv",
"def optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n\n # freeze all convolution variables\n tvars = tf.trainable_variables()\n trainable_vars = [var for var in tvars if not(var.name.startswith('conv'))]\n\n #print(\"Trainable parameters are: \")\n #for var in trainable_vars:\n # print(var.name + \"\\n\")\n\n logits = tf.reshape(nn_last_layer, (-1, num_classes), name=\"logits\")\n pred = tf.nn.softmax(logits)\n output = tf.identity(pred, 'prediction')\n\n correct_label = tf.reshape(correct_label, (-1, num_classes))\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))\n\n tf.summary.scalar('cross_entropy_loss', cross_entropy_loss, collections=['batch'])\n # add regularization to the loss\n reg_losses = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n tf.summary.scalar('regularization loss', reg_losses, collections=['batch'])\n reg_constant = 0.01\n loss = cross_entropy_loss + reg_constant * reg_losses\n\n tf.summary.scalar('total loss', loss, collections=['batch'])\n\n prediction = tf.argmax(logits, 1)\n correct_label_flatten = tf.argmax(correct_label, 1)\n acc = tf.reduce_mean(tf.cast(tf.equal(prediction, correct_label_flatten), tf.float32))\n tf.summary.scalar('train_acc', acc, collections=['epoch_train'])\n tf.summary.scalar('validation_acc', acc, collections=['epoch_validate'])\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_operation = optimizer.minimize(cross_entropy_loss, var_list=trainable_vars)\n\n return logits, training_operation, loss",
"def _explain_convolution(self, input, epsilon=0.005) -> torch.Tensor:\n model_info = self._derive_conv_info(self.model)\n\n input_conv = self._image_to_column(\n input,\n kernel_h=model_info[\"kernel_h\"],\n kernel_w=model_info[\"kernel_w\"],\n padding=model_info[\"padding\"],\n stride=model_info[\"stride\"],\n )\n\n start_layer_relevance = self._layerwise_relevance_propagation(\n self.model, input_conv, self.model(input)\n )\n\n start_layer_relevance = self._column_to_image(\n start_layer_relevance,\n input.size(),\n kernel_h=model_info[\"kernel_h\"],\n kernel_w=model_info[\"kernel_w\"],\n padding=model_info[\"padding\"],\n stride=model_info[\"stride\"],\n )\n\n return start_layer_relevance",
"def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1, 'VALID') + b_conv1) \n # outputs a 24x24x32 image\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1) \n # outputs a 12x12x32 image\n\n # second conv. layer \n # 3x3 filter, 32 input channel, 32 output channels\n W_conv2 = nn.weight_variable([3, 3, 32, 32])\n b_conv2 = nn.bias_variable([32])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2, 'VALID') + b_conv2)\n # outputs a 10x10x32 image\n\n # third conv. layer\n # 3x3 filter, 32 input channel, 32 output channels\n W_conv3 = nn.weight_variable([3, 3, 32, 32])\n b_conv3 = nn.bias_variable([32])\n stride3 = 1\n h_conv3 = tf.nn.relu(nn.conv2d(h_conv2, W_conv3, stride3, 'VALID') + b_conv3)\n # outputs a 8x8x32 image\n\n # reshape (flatten) output\n h_conv3_flat = tf.reshape(h_conv3, [-1, 8*8*32])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([8 * 8 * 32, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv",
"def forward_pass_on_convolutions(self, x):\n conv_output = None\n for module_name, module in self.model._modules.items():\n print(module_name)\n if module_name == 'fc':\n return conv_output, x\n x = module(x) # Forward\n # print(module_name, module)\n if module_name == self.target_layer:\n print('True')\n x.register_hook(self.save_gradient)\n conv_output = x # Save the convolution output on that layer\n return conv_output, x",
"def actor_checkers(a_prev, t_obs_self, v_obs_self, v_obs_others, v_goal, f1=3, k1=[3,3],\n n_h1=64, n_h2=64, n_actions=5, stage=1, bias=True):\n conv = convnet_1(t_obs_self, f1=f1, k1=k1, s1=[1,1], scope='conv')\n conv_linear = tf.layers.dense(inputs=conv, units=32, activation=tf.nn.relu, use_bias=bias, name='conv_linear')\n # concated = tf.concat([conv, v_obs_self, a_prev, v_goal], 1)\n concated = tf.concat([conv_linear, v_obs_self, a_prev, v_goal], 1)\n branch_self = tf.layers.dense(inputs=concated, units=n_h1, activation=tf.nn.relu, use_bias=bias, name='branch_self')\n W_self_h2 = get_variable(\"W_self_h2\", [n_h1, n_h2])\n \n list_mult = []\n list_mult.append( tf.matmul(branch_self, W_self_h2) ) \n\n if stage > 1:\n # use different scope name so that\n # model restoration can ignore new variables\n # that did not exist in previous saved models\n with tf.variable_scope(\"stage-2\"):\n branch_others = tf.layers.dense(inputs=v_obs_others, units=n_h1, activation=tf.nn.relu, use_bias=bias, name='branch_others')\n W_others_h2 = get_variable(\"W_others_h2\", [n_h1, n_h2])\n list_mult.append( tf.matmul(branch_others, W_others_h2) )\n\n b = tf.get_variable('b', [n_h2])\n h2 = tf.nn.relu(tf.nn.bias_add(tf.add_n(list_mult), b))\n\n # Output layer\n out = tf.layers.dense(inputs=h2, units=n_actions, activation=None, use_bias=bias, name='actor_out')\n probs = tf.nn.softmax(out, name='actor_softmax')\n\n return probs",
"def test_convolution_backprop(transformer_factory):\n N = 128\n C, K = 3, 2\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n output = ng.sum(ng.convolution(conv_params, inputs, filters, ax_o), out_axes=())\n\n with ExecutorFactory() as factory:\n dcdf_sym_fun = factory.derivative(output, filters, inputs)\n dcdf_num_fun = factory.numeric_derivative(output, filters, .01, inputs)\n dcdf_sym_val = dcdf_sym_fun(filter_value, input_value)\n dcdf_num_val = dcdf_num_fun(filter_value, input_value)\n\n ng.testing.assert_allclose(dcdf_sym_val, dcdf_num_val, rtol=1)",
"def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis):\n channel_axis.name = \"channel\"\n assert len(conv1d_placeholder.axes.find_by_name(\"channel\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n output = conv_layer(conv1d_placeholder, channel_axes=\"channel\")\n assert output.axes == conv1d_placeholder.axes",
"def cifar10_8layers(input_image, keep_prob, init_method=tf.truncated_normal_initializer()):\n with tf.variable_scope(\"conv1_1\"):\n W1_1 = tf.get_variable(name=\"W1_1\", shape=[3,3,3,32], dtype=tf.float32, \\\n initializer=init_method)\n b1_1 = tf.get_variable(name=\"b1_1\", shape=[32], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv1_1 = conv_relu(input_image, W1_1, b1_1)\n with tf.variable_scope(\"conv1_2\"):\n W1_2 = tf.get_variable(name=\"W1_2\", shape=[3,3,32,32], dtype=tf.float32, \\\n initializer=init_method)\n b1_2 = tf.get_variable(name=\"b1_2\", shape=[32], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv1_2 = max_pool(conv_relu(conv1_1, W1_2, b1_2))\n\t#conv1_2 = tf.nn.dropout(conv1_2, keep_prob)\n with tf.variable_scope(\"conv2_1\"):\n W2_1 = tf.get_variable(name=\"W2_1\", shape=[3,3,32,64], dtype=tf.float32, \\\n initializer=init_method)\n b2_1 = tf.get_variable(name=\"b2_1\", shape=[64], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv2_1 = conv_relu(conv1_2, W2_1, b2_1)\n #conv2_1 = tf.nn.dropout(conv2_1, keep_prob)\n with tf.variable_scope(\"conv2_2\"):\n W2_2 = tf.get_variable(name=\"W2_2\", shape=[3,3,64,64], dtype=tf.float32, \\\n initializer=init_method)\n b2_2 = tf.get_variable(name=\"b2_2\", shape=[64], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv2_2 = max_pool(conv_relu(conv2_1, W2_2, b2_2))\n #conv2_2 = tf.nn.dropout(conv2_2, keep_prob)\n with tf.variable_scope(\"conv3_1\"):\n W3_1 = tf.get_variable(name=\"W3_1\", shape=[3,3,64,128], dtype=tf.float32, \\\n initializer=init_method)\n b3_1 = tf.get_variable(name=\"b3_1\", shape=[128], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_1 = conv_relu(conv2_2, W3_1, b3_1)\n #conv3_1 = tf.nn.dropout(conv3_1, keep_prob)\n with tf.variable_scope(\"conv3_2\"):\n W3_2 = tf.get_variable(name=\"W3_2\", shape=[3,3,128,128], dtype=tf.float32, \\\n initializer=init_method)\n b3_2 = tf.get_variable(name=\"b3_2\", shape=[128], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_2 = max_pool(conv_relu(conv3_1, W3_2, b3_2))\n conv3_2 = tf.nn.dropout(conv3_2, keep_prob)\n with tf.variable_scope(\"fc1\"):\n W4 = tf.get_variable(name=\"W4\", shape=[4*4*128,256], dtype=tf.float32, \\\n initializer=init_method)\n b4 = tf.get_variable(name=\"b4\", shape=[256], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_flat = tf.reshape(conv3_2, [-1, 4*4*128])\n fc1 = fc_relu(conv3_flat, W4, b4)\n fc1 = tf.nn.dropout(fc1, keep_prob)\n with tf.variable_scope(\"fc2\"):\n W5 = tf.get_variable(name=\"W5\", shape=[256,512], dtype=tf.float32, \\\n initializer=init_method)\n b5 = tf.get_variable(name=\"b5\", shape=[512], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n fc2 = fc_relu(fc1, W5, b5)\n fc2 = tf.nn.dropout(fc2, keep_prob)\n with tf.variable_scope(\"output\"):\n W6 = tf.get_variable(name=\"W6\", shape=[512,10], dtype=tf.float32, \\\n initializer=init_method)\n b6 = tf.get_variable(name=\"b6\", shape=[10], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n y_logit = tf.matmul(fc2, W6) + b6\n return y_logit, tf.nn.softmax(y_logit, name=\"softmax\")",
"def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))",
"def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)",
"def conv2d(X,W,b,strides=1):\n \"\"\"\n If the padding = 'SAME', the input and output images are of the same size by implementing\n zero padding on the input. (TF will compute using the padding equation from notes 4-12-2018) \n If the padding = 'VALID', the input is not padded and the output image size will be less \n than the input image.\n \"\"\"\n net = tf.nn.conv2d(X,W,strides=[1,strides,strides,1],padding='SAME')\n net = tf.nn.bias_add(net,b) #add bias to each convolved value, but all get the same bias value\n return tf.nn.relu(net) #return the output of the detection layer",
"def test_conv2d(self):\n\n \"\"\" Testing creation\"\"\"\n builder = BuildConvLayer()\n self.assertEqual(builder.nx_graph.number_of_nodes(), 0, msg='The config graph is not empty')\n builder.build_graph(builder.define_graph())\n builder.compute_graph.create_session()\n\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].name, 'conv_layer')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].scope, 'outer_scope')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].filter, 3)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].type, 'hidden')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].activations, tf.nn.relu)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].use_bias, True)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].flatten_output, True)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].padding, 'same')\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].stride, (1, 1))\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].kernel_size, (3, 3))\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].kernel_initializer, tf.zeros_initializer)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].bias_initializer, tf.zeros_initializer)\n self.assertEqual(builder.nx_graph.node['conv_layer']['config'].tensorboard_verbosity, 2)\n\n self.assertEqual(builder.nx_graph.number_of_nodes(), 2, msg='Incorrect number of nodes in the nx graph')\n self.assertTrue('config' in builder.nx_graph.node['conv_layer'].keys(),\n msg='The config was not added to the graph')\n self.assertTrue('component_builder' in builder.nx_graph.node['conv_layer'].keys(),\n msg='The component builder was not added to the nx node.')\n with builder.compute_graph.tf_graph.as_default():\n self.assertEqual(len(tf.get_collection(tf.GraphKeys.SUMMARIES, scope='test_conv')), 3,\n msg='Tensorboard summaries missing')\n self.assertTrue(builder.nx_graph.node['input']['is_build'], msg='Is build parameter was not updated')\n self.assertTrue(builder.nx_graph.node['conv_layer']['is_build'], msg='Is build parameter was not updated')\n self.assertIsNotNone(builder.nx_graph.node['input']['output'], msg='The nx node output is None')\n self.assertIsNotNone(builder.nx_graph.node['conv_layer']['output'], msg='The nx node output is None')\n\n # get the var form the tf graphs\n with builder.compute_graph.tf_graph.as_default():\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='test_conv')\n self.assertEqual(len(train_vars), 2, msg=\"Incorrect number of variables in the tf graph\")\n weights = train_vars[0]\n bias = train_vars[1]\n self.assertEqual(weights.name, 'test_conv/outer_scope/conv_layer/conv2d/kernel:0')\n self.assertEqual(bias.name, 'test_conv/outer_scope/conv_layer/conv2d/bias:0')\n\n builder.compute_graph.initialize_graph_variables()\n result = builder.compute_graph.session.run(builder.compute_graph.hidden['conv_layer'],\n feed_dict={builder.compute_graph.inputs['input']: np.ones((1, 3, 3, 3))})[0]\n self.assertEqual(result.tolist(), [0] * 27, msg='Wrong output')",
"def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, 
heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)",
"def cnn(train_X, train_y, test_X, n_epochs =50, batch_size = 100, eps = 0.01):\n \n def get_onehot(x):\n onehot=np.zeros((len(x),10))\n onehot[np.arange(len(x)),x]=1\n return onehot\n \n def f_props(layers, x):\n for layer in layers:\n x = layer.f_prop(x)\n return x\n \n layers = [ # (縦の次元数)x(横の次元数)x(チャネル数)\n Conv((5, 5, 1, 20), tf.nn.relu), # 28x28x 1 -> 24x24x20\n Pooling((1, 2, 2, 1)), # 24x24x20 -> 12x12x20\n Conv((5, 5, 20, 50), tf.nn.relu), # 12x12x20 -> 8x 8x50\n Pooling((1, 2, 2, 1)), # 8x 8x50 -> 4x 4x50\n Flatten(),\n Dense(4*4*50, 10, tf.nn.softmax)\n ]\n\n x = tf.placeholder(tf.float32, [None, 28, 28, 1])\n t = tf.placeholder(tf.float32, [None, 10])\n\n y = f_props(layers, x)\n cost = -tf.reduce_mean(tf.reduce_sum(t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), axis=1))\n train = tf.train.GradientDescentOptimizer(eps).minimize(cost)\n valid = tf.argmax(y, 1)\n \n\n print(\"BEGIN: CNN learning with n_epochs = {0}, batch_size = {1}, eps = {2}\".format(n_epochs, batch_size, eps))\n \n train_X = train_X.reshape((train_X.shape[0], 28, 28, 1))\n test_X = test_X.reshape((test_X.shape[0], 28, 28, 1))\n train_y=get_onehot(train_y)\n \n train_X, valid_X, train_y, valid_y = train_test_split(train_X, train_y, test_size=0.1, random_state=42)\n n_batches = train_X.shape[0]//batch_size\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n train_X, train_y = shuffle(train_X, train_y, random_state=random_state)\n for i in range(n_batches):\n start = i * batch_size\n end = start + batch_size\n sess.run(train, feed_dict={x: train_X[start:end], t: train_y[start:end]})\n pred_y, valid_cost = sess.run([valid, cost], feed_dict={x: valid_X, t: valid_y})\n print('\\tEPOCH:: %i, Validation cost: %.3f, Validation F1: %.3f' % (epoch + 1, valid_cost, f1_score(np.argmax(valid_y, 1).astype('int32'), pred_y, average='macro')))\n \n pred_y= sess.run(valid, feed_dict={x: test_X})\n return pred_y",
"def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur",
"def __call__(self, x: jnp.ndarray, *, train: bool) -> jnp.ndarray:\n if self.padding == 'SAME':\n padding = ((1, 2), (1, 2))\n elif self.padding == 'VALID':\n padding = ((0, 0), (0, 0))\n else:\n raise ValueError(f'Unkonwn padding: {self.padding}')\n x = nn.Conv(\n features=self.features,\n kernel_size=(3, 3),\n input_dilation=(2, 2),\n padding=padding)(\n x)\n if self.use_batch_norm:\n x = nn.BatchNorm(use_running_average=not train)(x)\n return x"
] | [
"0.5629616",
"0.55029637",
"0.5458144",
"0.5418558",
"0.53147036",
"0.5189352",
"0.50939095",
"0.5077204",
"0.50636894",
"0.5061421",
"0.50255704",
"0.501963",
"0.49810603",
"0.49770007",
"0.49766997",
"0.49694854",
"0.49628457",
"0.4959388",
"0.4954533",
"0.4947943",
"0.49389753",
"0.49255243",
"0.49251276",
"0.49248335",
"0.4923944",
"0.49116534",
"0.4894515",
"0.48885822",
"0.487499",
"0.4870177"
] | 0.5650857 | 0 |
Writes tabular metrics in LaTeX format. | def perf2latex(latex_fn, all_perf, metrics_name, slice_v, cam_v):
    slice_num = slice_v.shape[0]
    f = open('%s'%latex_fn, 'w')
    f.write("\\documentclass{article}\n")
    f.write("\\usepackage[utf8]{inputenc}\n")
    f.write("\\usepackage{booktabs} \n")
    f.write("\\usepackage[]{float}\n")
    f.write("\\usepackage[margin=1.2in]{geometry}\n")
    f.write("\\begin{document}\n\n")
    for m_name in metrics_name:
        print(m_name)
        f.write('\\begin{table}[tbh]\n')
        f.write('\\begin{center}\n')
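        # Column layout: one column for the survey index plus one per slice, all centered with vertical rules.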
        f.write('\\begin{tabular}{|*{%d}{c|}}\n'%(slice_num + 1))
        f.write('\\hline\n')
        f.write(' Survey')
        #for slice_idx, slice_id in enumerate(slice_cam_id[:-1]):
        for j, (slice_id, cam_id) in enumerate(zip(slice_v, cam_v)):
            f.write(' & %d\\_c%d'%(slice_id, cam_id))
        f.write(' \\\\ \n')
        f.write('\\hline\n')
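        # Body: one row per survey, one cell per slice with the metric value.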
        m = all_perf[m_name]
        print(m.shape)
        survey_num = m.shape[0]
        for i in range(survey_num):
            f.write('%d'%(i))
            for j in range(slice_num):
                f.write(' & %.3f'%m[i,j])
            f.write(' \\\\ \n')
        f.write('\\hline\n')
        f.write('\\end{tabular}\n')
        f.write('\\end{center}\n')
        f.write('\\caption{Metric: %s}\n'%(m_name))
        f.write('\\end{table}\n\n\n')
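    # All metric tables written; close out the LaTeX document.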
    f.write('\\end{document}\n')
    print('\\end{document}\n')
    f.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def out(lam, eng, mat): # {{{1\n print(\"\\\\begin{table}[!htbp]\")\n print(\" \\\\renewcommand{\\\\arraystretch}{1.2}\")\n txt = \" \\\\caption{{\\\\label{{tab:{0}}}properties of {0}}}\"\n # Raw underscores in LaTeX text mode produce “Missing $” errors.\n texlname = lam.name.replace('_', '\\_')\n print(txt.format(texlname))\n print(\" \\\\centering\\\\footnotesize{\\\\rule{0pt}{10pt}\")\n print(\" \\\\tiny calculated by lamprop {}\\\\\\\\[3pt]}}\".format(__version__))\n if eng:\n _engprop(lam)\n if mat:\n _matrices(lam)\n print(\"\\\\end{table}\\n\") # 1}}}",
"def make_latex_table(scalar_metrics, threshold):\n latex = ''\n\n # head of the table\n latex += '\\\\begin{table} \\n'\n latex += '\\\\begin{center} \\n'\n latex += '\\\\begin{tabular}{ l | l } \\n'\n latex += '\\\\toprule \\\\ \\n'\n latex += '\\\\textbf{Metric} & \\\\textbf{Value} \\\\ \\midrule \\n'\n\n # body of the table\n # make 2D list for every section\n body_list = []\n body_list.append(['MSE', str(scalar_metrics['mse']) ])\n body_list.append(['RMSE', str(scalar_metrics['rmse']) ])\n body_list.append(['MAE', str(scalar_metrics['mae']) ])\n\n latex += array_as_latex_table(body_list)\n\n # footer of the table\n latex += '\\end{tabular} \\n'\n latex += '\\end{center} \\n'\n latex += '\\caption{Scaler performance metrics and default values} \\n'\n latex += '\\label{tab:TODO} \\n'\n latex += '\\end{table} \\n'\n\n return latex",
"def latex_table():\n \n t = Table.read('../data/stream_origin.fits')\n N = len(t)\n \n f = open('../paper/stream_origin.tex', 'w')\n for i in range(N):\n t_ = t[i]\n for k in t_.colnames:\n if (t_[k]==np.nan) | (t_[k]=='nan'):\n t_[k] = '\\dots'\n #f.write('{:s} & {:s} & {:s} & {:s} & {:.1f}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['type'], t_['feh']))\n line = '{:s} & {:s} & {:s} & {:s} & {:s}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['friends'], t_['type'])\n f.write(line)\n print(line)\n \n f.close()",
"def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)",
"def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')",
"def write_latex_table(machine, all_benchs, summary, diff, skipped, tex_file, num_splits,\n with_preamble=False, longtable=False, diff_vms=[]):\n\n num_benchmarks = len(all_benchs)\n all_vms = sorted(summary.keys())\n num_vms = len(summary)\n\n # decide how to lay out the splits\n num_vms_rounded = int(math.ceil(num_vms / float(num_splits)) * num_splits)\n vms_per_split = int(num_vms_rounded / float(num_splits))\n splits = [[] for x in xrange(num_splits)]\n vm_num = 0\n split_idx = 0\n for vm_idx in xrange(num_vms_rounded):\n if vm_idx < len(all_vms):\n vm = all_vms[vm_idx]\n else:\n vm = None\n splits[split_idx].append(vm)\n vm_num += 1\n if vm_num % vms_per_split == 0:\n split_idx += 1\n\n with open(tex_file, 'w') as fp:\n if with_preamble:\n fp.write(preamble(TITLE))\n if diff_vms:\n fp.write('\\\\centering{%%\\n\\\\Large{\\\\textbf{%s vs. %s}}%%\\n}\\n\\\\\\\\\\n~\\\\\\\\\\n\\n'\n % (diff_vms[0], diff_vms[1]))\n legends = get_latex_symbol_map() + ' \\\\\\\\ ' + legend()\n fp.write('\\\\centering %s' % legends)\n fp.write('\\n\\n\\n')\n if not longtable:\n fp.write('\\\\begin{landscape}\\n')\n fp.write('\\\\begin{table*}[hptb]\\n')\n fp.write('\\\\vspace{.8cm}\\n')\n fp.write('\\\\begin{adjustbox}{totalheight=12.4cm}\\n')\n # Emit table header.\n heads1 = TABLE_HEADINGS_START1 + '&'.join([TABLE_HEADINGS1] * num_splits)\n heads2 = TABLE_HEADINGS_START2 + '&'.join([TABLE_HEADINGS2] * num_splits)\n heads = '%s\\\\\\\\%s' % (heads1, heads2)\n if longtable:\n fp.write(start_longtable(TABLE_FORMAT, heads))\n else:\n fp.write(start_table(TABLE_FORMAT, heads))\n split_row_idx = 0\n for row_vms in zip(*splits):\n bench_idx = 0\n skipped_before = [b for (b, v) in skipped[SKIPPED_BEFORE] if v == row_vms[0]]\n skipped_after = [b for (b, v) in skipped[SKIPPED_AFTER] if v == row_vms[0]]\n for bench in sorted(all_benchs + skipped_before + skipped_after):\n row = []\n for vm in row_vms:\n if vm is None:\n continue # no more results\n try:\n this_summary = summary[vm][bench]\n except KeyError:\n if bench in skipped_before or bench in skipped_after:\n classification = '\\\\emph{Skipped}'\n else:\n classification = ''\n last_cpt = BLANK_CELL\n time_steady = BLANK_CELL\n last_mean = BLANK_CELL\n steady_iter_var = BLANK_CELL\n steady_time_var = BLANK_CELL\n else:\n if vm in diff and bench in diff[vm]:\n classification = colour_tex_cell(diff[vm][bench][CLASSIFICATIONS], this_summary['style'])\n last_cpt = colour_tex_cell(diff[vm][bench][STEADY_ITER], this_summary['last_cpt'])\n steady_iter_var = colour_tex_cell(diff[vm][bench][STEADY_ITER_VAR], this_summary['steady_iter_var'])\n time_steady = colour_tex_cell(diff[vm][bench][STEADY_ITER], this_summary['time_to_steady_state'])\n last_mean = colour_tex_cell(diff[vm][bench][STEADY_STATE_TIME], this_summary['last_mean'])\n steady_time_var = colour_tex_cell(diff[vm][bench][STEADY_STATE_TIME_VAR], this_summary['steady_time_var'])\n else:\n classification = this_summary['style']\n last_cpt = this_summary['last_cpt']\n steady_iter_var = this_summary['steady_iter_var']\n time_steady = this_summary['time_to_steady_state']\n last_mean = this_summary['last_mean']\n steady_time_var = this_summary['steady_time_var']\n classification = '\\\\multicolumn{1}{l}{%s}' % classification\n if classification == STYLE_SYMBOLS['flat']:\n last_cpt = BLANK_CELL\n time_steady = BLANK_CELL\n if last_cpt == '':\n last_cpt = BLANK_CELL\n if time_steady == '':\n time_steady = BLANK_CELL\n if last_mean == '':\n last_mean = BLANK_CELL\n\n if bench_idx == 0:\n if num_benchmarks == 10:\n fudge = 
4\n elif num_benchmarks == 12:\n fudge = 5\n else:\n fudge = 0\n vm_cell = '\\\\multirow{%s}{*}{\\\\rotatebox[origin=c]{90}{%s}}' \\\n % (num_benchmarks + fudge, vm)\n else:\n vm_cell = ''\n row_add = [BLANK_CELL, vm_cell, classification, last_cpt,\n steady_iter_var, time_steady, last_mean, steady_time_var]\n if not row: # First bench in this row, needs the vm column.\n if vm in diff and bench in diff[vm]:\n bname = colour_tex_cell(diff[vm][bench][INTERSECTION], bench)\n else:\n bname = bench\n row.insert(0, escape(bname))\n row.extend(row_add)\n vm_idx += 1\n fp.write('&'.join(row))\n # Only -ve space row if not next to a midrule\n if not longtable and bench_idx < num_vms - 1:\n fp.write('\\\\\\\\[-3pt] \\n')\n else:\n fp.write('\\\\\\\\ \\n')\n bench_idx += 1\n if split_row_idx < vms_per_split - 1:\n if longtable:\n fp.write('\\\\hline\\n')\n else:\n fp.write('\\\\midrule\\n')\n split_row_idx += 1\n if longtable:\n fp.write(end_longtable())\n else:\n fp.write(end_table())\n if with_preamble:\n if not longtable:\n fp.write('\\\\end{adjustbox}\\n')\n fp.write('\\\\end{table*}\\n')\n fp.write('\\\\end{landscape}\\n')\n fp.write(end_document())",
"def create_latex_table(data, id):\n bd = data['bd']\n sd = data['sd']\n \n filename = 'LatestResults.tex'\n file = r'..\\latex\\tables\\\\' + filename\n\n if os.path.exists(file):\n f_temp = os.path.splitext(file)[0] # without extension\n os.rename(file, f_temp + '_' + id + '.tex')\n\n f = codecs.open(file, 'w', 'utf-8')\n \n f.write('\\n' + r'\\begin{table}' + '\\n')\n f.write(r' \\centering' + '\\n')\n f.write(r' \\caption{Results for each drum instrument with batch sizes 64, 256 and 512.}' + '\\n')\n f.write(r' \\begin{tabular}{l c c c}' + '\\n')\n f.write(r' \\textbf{Batch size} & Metric & BD & SD \\\\' + '\\n')\n f.write(r' \\midrule' + '\\n')\n f.write(r' \\midrule' + '\\n')\n \n for batch_size in BATCHES:\n f.write(' ' + str(batch_size).rstrip('\\n'))\n # 0.805 +- 0.02\n f.write(r' & P & ' + r'$' + '{:.3}'.format(bd[batch_size]['p_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['p_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['p_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['p_std']) + '$' + r' \\\\' + '\\n')\n f.write(r' & R & ' + r'$' + '{:.3}'.format(bd[batch_size]['r_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['r_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['r_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['r_std']) + '$' + r' \\\\' + '\\n')\n f.write(r' & F & ' + r'$' + '{:.3}'.format(bd[batch_size]['f_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['f_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['f_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['f_std']) + '$' + r' \\\\' + '\\n')\n # Don't write horizontal line on the last batch.\n if batch_size != BATCHES[-1]:\n f.write(r' \\midrule' + '\\n')\n\n f.write(r' \\end{tabular}' + '\\n')\n f.write(r' \\label{tab:ResultsTable}' + '\\n')\n f.write(r'\\end{table}' + '\\n')\n f.close()",
"def write_to_latex(arr, title, n, m, function_type, region):\r\n df = pd.DataFrame(arr)\r\n df.to_csv(df.to_csv('%s_n=%s_m=%s.csv'\r\n % (title, n, m)))\r\n with open('%s_n=%s_m=%s_%s_%s.tex' %\r\n (title, n, m, function_type, region), 'w') as tf:\r\n tf.write(df.to_latex())",
"def write_html_file(out_table, outpath):\r\n page_out = PAGE_HTML % ('Taxa Summaries', out_table)\r\n out = open(outpath, \"w+\")\r\n out.write(page_out)\r\n out.close()",
"def to_latex_table(self, parameter_dict=None, save_to_file=None):\n import os\n\n if save_to_file is not None and os.path.isfile(\"{}\".format(save_to_file)):\n raise FileExistsError(\n \"The file {} already exists.\".format(save_to_file)\n )\n\n table = self.latex_table([self.samples_dict], parameter_dict)\n if save_to_file is None:\n print(table)\n elif os.path.isfile(\"{}\".format(save_to_file)):\n logger.warning(\n \"File {} already exists. Printing to stdout\".format(save_to_file)\n )\n print(table)\n else:\n with open(save_to_file, \"w\") as f:\n f.writelines([table])",
"def markdown_table(self, which):\n if which == 'C':\n coef = 'C'\n elif which == 'c':\n coef = 'c'\n elif which == 'f':\n coef = 'f'\n str = '|order|'\n for i in range(1,N+1):\n str = str + '$%s_{%d}$ |' % (coef,i)\n str = str + '\\n|'\n for i in range(1,N+1):\n str = str + '-|'\n str = str + '\\n'\n for i in range(1,self.N+1):\n str = str + (self.dat[i]).markdown_row(self.N, which)\n return str",
"def ascii_table(self, tablefmt=\"pipe\"):\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)",
"def print_for_latex(results, sizes, models):\n for model in models:\n for t in ['nl', 'wl']:\n line = model + ' ' + t.upper()\n for size in sizes:\n key = model + '_' + t + '_' + str(size)\n line += ' & ' + str(round(results[key], 4))\n line += ' \\\\\\hline'\n print(line)",
"def print_table2(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table2.txt')\n\n\n with open(out_file, \"w\") as text_file:\n\n for idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n # new line\n header_string = ' & '\n line_string = '({}) '.format(struc_name)\n\n for p_idx, phase in enumerate(['ED', 'ES']):\n for measure in ['dice', 'assd', 'hd']:\n\n header_string += ' & {} ({}) '.format(phase, measure)\n\n dat = df.loc[(df['phase'] == phase) & (df['struc'] == struc_name)]\n\n if measure == 'dice':\n\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if p_idx == 0:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n if idx == 0:\n text_file.write(header_string)\n\n text_file.write(line_string)\n\n return 0",
"def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):\n\n methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']\n accuracies = [\"{:.2%}\".format(acc_sent2), \"{:.2%}\".format(acc_wv03), \"{:.2%}\".format(acc)]\n precisions = [\"{:.2%}\".format(prec_sent2), \"{:.2%}\".format(prec_wv03), \"{:.2%}\".format(prec)]\n recalls = [\"{:.2%}\".format(recall_sent2), \"{:.2%}\".format(recall_wv03), \"{:.2%}\".format(recall)]\n\n data = methods + accuracies + precisions + recalls\n\n data = np.reshape(data, (4, 3)).T\n\n display(HTML(\n '<table style=\"width:100%;\"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(\n '</tr><tr>'.join(\n '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)\n )\n ))",
"def pretty(self):\n #table = [\"\".join([\"%8s \" % s for s in self.alpha.getSymbols()])]\n table = []\n for row in PWM.getFreq(self):\n table.append(\"\".join([\"%8.6f \" % y for y in row]))\n return table",
"def _make_tex_table(self, tabletitle):\r\n stattable = (\r\n r\"\"\"\r\n \\begin{table}[h!]\r\n \\caption{%s}\r\n \\centering\r\n \\begin{tabular}{l l l l l}\r\n \\toprule\r\n \\textbf{Statistic} & \\textbf{Inlet} & \\textbf{Outlet} \\\\\"\"\"\r\n % tabletitle\r\n )\r\n\r\n stats = [\r\n {\"name\": \"Count\", \"attribute\": \"N\", \"rule\": \"top\", \"forceint\": True},\r\n {\"name\": \"Number of NDs\", \"attribute\": \"ND\", \"forceint\": True},\r\n {\"name\": \"Min; Max\", \"attribute\": [\"min\", \"max\"], \"twoval\": True},\r\n {\"name\": \"Mean\", \"attribute\": \"mean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"mean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Standard Deviation\", \"attribute\": \"std\"},\r\n {\"name\": \"Log. Mean\", \"attribute\": \"logmean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"logmean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Log. Standard Deviation\", \"attribute\": \"logstd\"},\r\n {\"name\": \"Geo. Mean\", \"attribute\": \"geomean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"geomean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Coeff. of Variation\", \"attribute\": \"cov\"},\r\n {\"name\": \"Skewness\", \"attribute\": \"skew\"},\r\n {\"name\": \"Median\", \"attribute\": \"median\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"median_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Quartiles\", \"attribute\": [\"pctl25\", \"pctl75\"], \"twoval\": True},\r\n {\r\n \"name\": \"Number of Pairs\",\r\n \"attribute\": \"n_pairs\",\r\n \"rule\": \"top\",\r\n \"fromdataset\": True,\r\n \"sigfigs\": 1,\r\n \"forceint\": True,\r\n },\r\n {\r\n \"name\": \"Wilcoxon p-value\",\r\n \"attribute\": \"wilcoxon_p\",\r\n \"fromdataset\": True,\r\n \"pval\": True,\r\n \"tex\": True,\r\n },\r\n {\r\n \"name\": \"Mann-Whitney p-value\",\r\n \"attribute\": \"mannwhitney_p\",\r\n \"fromdataset\": True,\r\n \"pval\": True,\r\n \"tex\": True,\r\n },\r\n ]\r\n for s in stats:\r\n stattable += self._tex_table_row(**s)\r\n\r\n stattable += r\"\"\"\r\n \\bottomrule\r\n \\end{tabular}\r\n \\end{table}\"\"\"\r\n\r\n return stattable + \"\\n\"",
"def to_latex_table(self, tab=\" \", caption=\"TODO\", label=\"TODO\"):\n return \"\".join(\n (\n \"\\\\begin{center}\\n\",\n f\"{tab}\\\\begin{{table}}[ht]\\n\",\n f\"{tab*2}\\\\centering\\n\",\n f'{tab*2}\\\\rowcolors{{2}}{{white}}{{gray!25}}\\n'\n f\"{tab*2}\\\\begin{{tabular}}{{crrrrrr}}\\n\",\n (\n f\"{tab*3}\\\\cellcolor[gray]{{0.7}} & \\\\multicolumn{{2}}{{c}}\"\n \"{BT\\\\cellcolor[gray]{0.7}} & \\\\multicolumn{2}{c}{BJ\"\n \"\\\\cellcolor[gray]{0.7}} & \\\\multicolumn{2}{c}\"\n \"{CBJ\\\\cellcolor[gray]{0.7}} \\\\\\\\\\n\"\n ),\n (\n f\"{tab*3}\\\\cellcolor[gray]{{0.7}} Test suite & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)}\\\\\\\\\\n\"\n ),\n \"\".join(\n (\n f\"{tab*3}{i} & {bt.nodes_expanded} & {bt.time} \"\n f\"& {bj.nodes_expanded} & {bj.time} & {cbj.nodes_expanded} & \"\n f\"{cbj.time}\\\\\\\\\\n\"\n for i, (bt, bj, cbj) in enumerate(zip(*self.data))\n )\n ),\n f\"{tab*2}\\\\end{{tabular}}\\n\"\n f\"{tab*2}\\\\caption{{{caption}}}\\n\"\n f\"{tab*2}\\\\label{{tab:{label}}}\\n\"\n f\"{tab}\\\\end{{table}}\\n\"\n \"\\\\end{center}\",\n )\n )",
"def summarize_as_table(self):\n h = human_readable_size\n h_throughput = human_readable_throughput\n table = [\n ['Total Time (seconds)', '%.3f' % self.total_time,\n self.std_dev_total_time],\n ['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],\n ['Maximum CPU (percent)', '%.1f' % self.max_cpu,\n self.std_dev_max_cpu],\n ['Maximum Sent Throughput', h_throughput(self.max_sent_throughput),\n h_throughput(self.max_sent_throughput)],\n ['Maximum Recv Throughput', h_throughput(self.max_recv_throughput),\n h_throughput(self.max_recv_throughput)],\n ['Average Memory', h(self.average_memory),\n h(self.std_dev_average_memory)],\n ['Average CPU (percent)', '%.1f' % self.average_cpu,\n self.std_dev_average_cpu],\n ['Average Sent Throughput',\n h_throughput(self.average_sent_throughput),\n h_throughput(self.average_sent_throughput)],\n ['Average Recv Throughput',\n h_throughput(self.average_recv_throughput),\n h_throughput(self.average_recv_throughput)],\n ]\n return tabulate(\n table,\n headers=[\n 'Metric over %s run(s)' % (self.total_files),\n 'Mean',\n 'Standard Deviation'\n ],\n tablefmt=\"grid\"\n )",
"def print_metrics(self):\n output = \"\"\n metrics = self.get_all_metrics()\n for k, v in metrics.items():\n # Print the help line\n output += \"\\n# HELP {name} {help}\\n\".format(name=v['name'],\n help=v['help'])\n # and the type line\n output += \"# TYPE {name} {type}\\n\".format(name=v['name'],\n type=v['type'])\n for sample in v['values']:\n labels = json.loads(sample, object_pairs_hook=OrderedDict)\n if v['type'] == 'histogram' and labels.get('le') == '_sum':\n labels.pop('le', None)\n mname = '{name}_sum'.format(name=v['name'])\n elif v['type'] == 'histogram' and labels.get('le') == '+Inf':\n labels.pop('le', None)\n mname = '{name}_count'.format(name=v['name'])\n elif v['type'] == 'histogram':\n mname = '{name}_bucket'.format(name=v['name'])\n else:\n mname = v['name']\n output += \"{name}{labels} {value}\\n\".format(name=mname,\n labels=self.format_labels(labels),\n value=self.format_value(v['values'][sample]))\n return output",
"def print_table_results(train, devel, test, tablefmt, file=sys.stdout):\n\n # Lazy load tabulate\n global tabulate\n if tabulate is None:\n try:\n from tabulate import tabulate\n except ImportError:\n print('Printing latex results requires the `tabulate` package. Tabulate can be installed by running: \\n'\n '$pip install tabulate')\n sys.exit(1)\n\n def _evaluate(dataset: dict, name: str, metrics=None):\n \"\"\"\n Fetch the given metrics from the given dataset metric dictionary in the order they were given\n :param dataset: dictionary containing metrics for a specific dataset\n :param metrics: list of metric names to fetch\n :return: list of metric values\n \"\"\"\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures\n\n # Create a LaTeX table using tabulate\n table = tabulate([_evaluate(train, 'train'),\n _evaluate(devel, 'devel'),\n _evaluate(test, 'test')],\n headers=['Data', 'Acc.', 'AUROC', 'AUPRC', 'P', 'R', 'F1', 'F2'],\n tablefmt=tablefmt)\n print(table, file=file)",
"def tab_delim_table(self):\n self.generate()\n\n header = ' \\t '.join([r'{: ^7}'.format(col) for col in self.columns])\n lines = []\n for row in self.rows:\n bits = []\n for col in self.columns:\n if col in self.formatters:\n bits.append(self.formatters[col].format(row[col]))\n else:\n bits.append(self.formatters.get(col, '{: ^7}').format(row[col] if row[col] else ''))\n lines.append(' \\t '.join(bits))\n\n return \"{}\\n{}\".format(header, '\\n'.join(lines))",
"def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))",
"def write_latex(\n cosmology, file, *, overwrite=False, cls=QTable, latex_names=True, **kwargs\n):\n # Check that the format is 'latex', 'ascii.latex' (or not specified)\n fmt = kwargs.pop(\"format\", \"ascii.latex\")\n if fmt != \"ascii.latex\":\n raise ValueError(f\"format must be 'ascii.latex', not {fmt}\")\n\n # Set cosmology_in_meta as false for now since there is no metadata being kept\n table = to_table(cosmology, cls=cls, cosmology_in_meta=False)\n\n cosmo_cls = type(cosmology)\n for name in table.columns.keys():\n param = getattr(cosmo_cls, name, None)\n if not isinstance(param, Parameter) or param.unit in (None, u.one):\n continue\n # Get column to correct unit\n table[name] <<= param.unit\n\n # Convert parameter names to LaTeX format\n if latex_names:\n new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.__parameters__]\n table.rename_columns(cosmology.__parameters__, new_names)\n\n table.write(file, overwrite=overwrite, format=\"ascii.latex\", **kwargs)",
"def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)",
"def to_html_table(self):\n td = '<td>'\n nwtd = '<td nowrap=\"true\">'\n ftd = '<td class=\"format\">'\n ctd = '<td class=\"cen\">'\n etd = '</td>'\n \n if self.is_power_onoff():\n out = td + 'Power On/Off' + etd\n else:\n out = nwtd + '<strong>' + self['target'].ljust(20) + '</strong>' + etd\n\n if 'Date' in self:\n out += ctd + self['Date'] + etd\n else:\n out += td + etd\n\n if 'UTstart' in self:\n out += ctd + self['UTstart'] + etd\n else:\n out += td + etd\n\n if 'UTend' in self:\n out += ctd + self['UTend'] + etd\n else:\n out += td + etd\n\n if 'exposure' in self:\n out += ctd + self['exposure'] + etd\n else:\n out += td + etd\n\n if 'sample' in self:\n out += ctd + self['sample'] + etd\n else:\n out += td + etd\n\n if 'nframe' in self:\n out += ctd + self['nframe'] + etd\n else:\n out += td + etd\n \n if self.is_power_onoff():\n out += (td + etd)*3\n else:\n speed = self['speed']\n out += ctd + self['filters'].ljust(11) + etd + ctd + self['x_bin'] + 'x' + self['y_bin'] + etd + ctd + speed + etd \n \n if self.number_windows() > 0:\n out += ctd + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + etd + td + self['x1_start'].ljust(3) + etd + td + self['y1_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n \n if self.number_windows() > 1:\n out += ctd + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + etd + td + self['x2_start'].ljust(3) + etd + td + self['y2_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n\n if 'grating' in self:\n out += ctd + self['grating'] + etd\n else:\n out += td + etd\n\n if 'slit_width' in self:\n out += ctd + self['slit_width'] + etd\n else:\n out += td + etd\n\n if 'slit_angle' in self:\n out += ctd + self['slit_angle'] + etd\n else:\n out += td + etd\n \n if 'ID' in self:\n out += ctd + self['ID'] + etd\n else:\n out += td + etd\n\n if 'PI' in self:\n out += ctd + self['PI'] + etd\n else:\n out += td + etd\n \n if 'Comment' in self:\n out += nwtd + self['Comment'] + etd\n else:\n out += td + etd\n\n return out",
"def latex_table(result, *, decimal_places=3, label=None):\n if label is None:\n label = 'tbl:stat_results'\n\n table_df = result.rankdf\n columns = table_df.columns.to_list()\n if result.omnibus != 'bayes' and result.pvalue >= result.alpha or \\\n result.omnibus == 'bayes' and len({'smaller', 'larger'}.intersection(set(result.rankdf['decision']))) == 0:\n columns.remove('effect_size')\n columns.remove('magnitude')\n if result.posthoc == 'tukeyhsd':\n columns.remove('meanrank')\n columns.insert(columns.index('ci_lower'), 'CI')\n columns.remove('ci_lower')\n columns.remove('ci_upper')\n rename_map = {}\n if result.effect_size == 'cohen_d':\n rename_map['effect_size'] = '$d$'\n elif result.effect_size == 'cliff_delta':\n rename_map['effect_size'] = r'D-E-L-T-A'\n elif result.effect_size == 'akinshin_gamma':\n rename_map['effect_size'] = r'G-A-M-M-A'\n rename_map['magnitude'] = 'Magnitude'\n rename_map['mad'] = 'MAD'\n rename_map['median'] = 'MED'\n rename_map['meanrank'] = 'MR'\n rename_map['mean'] = 'M'\n rename_map['std'] = 'SD'\n rename_map['decision'] = 'Decision'\n format_string = '[{0[ci_lower]:.' + str(decimal_places) + 'f}, {0[ci_upper]:.' + str(decimal_places) + 'f}]'\n table_df['CI'] = table_df.agg(format_string.format, axis=1)\n table_df = table_df[columns]\n if result.omnibus == 'bayes':\n table_df.at[table_df.index[0], 'decision'] = '-'\n table_df = table_df.rename(rename_map, axis='columns')\n\n float_format = lambda x: (\"{:0.\" + str(decimal_places) + \"f}\").format(x) if not np.isnan(x) else '-'\n table_string = table_df.to_latex(float_format=float_format, na_rep='-').strip()\n table_string = table_string.replace('D-E-L-T-A', r'$\\delta$')\n table_string = table_string.replace('G-A-M-M-A', r'$\\gamma$')\n table_string = table_string.replace(r'p\\_equal', r'$P(\\textit{equal})$')\n table_string = table_string.replace(r'p\\_smaller', r'$P(\\textit{smaller})$')\n print(r\"\\begin{table}[h]\")\n print(r\"\\centering\")\n print(table_string)\n print(r\"\\caption{Summary of populations}\")\n print(r\"\\label{%s}\" % label)\n print(r\"\\end{table}\")",
"def to_latex_table(self, labels=\"all\", parameter_dict=None, save_to_file=None):\n import os\n\n if save_to_file is not None and os.path.isfile(\"{}\".format(save_to_file)):\n raise FileExistsError(\n \"The file {} already exists.\".format(save_to_file)\n )\n if labels != \"all\" and isinstance(labels, str) and labels not in self.labels:\n raise ValueError(\"The label %s does not exist.\" % (labels))\n elif labels == \"all\":\n labels = list(self.labels)\n elif isinstance(labels, str):\n labels = [labels]\n elif isinstance(labels, list):\n for ll in labels:\n if ll not in list(self.labels):\n raise ValueError(\"The label %s does not exist.\" % (ll))\n\n table = self.latex_table(\n [self.samples_dict[label] for label in labels], parameter_dict,\n labels=labels\n )\n if save_to_file is None:\n print(table)\n elif os.path.isfile(\"{}\".format(save_to_file)):\n logger.warning(\n \"File {} already exists. Printing to stdout\".format(save_to_file)\n )\n print(table)\n else:\n with open(save_to_file, \"w\") as f:\n f.writelines([table])",
"def print_output_tables(cls,\n wfns=None, file=None,\n print_intensities=True,\n print_energies=True,\n print_energy_corrections=True,\n print_transition_moments=True,\n operators=None,\n logger=None, sep_char=\"=\", sep_len=100):\n\n if logger is None:\n logger = wfns.logger\n if logger is not None:\n def print_block(label, *args, **kwargs):\n with logger.block(tag=label):\n logger.log_print(\" \".join(\"{}\".format(x) for x in args), **kwargs)\n else:\n if file is None:\n file = sys.stdout\n\n def print_label(label, file=file, **opts):\n lablen = len(label) + 2\n split_l = int(np.floor((sep_len - lablen) / 2))\n split_r = int(np.ceil((sep_len - lablen) / 2))\n print(sep_char * split_l, label, sep_char * split_r, **opts, file=file)\n\n def print_footer(label=None, file=file, **opts):\n print(sep_char * sep_len, **opts, file=file)\n\n def print_block(label, *args, file=file, **kwargs):\n print_label(label, file=file, **kwargs)\n print(*args, file=file, **kwargs)\n print_footer(file=file, **kwargs)\n\n if print_energy_corrections:\n print_block(\"Energy Corrections\", wfns.format_energy_corrections_table())\n if print_energies:\n if wfns.degenerate_transformation is not None:\n print_block(\"Deperturbed Energies\",\n wfns.format_deperturbed_energies_table()\n )\n print_block(\n \"Degenerate Energies\",\n wfns.format_energies_table()\n )\n else:\n print_block(\"States Energies\",\n wfns.format_energies_table()\n )\n\n if print_intensities:\n ints = wfns.intensities # to make sure they're computed before printing starts\n if print_transition_moments:\n if wfns.degenerate_transformation is not None:\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_deperturbed_dipole_contribs_tables()):\n print_block(\"{} Deperturbed Dipole Contributions\".format(a), m)\n\n print_block(\"Deperturbed IR Data\",\n wfns.format_deperturbed_intensities_table()\n )\n\n for a, m in zip([\"X\", \"Y\", \"Z\"], wfns.format_dipole_contribs_tables()):\n print_block(\"{} Dipole Contributions\".format(a), m)\n print_block(\"IR Data\", wfns.format_intensities_table())\n\n if operators is not None:\n print_block(\"Operator Data\", wfns.format_operator_table(operators))",
"def print_latex(self):\n\n pdf = pylatex.Document(\n \"default\"\n )\n\n with pdf.create(pylatex.Section(\n \"Equações Diofantinas\"\n )) as section:\n\n section.append(\"Equação:\")\n ultimo = self.numbers[-1]\n eq = []\n cont = 1\n for i in self.numbers:\n simbolo = \"+\"\n if i == ultimo:\n simbolo = \"= 1\"\n eq.append(\n pylatex.NoEscape(\n \" {}x_{} {}\".format(i, cont, simbolo)\n )\n )\n cont = cont + 1\n\n section.append(pylatex.Math(data=eq))\n\n text = \"n = {}\".format(self.order)\n section.append(text)\n\n m = pylatex.Matrix(self.take_vec(), mtype='b')\n matrix = pylatex.Math(data=['b = ', m])\n section.append(matrix)\n\n m = pylatex.Matrix(self.take_matrix(), mtype='b')\n matrix = pylatex.Math(data=['A = ', m])\n section.append(matrix)\n\n section.append(\"Resposta = {}\".format(self.cofactor_matrix()))\n\n section.append(pylatex.LineBreak())\n section.append(\"Confirmando:\")\n section.append(pylatex.LineBreak())\n s = 0\n for i in range(len(self.numbers)):\n r = self.numbers[i] * self.cofactor_matrix()[i]\n s = s + r\n resp = \"\\t {}\\t{} \\t* \\t{} \\t= \\t{} \\t({})\\n\".format(\n i,\n self.numbers[i],\n self.cofactor_matrix()[i],\n r,\n s\n )\n section.append(resp)\n\n if self.create_pdf:\n pdf.generate_pdf()\n\n pdf.generate_tex()"
] | [
"0.6714302",
"0.65567935",
"0.64777684",
"0.6454308",
"0.62580484",
"0.6248004",
"0.60726035",
"0.60565686",
"0.6048208",
"0.5996834",
"0.59627306",
"0.593798",
"0.59338284",
"0.5898554",
"0.5889402",
"0.58879846",
"0.5881314",
"0.58484817",
"0.58275473",
"0.5825261",
"0.58162695",
"0.579993",
"0.5770554",
"0.5757738",
"0.575552",
"0.57512856",
"0.57455444",
"0.57296884",
"0.5711571",
"0.57056826"
] | 0.67665 | 0 |